code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
#!/usr/bin/env python
"""
Saves the database to a pickled file.

Usage: save_db.py [Table1,Table2,...]
With no arguments, every model class in westom.feednut.models is dumped
(except the auto-generated '*Middle*' join models).
"""
import os, sys

# Make the project package importable when run from the scripts directory.
sys.path.append(os.path.join(os.getcwd(), '..'))
sys.path.append(os.getcwd())
# Set DJANGO_SETTINGS_MODULE appropriately.
os.environ['DJANGO_SETTINGS_MODULE'] = 'westom.settings'
import westom.feednut.models as DB
sys.path.pop()
import cPickle, re
from datetime import datetime
from django.db.models.base import ModelBase
from westom.feednut.utils import djangojson


def save_db(filename):
    """Pickle the selected model tables ({model name: queryset}) to *filename*."""
    # First, figure out which tables to save; by default, if no args are
    # given, we save all model classes except the 'Middle' join models.
    tables = []
    if len(sys.argv) > 1:
        # NOTE(review): .lower() makes these names all-lowercase, but model
        # class names are typically capitalized, so hasattr() below would
        # never match -- confirm intended casing before changing.
        tables = sys.argv[1].strip().lower().split(',')
    else:
        for item in dir(DB):
            obj = getattr(DB, item)
            if isinstance(obj, ModelBase) and item.find('Middle') == -1:
                tables.append(item)

    db_dict = {}
    for item in tables:
        if hasattr(DB, item):
            obj = getattr(DB, item)
            if isinstance(obj, ModelBase):
                db_dict[obj.__name__] = obj.objects.all()

    # Ensure the target directory exists; only ignore OS-level errors
    # ("already exists" etc.) instead of the original bare `except:{}`
    # which hid every failure, including programming errors.
    try:
        os.makedirs(os.path.dirname(filename))
    except OSError:
        pass

    out = open(filename, 'wb')
    try:
        cPickle.dump(db_dict, out)
    finally:
        out.close()


if __name__ == '__main__':
    # NOTE(review): '%Y-%m-%d-%M-%S' has no hour field (%M is minutes) --
    # probably meant '%Y-%m-%d-%H-%M-%S'; kept as-is to preserve behavior.
    filename = os.path.join(
        os.getcwd(),
        'westom_%s.dmp' % (datetime.strftime(datetime.now(), '%Y-%m-%d-%M-%S')))
    save_db(filename)
| Python |
import sys
from string import join, strip
from time import sleep
import os, sys
import md5
import urllib2
import logging
sys.path.append(os.path.join(os.getcwd(), '..'))
sys.path.append(os.getcwd())
# Set DJANGO_SETTINGS_MODULE appropriately.
os.environ['DJANGO_SETTINGS_MODULE'] = 'westom.settings'
sys.path.pop()
from westom.feednut.utils import search, feed_accomplice
from westom.feednut.libs.spider import Spider
inc = 0
class myspider(Spider):
global inc
def handle(self, html, urllist):
try:
logging.debug('increment: %s, url: %s, length of html: %s' % (inc, self.url, len(html)))
feeds = search.scrape_for_feeds(self.url)
logging.debug('feeds: %s' % (feeds))
for feed in feeds:
logging.debug('added/updated feed: %s' % (feed_accomplice.get_feed(str(feed))))
except Exception, e:
raise e
return urllist
def _webparser(self, html):
return Spider._webparser(self, html)
def _webopen(self, base):
global inc
t = self
sleep(2)
self.url = base[0]
inc = inc + 1
return Spider._webopen(self, base)
if __name__ == '__main__':
if len(sys.argv) > 1:
url = sys.argv[1]
s = myspider().weburls( base=url, width=10000, depth=10 )
print s
else:
print "you need to call this with a url like this, >>python mycrawler.py http://www.othermedia.com" | Python |
"""
Create some pucking sweet data for testing
"""
import os, sys
import md5
sys.path.append(os.path.join(os.getcwd(), '..'))
sys.path.append(os.getcwd())
# Set DJANGO_SETTINGS_MODULE appropriately.
os.environ['DJANGO_SETTINGS_MODULE'] = 'westom.settings'
from westom.feednut.models import *
sys.path.pop()
from westom.feednut.utils import feed_accomplice
from westom.feednut.utils import misc
import random
def clean():
User.objects.all().delete()
for feed in Feed.objects.all():
if os.path.exists(feed.get_data_path()):
os.unlink(feed.get_data_path())
feed.delete()
Tag.objects.all().delete()
def make_data():
#make some users
user1 = User.objects.create_user('tom', "tom@zellmania.com", 'password')
user2 = User.objects.create_user('wes', "wbornor@splaysh.com", 'password')
user3 = User.objects.create_user('slayer', "slayer@zellmania.com", 'slayer')
user4 = User.objects.create_user('guest', "guest@zellmania.com", 'slayer')
user1.is_staff=True
user2.is_staff=True
user2.is_superuser=True
user2.is_superuser=True
user1.save()
user2.save()
users = (user1, user2, user3, user4)
#for user in users:
#user.set_password(user.password)
#user.save()
#print 'Saved', user
#feed_accomplice.add_default_feeds(user)
#some feeds to add
feeds = [
'http://www.bloglines.com/rss/about/news',
'http://distrowatch.com/news/dw.xml',
'http://sports.espn.go.com/espn/rss/news',
'http://new.linuxjournal.com/node/feed',
'http://www.fool.com/xml/foolnews_rss091.xml',
'http://rss.slashdot.org/slashdot/eqWf',
'http://feeds.feedburner.com/spaceheadlines',
'http://www.wired.com/news_drop/netcenter/netcenter.rdf',
'http://rssfeeds.webmd.com/rss/rss.aspx?RSSSource=RSS_PUBLIC',
'http://rss.people.com/web/people/rss/ataol/topheadlines/index.xml',
'http://rss.people.com/web/people/rss/ataol/photogalleries/index.xml',
'http://jquery.com/blog/feed/',
'http://feeds.feedburner.com/jquery/',
]
#some random tags to possibly add
randomTags = ['cool', 'tech', 'fun', 'pr0n', 'work', 'splaysh', 'web']
for i, url in zip(range(len(feeds)), feeds):
print url
feed = feed_accomplice.get_feed(url)
if feed:
#randomly get some tags to tag this feed with
tags = random.sample(randomTags, random.randint(0, len(randomTags)-1))
user = users[random.randint(0, len(users) - 1)]
userfeed = feed_accomplice.add_userfeed(user, feed, tags=tags + ['fn:home'])
if __name__ == '__main__':
clean()
make_data()
# feed_accomplice.update_feeds()
| Python |
import os, sys

# Make the project package importable when run from the scripts directory.
sys.path.append(os.path.join(os.getcwd(), '..'))
sys.path.append(os.getcwd())
# Set DJANGO_SETTINGS_MODULE appropriately.
os.environ['DJANGO_SETTINGS_MODULE'] = 'westom.settings'
os.environ['PYTHONINSPECT'] = '1'
from westom.feednut.utils import search

if __name__ == '__main__':
    # Ad-hoc driver: feed a captured HTML fragment plus a few known item
    # titles into search.generate_feed to exercise feed generation.
    context = """<TABLE BORDER=0 CELLSPACING=0 CELLPADDING=0 WIDTH=375><TR><TD><img src="http://us.st1.yimg.com/store1.yimg.com/Img/trans_1x1.gif" HEIGHT=5 WIDTH=1 BORDER=0 ALT="pad"></TD></TR><TR VALIGN=TOP><TD WIDTH=375><IMG SRC="http://us.st11.yimg.com/us.st.yimg.com/I/paulgraham_1920_1059" WIDTH=12 HEIGHT=14 ALIGN=LEFT BORDER=0 HSPACE=0 VSPACE=0><FONT SIZE=2 FACE="verdana"><A HREF="gap.html">Mind the Gap</A><img src="http://us.st1.yimg.com/store1.yimg.com/Img/trans_1x1.gif" HEIGHT=2 WIDTH=1 BORDER=0 ALT="pad"><BR></FONT></TD></TR><TR><TD><img src="http://us.st1.yimg.com/store1.yimg.com/Img/trans_1x1.gif" HEIGHT=5 WIDTH=1 BORDER=0 ALT="pad"></TD></TR><TR VALIGN=TOP><TD WIDTH=375><IMG SRC="http://us.st11.yimg.com/us.st.yimg.com/I/paulgraham_1920_1059" WIDTH=12 HEIGHT=14 ALIGN=LEFT BORDER=0 HSPACE=0 VSPACE=0><FONT SIZE=2 FACE="verdana"><A HREF="startupmistakes.html">The 18 Mistakes That Kill Startups</A><img src="http://us.st1.yimg.com/store1.yimg.com/Img/trans_1x1.gif" HEIGHT=2 WIDTH=1 BORDER=0 ALT="pad"><BR></FONT></TD></TR><TR><TD><img src="http://us.st1.yimg.com/store1.yimg.com/Img/trans_1x1.gif" HEIGHT=5 WIDTH=1 BORDER=0 ALT="pad"></TD></TR><TR VALIGN=TOP><TD WIDTH=375><IMG SRC="http://us.st11.yimg.com/us.st.yimg.com/I/paulgraham_1920_1059" WIDTH=12 HEIGHT=14 ALIGN=LEFT BORDER=0 HSPACE=0 VSPACE=0><FONT SIZE=2 FACE="verdana"><A HREF="mit.html">A Student's Guide to Startups</A><img src="http://us.st1.yimg.com/store1.yimg.com/Img/trans_1x1.gif" HEIGHT=2 WIDTH=1 BORDER=0 ALT="pad"><BR></FONT></TD></TR><TR><TD><img src="http://us.st1.yimg.com/store1.yimg.com/Img/trans_1x1.gif" HEIGHT=5 WIDTH=1 BORDER=0 ALT="pad"></TD></TR><TR VALIGN=TOP><TD WIDTH=375><IMG SRC="http://us.st11.yimg.com/us.st.yimg.com/I/paulgraham_1920_1059" WIDTH=12 HEIGHT=14 ALIGN=LEFT BORDER=0 HSPACE=0 VSPACE=0><FONT SIZE=2 FACE="verdana"><A HREF="investors.html">"""
    seed = ['Mind the Gap', "The 18 Mistakes That Kill Startups", "A Student's Guide to Startups"]
    search.generate_feed(context, seed)
#!/usr/bin/env python
"""
Update all of the feeds
This is meant to be run on the server only
"""
import os, sys
import socket
DOC_ROOT = '/home/tzellman/webapps/feednut/'
sys.path.append(DOC_ROOT)
sys.path.append(os.path.join(os.getcwd(), '..'))
# Set DJANGO_SETTINGS_MODULE appropriately.
os.environ['DJANGO_SETTINGS_MODULE'] = 'westom.settings'
from westom.feednut.models import *
sys.path.pop()
from westom.feednut.utils import feed_accomplice
if __name__ == '__main__':
for i in range(1, len(sys.argv)):
arg = sys.argv[i]
args = arg.split('##')
for arg in args:
feed = feed_accomplice.updatefeed(arg)
if feed:
print 'Added feed: %s' % arg | Python |
#!/usr/bin/env python
"""
Saves the Syndic8 feeds to our DB
"""
import os, sys
sys.path.append(os.path.join(os.getcwd(), '..'))
sys.path.append(os.getcwd())
# Set DJANGO_SETTINGS_MODULE appropriately.
os.environ['DJANGO_SETTINGS_MODULE'] = 'westom.settings'
from westom.feednut.models import *
sys.path.pop()
import xmlrpclib
import cPickle
from westom.feednut.utils import feed_accomplice
#FEED_KEYS = ['python', 'cnn', 'espn', 'yahoo']
FEED_KEYS = ['cnn']
def find_feeds(query):
server = xmlrpclib.ServerProxy("http://www.syndic8.com/xmlrpc.php")
syndic8 = server.syndic8
ids = syndic8.FindFeeds(query)
# fields = ['feedid', 'sitename', 'siteurl', 'dataurl', 'description']
return syndic8.GetFeedInfo(ids)
def search_site(query):
server = xmlrpclib.ServerProxy("http://www.syndic8.com/xmlrpc.php")
syndic8 = server.syndic8
ids = syndic8.FindSites(query)
return syndic8.GetFeedInfo(ids)
if __name__ == '__main__':
if len(sys.argv) > 2:
if sys.argv[1] == 'find':
for key in sys.argv[2:]:
print 'Searching for %s' % key
feeds = find_feeds(key)
for feed in feeds:
newfeed = feed_accomplice.get_feed(feed['dataurl'])
if newfeed:
print 'Saved Feed!: %s' % newfeed.title
elif sys.argv[1] == 'site':
for key in sys.argv[2:]:
print 'Searching for %s' % key
feeds = search_site(key)
for feed in feeds:
newfeed = feed_accomplice.get_feed(feed['dataurl'])
if newfeed:
print 'Saved Feed!: %s' % newfeed.title
# file = open(os.path.join(os.getcwd(), 'syndic8Feeds.dmp'), 'wb')
# cPickle.dump(feeds, file)
# file.close()
# for feed in feeds:
# print feed
# server = xmlrpclib.ServerProxy("http://www.syndic8.com/xmlrpc.php")
# syndic8 = server.syndic8
## print syndic8.GetFeedCount()
# fields = syndic8.GetFeedFields()
# print fields
# ids = syndic8.QueryFeeds('feedid', '>', '0')
# print len(ids)
# print ids
#
| Python |
#!/usr/bin/env python
"""
Saves the database to a pickled file.

Usage: save_db.py [Table1,Table2,...]
With no arguments, every model class in westom.feednut.models is dumped
(except the auto-generated '*Middle*' join models).
"""
import os, sys

# Make the project package importable when run from the scripts directory.
sys.path.append(os.path.join(os.getcwd(), '..'))
sys.path.append(os.getcwd())
# Set DJANGO_SETTINGS_MODULE appropriately.
os.environ['DJANGO_SETTINGS_MODULE'] = 'westom.settings'
import westom.feednut.models as DB
sys.path.pop()
import cPickle, re
from datetime import datetime
from django.db.models.base import ModelBase
from westom.feednut.utils import djangojson


def save_db(filename):
    """Pickle the selected model tables ({model name: queryset}) to *filename*."""
    # First, figure out which tables to save; by default, if no args are
    # given, we save all model classes except the 'Middle' join models.
    tables = []
    if len(sys.argv) > 1:
        # NOTE(review): .lower() makes these names all-lowercase, but model
        # class names are typically capitalized, so hasattr() below would
        # never match -- confirm intended casing before changing.
        tables = sys.argv[1].strip().lower().split(',')
    else:
        for item in dir(DB):
            obj = getattr(DB, item)
            if isinstance(obj, ModelBase) and item.find('Middle') == -1:
                tables.append(item)

    db_dict = {}
    for item in tables:
        if hasattr(DB, item):
            obj = getattr(DB, item)
            if isinstance(obj, ModelBase):
                db_dict[obj.__name__] = obj.objects.all()

    # Ensure the target directory exists; only ignore OS-level errors
    # ("already exists" etc.) instead of the original bare `except:{}`
    # which hid every failure, including programming errors.
    try:
        os.makedirs(os.path.dirname(filename))
    except OSError:
        pass

    out = open(filename, 'wb')
    try:
        cPickle.dump(db_dict, out)
    finally:
        out.close()


if __name__ == '__main__':
    # NOTE(review): '%Y-%m-%d-%M-%S' has no hour field (%M is minutes) --
    # probably meant '%Y-%m-%d-%H-%M-%S'; kept as-is to preserve behavior.
    filename = os.path.join(
        os.getcwd(),
        'westom_%s.dmp' % (datetime.strftime(datetime.now(), '%Y-%m-%d-%M-%S')))
    save_db(filename)
| Python |
#!/usr/bin/env python
"""
Restores the database from a pickled file
"""
import os, sys
sys.path.append(os.path.join(os.getcwd(), '..'))
sys.path.append(os.getcwd())
# Set DJANGO_SETTINGS_MODULE appropriately.
os.environ['DJANGO_SETTINGS_MODULE'] = 'westom.settings'
import westom.feednut.models as DB
sys.path.pop()
import cPickle
def restore_db(filename):
if os.path.exists(filename):
file = open(filename, 'r')
db_dict = cPickle.load(file)
for table, rows in db_dict.iteritems():
for row in rows:
try:
row.save()
except:{}
file.close()
if __name__ == '__main__':
if len(sys.argv) > 1:
print 'Restoring...'
restore_db(sys.argv[1]) | Python |
#!/usr/bin/env python
"""
Update all of the feeds
This is meant to be run on the server only
"""
import os, sys
import socket

DOC_ROOT = '/home/tzellman/webapps/feednut/'
sys.path.append(DOC_ROOT)
# Set DJANGO_SETTINGS_MODULE appropriately.
os.environ['DJANGO_SETTINGS_MODULE'] = 'westom.settings'
from westom.feednut.models import *
sys.path.pop()
from westom.feednut.utils import feed_accomplice

if __name__ == '__main__':
    # make a short timeout so dead feeds don't cause crawler to have a long runtime
    if hasattr(socket, 'setdefaulttimeout'):
        socket.setdefaulttimeout(5)
    feed_accomplice.update_feeds()
#!/usr/bin/env python
"""
Saves the Syndic8 feeds to our DB
"""
import os, sys
sys.path.append(os.path.join(os.getcwd(), '..'))
sys.path.append(os.getcwd())
# Set DJANGO_SETTINGS_MODULE appropriately.
os.environ['DJANGO_SETTINGS_MODULE'] = 'westom.settings'
from westom.feednut.models import *
sys.path.pop()
import xmlrpclib
import cPickle
from westom.feednut.utils import feed_accomplice
#FEED_KEYS = ['python', 'cnn', 'espn', 'yahoo']
FEED_KEYS = ['cnn']
def find_feeds(query):
server = xmlrpclib.ServerProxy("http://www.syndic8.com/xmlrpc.php")
syndic8 = server.syndic8
ids = syndic8.FindFeeds(query)
# fields = ['feedid', 'sitename', 'siteurl', 'dataurl', 'description']
return syndic8.GetFeedInfo(ids)
def search_site(query):
server = xmlrpclib.ServerProxy("http://www.syndic8.com/xmlrpc.php")
syndic8 = server.syndic8
ids = syndic8.FindSites(query)
return syndic8.GetFeedInfo(ids)
if __name__ == '__main__':
if len(sys.argv) > 2:
if sys.argv[1] == 'find':
for key in sys.argv[2:]:
print 'Searching for %s' % key
feeds = find_feeds(key)
for feed in feeds:
newfeed = feed_accomplice.get_feed(feed['dataurl'])
if newfeed:
print 'Saved Feed!: %s' % newfeed.title
elif sys.argv[1] == 'site':
for key in sys.argv[2:]:
print 'Searching for %s' % key
feeds = search_site(key)
for feed in feeds:
newfeed = feed_accomplice.get_feed(feed['dataurl'])
if newfeed:
print 'Saved Feed!: %s' % newfeed.title
# file = open(os.path.join(os.getcwd(), 'syndic8Feeds.dmp'), 'wb')
# cPickle.dump(feeds, file)
# file.close()
# for feed in feeds:
# print feed
# server = xmlrpclib.ServerProxy("http://www.syndic8.com/xmlrpc.php")
# syndic8 = server.syndic8
## print syndic8.GetFeedCount()
# fields = syndic8.GetFeedFields()
# print fields
# ids = syndic8.QueryFeeds('feedid', '>', '0')
# print len(ids)
# print ids
#
| Python |
#!/usr/bin/env python
"""
Update all of the feeds
This is meant to be run on the server only
"""
import os, sys
import socket
DOC_ROOT = '/home/tzellman/webapps/feednut/'
sys.path.append(DOC_ROOT)
sys.path.append(os.path.join(os.getcwd(), '..'))
# Set DJANGO_SETTINGS_MODULE appropriately.
os.environ['DJANGO_SETTINGS_MODULE'] = 'westom.settings'
from westom.feednut.models import *
sys.path.pop()
from westom.feednut.utils import feed_accomplice
if __name__ == '__main__':
for i in range(1, len(sys.argv)):
arg = sys.argv[i]
args = arg.split('##')
for arg in args:
feed = feed_accomplice.updatefeed(arg)
if feed:
print 'Added feed: %s' % arg | Python |
#!/usr/bin/env python
"""
Restores the database from a pickled file
"""
import os, sys
sys.path.append(os.path.join(os.getcwd(), '..'))
sys.path.append(os.getcwd())
# Set DJANGO_SETTINGS_MODULE appropriately.
os.environ['DJANGO_SETTINGS_MODULE'] = 'westom.settings'
import westom.feednut.models as DB
sys.path.pop()
import cPickle
def restore_db(filename):
if os.path.exists(filename):
file = open(filename, 'r')
db_dict = cPickle.load(file)
for table, rows in db_dict.iteritems():
for row in rows:
try:
row.save()
except:{}
file.close()
if __name__ == '__main__':
if len(sys.argv) > 1:
print 'Restoring...'
restore_db(sys.argv[1]) | Python |
import re, sys, subprocess, shutil, glob, os, tempfile


def compress(files, outfile, use_packer=False):
    """Concatenate *files*, run a JavaScript compressor over the result,
    and write the compressed output to *outfile*.

    By default the rhino-based compressor (custom_rhino.jar) is used;
    pass use_packer=True to use Dean Edwards' packer instead (rhino was
    having problems with some inputs, e.g. jquery).
    """
    # concatenate all the sources into one temp file
    temp, tempname = tempfile.mkstemp()
    for name in files:
        src = open(name, 'r')
        try:
            os.write(temp, src.read())
        finally:
            src.close()
    os.close(temp)

    if use_packer:
        if sys.platform == 'win32':
            p = subprocess.Popen(
                ['CScript', '/nologo', 'packer\\pack.wsf', tempname],
                stdout=subprocess.PIPE,
            )
        else:
            p = subprocess.Popen(
                ['perl', '-Ipacker', 'packer/jsPacker.pl', '-q', '-i', tempname],
                stdout=subprocess.PIPE,
            )
    else:
        p = subprocess.Popen(
            ['java', '-jar', 'custom_rhino.jar', '-c', tempname],
            stdout=subprocess.PIPE,
        )

    outf = open(outfile, 'w')
    try:
        shutil.copyfileobj(p.stdout, outf)
        outf.write('\n')
        outf.flush()
    finally:
        # close the output and remove the temp file even if copying fails
        # (the original leaked the temp file on error)
        outf.close()
        os.unlink(tempname)
def make_jquery():
    """Build the compressed jquery bundle.

    Kept as its own function because several jquery plugin files may
    eventually be combined into this one output.
    """
    sources = ['../feednut/media/js/raw/jquery.js']
    compress(sources, '../feednut/media/js/jquery.js')
if __name__ == '__main__':
    # rhino is having trouble w/feednut, so using packer for it
    compress(['../feednut/media/js/raw/feednut.js'],
             '../feednut/media/js/feednut.js', use_packer=True)
    make_jquery()
    # and some others.. each of these just moves up one dir
    # ('thickbox.js' was listed twice in the original; deduplicated)
    files = ['thickbox.js', 'dragdrop.js', 'drag.js',
             'lightbox.js', 'nifty.js', 'pngfix.js', 'MochiKit.js',
             'coordinates.js']
    for f in files:
        compress(['../feednut/media/js/raw/%s' % f],
                 '../feednut/media/js/%s' % f)
| Python |
#!/usr/bin/env python
"""
Update all of the feeds
This is meant to be run on the server only
"""
import os, sys
import socket

DOC_ROOT = '/home/tzellman/webapps/feednut/'
sys.path.append(DOC_ROOT)
# Set DJANGO_SETTINGS_MODULE appropriately.
os.environ['DJANGO_SETTINGS_MODULE'] = 'westom.settings'
from westom.feednut.models import *
sys.path.pop()
from westom.feednut.utils import feed_accomplice

if __name__ == '__main__':
    # make a short timeout so dead feeds don't cause crawler to have a long runtime
    if hasattr(socket, 'setdefaulttimeout'):
        socket.setdefaulttimeout(5)
    feed_accomplice.update_feeds()
import re, sys, subprocess, shutil

if __name__ == '__main__':
    # Compress media/js/westom.js with rhino and write the result (with a
    # license header) to stdout.
    infile = file('media/js/westom.js')
    # outf = file('media/js/fn.js')
    outf = sys.stdout
    p = subprocess.Popen(
        ['java', '-jar', 'scripts/custom_rhino.jar', '-c', infile.name],
        stdout=subprocess.PIPE,
    )
    # (The original applied `% locals()` to this header string; the string
    # contains no format specifiers, so that was a no-op and was removed.)
    print >>outf, """/***\n(c) 2006 Westom LLC. All rights Reserved.\n***/
"""
    shutil.copyfileobj(p.stdout, outf)
    outf.write('\n')
    outf.flush()
    outf.close()
| Python |
#!/usr/bin/env python
from django.core.management import execute_manager

try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
    sys.exit(1)

import MySQLdb, sys
def freshen():
"""
Helper function that deletes and creates the database
"""
#let's delete the database
db=MySQLdb.connect(db=settings.DATABASE_NAME,
user=settings.DATABASE_USER,
passwd=settings.DATABASE_PASSWORD)
c=db.cursor()
c.execute("DROP DATABASE " + settings.DATABASE_NAME)
print 'Dropped database: ' + settings.DATABASE_NAME
c.execute("CREATE DATABASE " + settings.DATABASE_NAME)
print 'Created database: ' + settings.DATABASE_NAME
sys.argv.remove('fresh')
#first, let's init it
sys.argv = [sys.argv[0], 'syncdb']
execute_manager(settings)
#now, update some fields that should be blobs
c.execute("USE %s" % settings.DATABASE_NAME)
# c.execute("ALTER TABLE FEEDNUT_FEED MODIFY FEED_DATA BLOB NOT NULL")
c.execute("ALTER TABLE FEEDNUT_FEED CONVERT TO CHARACTER SET utf8")
c.execute("ALTER TABLE FEEDNUT_FEEDENTRY CONVERT TO CHARACTER SET utf8")
db.close()
print 'Synced database'
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == 'fresh':
freshen()
elif len(sys.argv) > 1 and sys.argv[1] == 'compress':
#compresses the javascripts
import os, subprocess
os.chdir('scripts')
p = subprocess.Popen(
['python', 'compressJS.py'],
stdout=subprocess.PIPE,
)
#otherwise, process normally...
else:
execute_manager(settings)
| Python |
# URL configuration for the feednut application (Django 0.96-style urlconf).
from django.conf.urls.defaults import *
from westom.settings import DOCUMENT_ROOT
import os
from westom.feednut.feeds import HottestFeed, RecentRead, ReadLater
# custom 404 handler
handler404 = 'westom.feednut.views.page_not_found'
#register these two feeds
feeds = {
'hottest': HottestFeed,
'latest': RecentRead,
'readlater': ReadLater,
}
urlpatterns = patterns('',
(r'^$', 'westom.feednut.views.index'),
(r'^gpalert/', include('django.contrib.admin.urls')),
#these deal with registration/login
(r'^login/$', 'westom.feednut.views.login'),
(r'^login/forgot/$', 'westom.feednut.views.forgot_password'),
(r'^login/reset/$', 'westom.feednut.views.reset_password'),
(r'^accounts/login/$', 'westom.feednut.views.unauthenticated'), #required for django's @login_required decorator
(r'^logout/$', 'westom.feednut.views.logout'),
# registration goes through the captcha verifier, which forwards to the
# real register view on success
(r'^register/$','westom.feednut.captcha.verify',
dict( forward_to='westom.feednut.views.register', )),
(r'^captcha/i/$', 'westom.feednut.captcha.image'),
(r'^captcha/g/$', 'westom.feednut.views.new_captcha'),
# static media served straight off the document root
(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': os.path.join(DOCUMENT_ROOT, 'feednut/media/')}),
# feed management endpoints
(r'^form/subscribe/$', 'westom.feednut.views.form_subscribe'),
(r'^feed/subscribe/$', 'westom.feednut.views.subscribe'),
(r'^feed/search/$', 'westom.feednut.views.search_feeds'),
(r'^feed/tag/$', 'westom.feednut.views.tag_feed'),
(r'^feed/remove/$', 'westom.feednut.views.remove_feed'),
(r'^feed/read/$', 'westom.feednut.views.read_article'),
(r'^feed/readlater/$', 'westom.feednut.views.read_later'),
(r'^feed/generated/$', 'westom.feednut.views.generate_feed'),
(r'^feed/(?P<id>\w+)/$', 'westom.feednut.views.get_feed'),
(r'^help/terms/$', 'westom.feednut.views.pass_through', {'page': 'terms.html'}),
(r'^help/faq/$', 'westom.feednut.views.pass_through', {'page': 'faq.html'}),
#site-wide RSS feeds
(r'^.rss$', 'westom.feednut.feeds.feed', {'url' : 'hottest', 'feed_dict': feeds}),
(r'^hottest.rss$', 'westom.feednut.feeds.feed', {'url' : 'hottest', 'feed_dict': feeds}),
(r'^latest.rss$', 'westom.feednut.feeds.feed', {'url' : 'latest', 'feed_dict': feeds}),
#User endpoints
(r'^(?P<username>\w+)/$', 'westom.feednut.views.get_user_page'),
(r'^(?P<username>\w+)/tags/$', 'westom.feednut.views.get_user_tags'),
(r'^(?P<username>\w+)/feed/(?P<id>\w+)/$', 'westom.feednut.views.userfeed_action'),
(r'^(?P<username>\w+)/tags/(?P<tags>[fn:]*\w+[-\w]*)/$', 'westom.feednut.views.get_user_page'),
# (r'^(?P<username>\w+)/(?P<tag>fn:?\w+)/$', 'westom.feednut.views.get_user_page_with_tag'),
# (r'^(?P<username>\w+)/update/$', 'westom.feednut.views.update_account'),
# (r'^(?P<username>\w+)/bookmark/$', 'westom.feednut.views.bookmark'),
# per-user RSS feeds
(r'^(?P<username>\w+)/.rss$', 'westom.feednut.feeds.user_feed', {'url' : 'hottest', 'feed_dict': feeds}),
(r'^(?P<username>\w+)/hottest.rss$', 'westom.feednut.feeds.user_feed', {'url' : 'hottest', 'feed_dict': feeds}),
(r'^(?P<username>\w+)/latest.rss$', 'westom.feednut.feeds.user_feed', {'url' : 'latest', 'feed_dict': feeds}),
(r'^(?P<username>\w+)/readlater.rss$', 'westom.feednut.feeds.user_feed', {'url' : 'readlater', 'feed_dict': feeds}),
(r'^(?P<username>\w+)/subscriptions/import/$', 'westom.feednut.views.import_subscriptions'),
(r'^(?P<username>\w+)/subscriptions/export/$', 'westom.feednut.views.export_subscriptions'),
(r'^(?P<username>\w+)/buddies/add/$', 'westom.feednut.views.add_user_buddy'),
(r'^(?P<username>\w+)/buddies/del/$', 'westom.feednut.views.remove_user_buddy'),
(r'^(?P<username>\w+)/buddies/$', 'westom.feednut.views.get_user_buddies'),
#these are misc. functionalities
# Uncomment this for admin:
#(r'^admin/', include('django.contrib.admin.urls')),
)
| Python |
# Django settings for westom project.
import sys, os, inspect

# I added this so that we can always refer to it for the current
# document root. This will be helpful when we move to Apache
DOCUMENT_ROOT = os.getcwd()
curframe = inspect.currentframe()
try:
    DOCUMENT_ROOT = os.path.abspath(os.path.dirname(inspect.getframeinfo(curframe)[0]))
finally:
    del curframe

#change this to point to where you have/want the feeds stored
FEEDS_DIR = os.path.join(DOCUMENT_ROOT, '../feeds')

DEBUG = True
TEMPLATE_DEBUG = DEBUG
URL_HOST = 'http://localhost:8000'

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS

DATABASE_ENGINE = 'mysql' # 'postgresql', 'mysql', 'sqlite3' or 'ado_mssql'.
DATABASE_NAME = '' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.

## MAIL SETTINGS
EMAIL_HOST = ""
EMAIL_PORT = 25
DEFAULT_FROM_EMAIL = ""
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''

# Local time zone for this installation. All choices can be found here:
# http://www.postgresql.org/docs/current/static/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
# http://blogs.law.harvard.edu/tech/stories/storyReader$15
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

#uncomment these on the server
#SESSION_COOKIE_DOMAIN = '.feednut.com'
#APPEND_SLASH = True

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = '/static'

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'

# Make this unique, and don't share it with anybody.
SECRET_KEY = '()_bph_=kn&gfbub($-wwzr=h*j1^k+0srce%x1br5z%+mk$ze'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
    # 'django.template.loaders.eggs.load_template_source',
)

MIDDLEWARE_CLASSES = (
    "django.middleware.common.CommonMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.middleware.doc.XViewMiddleware",
    # "westom.rewrite.RewiteLocalForwardedRequest",
)

ROOT_URLCONF = 'westom.urls'

TEMPLATE_DIRS = (
    os.path.join(DOCUMENT_ROOT, 'feednut/templates'),
)

TEMPLATE_CONTEXT_PROCESSORS = (
    "django.core.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "westom.feednut.context_processors.default",
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.admin',
    'westom.feednut',
)

#setup the logger -- need to replace the \\ on windows
from logging.config import fileConfig
LOG_PATH = os.path.abspath(os.path.join(DOCUMENT_ROOT, '../log/feednut.log')).replace('\\', '/')
try:
    os.makedirs(os.path.dirname(LOG_PATH))
except OSError:
    # log directory already exists (or can't be created) -- the original
    # used a bare `except:{}`; fileConfig below will fail loudly if the
    # path is genuinely unusable
    pass
fileConfig(os.path.join(DOCUMENT_ROOT, 'logging.config'), defaults={'log_path':LOG_PATH})
# Packaging metadata for the django-jython distribution (distutils).
from distutils.core import setup
setup(
name = "django-jython",
version = "1.3.0b1",
# all sub-packages must be listed explicitly for distutils
packages = ['doj',
'doj.backends',
'doj.backends.zxjdbc',
'doj.backends.zxjdbc.postgresql',
'doj.backends.zxjdbc.oracle',
'doj.backends.zxjdbc.mysql',
'doj.backends.zxjdbc.mysql.constants',
'doj.backends.zxjdbc.sqlite3',
'doj.backends.zxjdbc.mssql2k',
'doj.management',
'doj.management.commands',
'doj.test',
'doj.test.xmlrunner'],
# non-Python files shipped with the 'war' management command skeleton
package_data = {
'doj.management.commands': ['war_skel/application.py',
'war_skel/WEB-INF/web.xml',
'war_skel/WEB-INF/lib/*',
'war_skel/WEB-INF/lib-python/README']},
# metadata for upload to PyPI
author = "Josh Juneau",
author_email = "juneau001@gmail.com",
description = "Database backends and management commands, for development under Django/Jython",
license = "BSD",
keywords = "django jython database java",
url = "http://code.google.com/p/django-jython/",
classifiers = [
"Development Status :: 4 - Beta",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Programming Language :: Java"
]
)
| Python |
#!/usr/bin/env python
from django.core.management import execute_manager

try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)

if __name__ == "__main__":
    execute_manager(settings)
| Python |
# URLconf for the test project; every pattern is currently commented out.
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Example:
# (r'^overlapping_media_url_and_admin_media/', include('overlapping_media_url_and_admin_media.foo.urls')),
# Uncomment the admin/doc line below and add 'django.contrib.admindocs'
# to INSTALLED_APPS to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# (r'^admin/', include(admin.site.urls)),
)
| Python |
# Django settings for overlapping_media_url_and_admin_media project.
# NOTE: MEDIA_URL ('/media/') and ADMIN_MEDIA_PREFIX ('/media/admin/')
# deliberately overlap -- that is the scenario this test project exercises.
import os
PROJECT_ROOT = os.path.dirname(__file__)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = '' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = '' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = PROJECT_ROOT + '/media'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/admin/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '_m5#@9lz7*e&ru(iyhhz18#v^$=oio-#hrpnbb7jz%h4a4k4k#'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ROOT_URLCONF = 'overlapping_media_url_and_admin_media.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
# 'doj' = django-jython database backends / management commands
'doj'
)
| Python |
#!/usr/bin/env python
from django.core.management import execute_manager

try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)

if __name__ == "__main__":
    execute_manager(settings)
| Python |
"""URLconf for the overlapping_media_url_and_admin_media test project.

All URL patterns are currently commented out; the fixture relies only on
its settings.
"""
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
    # Example:
    # (r'^overlapping_media_url_and_admin_media/', include('overlapping_media_url_and_admin_media.foo.urls')),
    # Uncomment the admin/doc line below and add 'django.contrib.admindocs'
    # to INSTALLED_APPS to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    # (r'^admin/', include(admin.site.urls)),
)
| Python |
# Django settings for overlapping_media_url_and_admin_media project.
import os
# Base directory of the project; used below to build MEDIA_ROOT.
PROJECT_ROOT = os.path.dirname(__file__)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# Database settings are intentionally left empty in this fixture.
DATABASE_ENGINE = ''           # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = ''             # Or path to database file if using sqlite3.
DATABASE_USER = ''             # Not used with sqlite3.
DATABASE_PASSWORD = ''         # Not used with sqlite3.
DATABASE_HOST = ''             # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = ''             # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = PROJECT_ROOT + '/media/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
# NOTE: identical to MEDIA_URL above -- the fully-overlapping case this
# fixture project is meant to exercise.
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '_m5#@9lz7*e&ru(iyhhz18#v^$=oio-#hrpnbb7jz%h4a4k4k#'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
#    'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ROOT_URLCONF = 'overlapping_media_url_and_admin_media.urls'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.admin',
    'doj'
)
| Python |
from django.db import models
# Create your models here.
| Python |
# Create your views here.
| Python |
from django.db import models
# Create your models here.
| Python |
# Create your views here.
| Python |
#!/usr/bin/env python
"""Standard Django (pre-1.4) management script.

Delegates to ``execute_manager`` with this project's ``settings`` module.
"""
from django.core.management import execute_manager
try:
    import settings  # Assumed to be in the same directory.
except ImportError:
    import sys
    # Report the problem and exit non-zero so callers/scripts can detect it.
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
if __name__ == "__main__":
    execute_manager(settings)
| Python |
"""URLconf for the ``project`` test project.

All URL patterns are currently commented out; the fixture relies only on
its settings.
"""
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
    # Example:
    # (r'^project/', include('project.foo.urls')),
    # Uncomment the admin/doc line below and add 'django.contrib.admindocs'
    # to INSTALLED_APPS to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    # (r'^admin/(.*)', admin.site.root),
)
| Python |
# Django settings for project project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# Database settings are intentionally left empty in this fixture.
# NOTE(review): unlike the sibling fixture settings, DATABASE_HOST and
# DATABASE_PORT are not defined here -- presumably relying on Django's
# global defaults; confirm this is intentional.
DATABASE_ENGINE = ''
DATABASE_NAME = ''
DATABASE_USER = ''
DATABASE_PASSWORD = ''
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '_8lxqg5u^s1%lx85h^2v)d#mh0ts4yjw0%j=6$vx-j$o7$i6c-'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
#    'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ROOT_URLCONF = 'project.urls'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'other_pkg.other_app',
    'other_pkg.yet_another_app',
    'doj'
)
| Python |
from django.core.management.base import BaseCommand
from django.conf import settings
import os
class Command(BaseCommand):
subcommands = {
"jndiconfig": "Prints a sample context XML configuration with the "
"appropriate JNDI datasource configuration for "
"connection pooling"
}
subcommands_help = "\n\n".join("%s\n %s" % (subcmd, subhelp)
for subcmd, subhelp in subcommands.items())
help = ("Utilities for deployment and configuration on tomcat\n\n"
"Subcommands:\n\n" + subcommands_help)
def handle(self, *args, **options):
if not args:
print "You need to pass one subcommand as argument"
print "Available subcommands:\n"
print self.subcommands_help
return
if args[0] not in self.subcommands.keys():
print "Subcommand %s not recognized\n" % args[0]
print self.subcommands_help
return
getattr(self, args[0])(args, options)
def jndiconfig(self, *args, **options):
def usage():
print
print "Add a line to your settings.py specifying the name of your JNDI datasource such as DATABASE_OPTIONS = {'JNDI_NAME': 'java:comp/env/jdbc/myDataSource'} and keep the other DATABASE settings untouched"
def resource_name():
return settings.DATABASE_OPTIONS['JNDI_NAME'].replace('java:comp/env/', '')
if not hasattr(settings, 'DATABASE_OPTIONS'):
print "You haven't set the DATABASE_OPTIONS"
usage()
return
if not 'JNDI_NAME' in settings.DATABASE_OPTIONS:
print "You haven't set the JNDI_NAME entry on DATABASE_OPTIONS"
usage()
return
from django.db import connection
print ("\nFor a basic configuration of JNDI on your Tomcat server, "
"create a file named %s.xml on "
"/path/to/apache-tomcat-6.x.x/conf/Catalina/localhost/ "
"with the following contents:" % self.project_name())
print """
<Context>
<Resource name="%s"
auth="Container"
type="javax.sql.DataSource"
username="%s"
password="%s"
driverClassName="%s"
url="%s"
maxActive="8"
maxIdle="4"/>
</Context>
""" % (resource_name(), settings.DATABASE_USER,
settings.DATABASE_PASSWORD, connection.driver_class_name,
connection.jdbc_url())
print ("Do NOT forget to copy the JDBC Driver jar file to the lib/ "
"directory of your Tomcat instalation")
def project_directory(self):
return os.path.dirname(self.settings_module().__file__)
def project_name(self):
return os.path.basename(self.project_directory())
def settings_module(self):
return __import__(settings.SETTINGS_MODULE, {}, {},
(settings.SETTINGS_MODULE.split(".")[-1],))
| Python |
import os
import shutil
import tempfile
import zipfile
import glob
from optparse import make_option
from django.core.management.base import BaseCommand
from django.conf import settings
from django.template import Context, Template
# TODO: The (ab)use of __file__ makes me nervous. We should improve compatibility
# with zipimport.
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--include-java-libs', dest='include_java_libs', default='',
help='List of java libraries (in the form of JAR files), '
'which must be included, separated by the "%s" '
'character. Typically used for JDBC drivers ' %
os.path.pathsep),
make_option('--include-py-packages', dest='include_py_packages',
default='',
help='List of python top-level packages (directories) to '
'include separated by the "%s" character' %
os.path.pathsep),
make_option('--include-py-path-entries', dest='include_py_path_entries',
default='',
help='List of python path entries (directories or JAR/ZIP '
'files) to include, separated by the "%s" character' %
os.path.pathsep),
make_option('--include-additional-dirs', dest='include_add_dirs',
default='',
help='List of Directories to put in WEB-INF Folder '
'separated by the "%s" character' %
os.path.pathsep),
make_option('--context-root', dest='context_root', default='',
help='Name of the context root for the application. If '
'unspecified, the project name is used. The context '
'root name is used as the name of the WAR file, and '
'as a prefix for some url-related settings, such as '
'MEDIA_URL')
)
help = ("Builds a WAR file for stand-alone deployment on a Java "
"Servlet container")
def handle(self, *args, **options):
project_name = self.project_name()
context_root = options['context_root'] or project_name
temp_dir = tempfile.mkdtemp()
exploded_war_dir = os.path.join(temp_dir, project_name)
print
print "Assembling WAR on %s" % exploded_war_dir
print
self.copy_skel(exploded_war_dir)
self.fill_templates(exploded_war_dir,
['WEB-INF/web.xml', 'application.py'],
{'project_name': project_name,
'settings': settings})
self.copy_jython(exploded_war_dir)
self.copy_django(exploded_war_dir)
self.copy_project_media(exploded_war_dir)
self.copy_project_static(exploded_war_dir)
self.copy_admin_media(exploded_war_dir)
self.copy_project(exploded_war_dir)
self.fix_project_settings(exploded_war_dir, context_root)
self.copy_apps(exploded_war_dir)
if options['include_java_libs']:
for java_lib in options['include_java_libs'].split(os.path.pathsep):
self.copy_java_jar(exploded_war_dir, java_lib)
if options['include_py_packages']:
py_package_dirs = options['include_py_packages'].split(
os.path.pathsep)
for py_package_dir in py_package_dirs:
self.copy_py_package_dir(exploded_war_dir, py_package_dir)
if options['include_add_dirs']:
dirs = options['include_add_dirs'].split(os.path.pathsep)
for entry in dirs:
self.copy_add_dir(exploded_war_dir, entry)
if options['include_py_path_entries']:
py_path_entries = options['include_py_path_entries'].split(
os.path.pathsep)
for py_path_entry in py_path_entries:
self.copy_py_path_entry(exploded_war_dir, py_path_entry)
# I'm still unsure of wheter (by default) the WAR should be generated on
# the parent directory of the project root or inside the generated
# temporary directory.
#
# At least I'm sure I don't want to put it inside the project directory,
# to avoid cluttering it, and to keep it simple the logic of copying the
# project into the WAR (otherwise, it should special case the war file
# itself)
war_file_name = os.path.join(self.project_directory(),
'..', context_root + '.war')
self.war(exploded_war_dir, war_file_name)
print "Cleaning %s..." % temp_dir
shutil.rmtree(temp_dir)
print """
Finished.
Now you can copy %s to whatever location your application server wants it.
""" % os.path.abspath(war_file_name)
def copy_skel(self, exploded_war_dir):
print "Copying WAR skeleton..."
shutil.copytree(self._skel_directory(), exploded_war_dir)
def _skel_directory(self):
return os.path.join(os.path.dirname(__file__), 'war_skel')
def fill_templates(self, exploded_war_dir, relative_file_names, vars):
for relative_file_name in relative_file_names:
file_name = os.path.join(*[exploded_war_dir] +
relative_file_name.split('/'))
template = Template(file(file_name).read())
f = file(file_name, 'w')
f.write(template.render(Context(vars)))
f.close()
def copy_jython(self, exploded_war_dir):
jython_lib_path = os.path.dirname(os.path.abspath(os.__file__))
jython_home = os.path.dirname(jython_lib_path)
if jython_home.endswith('.jar'):
# We are on a Jython stand-alone installation.
self.copy_java_jar(exploded_war_dir, jython_home)
else:
# Is this Jython installation an official release version?
if os.path.exists(os.path.join(jython_home, 'jython.jar')):
self.copy_java_jar(exploded_war_dir,
os.path.join(jython_home,
'jython.jar'))
else:
# SVN installation: jython-dev.jar inside jython_home. Also need
# to include the extra java libraries
self.copy_java_jar(exploded_war_dir,
os.path.join(jython_home, 'jython-dev.jar'))
for jar in glob.glob(os.path.join(jython_home,
'javalib', '*.jar')):
self.copy_java_jar(exploded_war_dir, jar)
self.copy_py_path_entry(exploded_war_dir, jython_lib_path)
def copy_django(self, exploded_war_dir):
import django
django_dir = os.path.dirname(os.path.abspath(django.__file__))
self.copy_py_package_dir(exploded_war_dir, django_dir)
def copy_admin_media(self, exploded_war_dir):
from django.contrib import admin
self.copy_media(exploded_war_dir,
os.path.join(os.path.dirname(admin.__file__), 'media'),
os.path.join(*settings.ADMIN_MEDIA_PREFIX.split('/')))
def copy_project(self, exploded_war_dir):
self.copy_py_package_dir(exploded_war_dir, self.project_directory())
def fix_project_settings(self, exploded_war_dir, context_root):
fix_media = (settings.MEDIA_URL and
not settings.MEDIA_URL.startswith('http'))
fix_static = (settings.STATIC_URL and
not settings.STATIC_URL.startswith('http'))
fix_admin_media = (settings.ADMIN_MEDIA_PREFIX and
not settings.ADMIN_MEDIA_PREFIX.startswith('http'))
if not fix_media and not fix_admin_media and not fix_static:
return
fix = """
# Added by django-jython. Fixes URL prefixes to include the context root:
"""
if fix_media:
fix += "MEDIA_URL='/%s%s'\n" % (context_root, settings.MEDIA_URL)
if fix_admin_media:
fix += "ADMIN_MEDIA_PREFIX='/%s%s'\n" % (context_root,
settings.ADMIN_MEDIA_PREFIX)
if fix_static:
fix += "STATIC_URL='/%s%s'\n" % (context_root, settings.STATIC_URL)
settings_name = settings.SETTINGS_MODULE.split('.')[-1]
deployed_settings = os.path.join(exploded_war_dir,
'WEB-INF',
'lib-python',
self.project_name(),
settings_name + '.py')
if os.path.exists(deployed_settings):
settings_file_modified = file(deployed_settings, 'a')
settings_file_modified.write(fix)
settings_file_modified.close()
else:
print """WARNING: settings module file not found inside the project
directory (maybe you have split settings into a package?)
You SHOULD manually prefix the ADMIN_MEDIA_PREFIX and/or MEDIA_URL settings on the
deployed settings file. You can append the following block at the end of the file:
# ---------------------------- Begin Snip ---------------------------------
%s
# ----------------------------- End Snip -----------------------------------
""" % fix
def copy_project_media(self, exploded_war_dir):
if not settings.MEDIA_ROOT:
print ("WARNING: Not copying project media, since MEDIA_ROOT "
"is not defined")
return
if not settings.MEDIA_URL:
print ("WARNING: Not copying project media, since MEDIA_URL "
"is not defined")
return
if settings.MEDIA_URL.startswith('http'):
print ("WARNING: Not copying project media, since MEDIA_URL "
"is absolute (starts with 'http')")
self.copy_media(exploded_war_dir,
settings.MEDIA_ROOT,
os.path.join(*settings.MEDIA_URL.split('/')))
def copy_project_static(self, exploded_war_dir):
if not settings.STATIC_ROOT:
print ("WARNING: Not copying project static, since STATIC_ROOT "
"is not defined")
return
if not settings.STATIC_URL:
print ("WARNING: Not copying project static, since STATIC_URL "
"is not defined")
return
if settings.STATIC_URL.startswith('http'):
print ("WARNING: Not copying project static, since STATIC_URL "
"is absolute (starts with 'http')")
self.copy_media(exploded_war_dir,
settings.STATIC_ROOT,
os.path.join(*settings.STATIC_URL.split('/')))
def copy_apps(self, exploded_war_dir):
already_included_pkgs = ['django', self.project_name()]
for app in settings.INSTALLED_APPS:
# We copy the whole package in which the app resides
app_pkg = __import__(app)
if app_pkg.__name__ in already_included_pkgs:
continue
app_pkg_dir = os.path.dirname(os.path.abspath(app_pkg.__file__))
self.copy_py_package_dir(exploded_war_dir, app_pkg_dir)
already_included_pkgs.append(app_pkg.__name__)
def copy_java_jar(self, exploded_war_dir, java_lib):
# java_lib is a path to a JAR file
dest_name = os.path.basename(java_lib)
print "Copying %s..." % dest_name
shutil.copy(java_lib,
os.path.join(exploded_war_dir,
'WEB-INF', 'lib', dest_name))
def copy_py_package_dir(self, exploded_war_dir, py_package_dir):
"""
Copies a directory containing a python package to lib-python/
"""
dest_name = os.path.basename(py_package_dir)
print "Copying %s..." % dest_name
shutil.copytree(py_package_dir,
os.path.join(exploded_war_dir,
'WEB-INF', 'lib-python', dest_name))
def copy_add_dir(self, exploded_war_dir, add_dir):
"""
Copies a directory containing a folder to WEB-INF/
"""
dest_name = os.path.basename(add_dir)
print "Copying %s..." % dest_name
shutil.copytree(add_dir,
os.path.join(exploded_war_dir,
'WEB-INF', dest_name))
def copy_py_path_entry(self, exploded_war_dir, dir_or_file):
"""
Copies a directory or zip/egg file to lib-python and generates a .pth
file to make it part of sys.path
"""
dest_name = os.path.basename(dir_or_file)
print "Copying %s..." % dest_name
dest_path = os.path.join(exploded_war_dir,
'WEB-INF', 'lib-python', dest_name)
shutil.copytree(dir_or_file, dest_path)
pth_file = file(dest_path + '.pth', 'w')
pth_file.write("%s\n" % dest_name)
pth_file.close()
def copy_media(self, exploded_war_dir, src_dir, dest_relative_path):
if dest_relative_path[-1] == os.path.sep:
dest_relative_path = dest_relative_path[:-1]
if os.path.sep in dest_relative_path:
# We have to construct the directory hierarchy (without the last
# level)
d = exploded_war_dir
for sub_dir in os.path.split(dest_relative_path)[:-1]:
d = os.path.join(d, sub_dir)
os.mkdir(d)
print "Copying %s..." % dest_relative_path
shutil.copytree(src_dir,
os.path.join(exploded_war_dir, dest_relative_path))
def war(self, exploded_war_dir, war_file_name):
# Make sure we are working with absolute paths
exploded_war_dir = os.path.abspath(exploded_war_dir)
war_file_name = os.path.abspath(war_file_name)
print "Building WAR on %s..." % war_file_name
war = zipfile.ZipFile(war_file_name, 'w',
compression=zipfile.ZIP_DEFLATED)
def walker(arg, directory, files):
# The following "+ 1" accounts for the path separator after the
# directory name
relative_dir = directory[len(exploded_war_dir) + 1:]
for f in files:
file_name = os.path.join(directory, f)
zip_file_name = os.path.join(relative_dir, f)
if not os.path.isfile(file_name):
continue
war.write(file_name,
os.path.join(relative_dir, f),
zipfile.ZIP_DEFLATED)
os.path.walk(exploded_war_dir, walker, None)
war.close()
def settings_module(self):
return __import__(settings.SETTINGS_MODULE, {}, {},
(settings.SETTINGS_MODULE.split(".")[-1],))
def project_directory(self):
return os.path.dirname(self.settings_module().__file__)
def project_name(self):
return os.path.basename(self.project_directory())
| Python |
from django.core.handlers import wsgi
import os


def handler(environ, start_response):
    """WSGI entry point for the servlet bridge.

    This file is a template: ``{{ settings.SETTINGS_MODULE }}`` is
    substituted with the real settings module name at WAR-build time.
    """
    # BUGFIX: use os.environ instead of os.putenv. putenv() only changes
    # the process environment at the C level and is NOT reflected in
    # os.environ, which is what Django reads to locate the settings module.
    os.environ["DJANGO_SETTINGS_MODULE"] = "{{ settings.SETTINGS_MODULE }}"
    h = wsgi.WSGIHandler()
    return h(environ, start_response)
| Python |
"""MySQL FIELD_TYPE Constants
These constants represent the various column (field) types that are
supported by MySQL.
"""
DECIMAL = 0
TINY = 1
SHORT = 2
LONG = 3
FLOAT = 4
DOUBLE = 5
NULL = 6
TIMESTAMP = 7
LONGLONG = 8
INT24 = 9
DATE = 10
TIME = 11
DATETIME = 12
YEAR = 13
NEWDATE = 14
VARCHAR = 15
BIT = 16
NEWDECIMAL = 246
ENUM = 247
SET = 248
TINY_BLOB = 249
MEDIUM_BLOB = 250
LONG_BLOB = 251
BLOB = 252
VAR_STRING = 253
STRING = 254
GEOMETRY = 255
CHAR = TINY
INTERVAL = ENUM
| Python |
# Empty file.
| Python |
from django.db.backends import BaseDatabaseIntrospection
from doj.backends.zxjdbc.mysql._mysql_exceptions import ProgrammingError, OperationalError
from doj.backends.zxjdbc.mysql.constants import FIELD_TYPE
import re
# Matches one FOREIGN KEY constraint in "SHOW CREATE TABLE" output,
# capturing (local column, referenced table, referenced column).
foreign_key_re = re.compile(r"\sCONSTRAINT `[^`]*` FOREIGN KEY \(`([^`]*)`\) REFERENCES `([^`]*)` \(`([^`]*)`\)")


class DatabaseIntrospection(BaseDatabaseIntrospection):
    """Schema introspection for the zxJDBC MySQL backend."""

    # MySQL protocol field-type code -> Django model field class name.
    data_types_reverse = {
        FIELD_TYPE.BLOB: 'TextField',
        FIELD_TYPE.CHAR: 'CharField',
        FIELD_TYPE.DECIMAL: 'DecimalField',
        FIELD_TYPE.NEWDECIMAL: 'DecimalField',
        FIELD_TYPE.DATE: 'DateField',
        FIELD_TYPE.DATETIME: 'DateTimeField',
        FIELD_TYPE.DOUBLE: 'FloatField',
        FIELD_TYPE.FLOAT: 'FloatField',
        FIELD_TYPE.INT24: 'IntegerField',
        FIELD_TYPE.LONG: 'IntegerField',
        FIELD_TYPE.LONGLONG: 'IntegerField',
        FIELD_TYPE.SHORT: 'IntegerField',
        FIELD_TYPE.STRING: 'CharField',
        FIELD_TYPE.TIMESTAMP: 'DateTimeField',
        FIELD_TYPE.TINY: 'IntegerField',
        FIELD_TYPE.TINY_BLOB: 'TextField',
        FIELD_TYPE.MEDIUM_BLOB: 'TextField',
        FIELD_TYPE.LONG_BLOB: 'TextField',
        FIELD_TYPE.VAR_STRING: 'CharField',
    }

    def get_table_list(self, cursor):
        "Returns a list of table names in the current database."
        cursor.execute("SHOW TABLES")
        return [row[0] for row in cursor.fetchall()]

    def get_table_description(self, cursor, table_name):
        "Returns a description of the table, with the DB-API cursor.description interface."
        cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
        return cursor.description

    def _name_to_index(self, cursor, table_name):
        """
        Returns a dictionary of {field_name: field_index} for the given table.
        Indexes are 0-based.
        """
        return dict([(d[0], i) for i, d in enumerate(self.get_table_description(cursor, table_name))])

    def get_relations(self, cursor, table_name):
        """
        Returns a dictionary of {field_index: (field_index_other_table, other_table)}
        representing all relationships to the given table. Indexes are 0-based.
        """
        my_field_dict = self._name_to_index(cursor, table_name)
        constraints = []
        relations = {}
        try:
            # This should work for MySQL 5.0.
            cursor.execute("""
                SELECT column_name, referenced_table_name, referenced_column_name
                FROM information_schema.key_column_usage
                WHERE table_name = %s
                    AND table_schema = DATABASE()
                    AND referenced_table_name IS NOT NULL
                    AND referenced_column_name IS NOT NULL""", [table_name])
            constraints.extend(cursor.fetchall())
        except (ProgrammingError, OperationalError):
            # Fall back to "SHOW CREATE TABLE", for previous MySQL versions.
            # Go through all constraints and save the equal matches.
            cursor.execute("SHOW CREATE TABLE %s" % self.connection.ops.quote_name(table_name))
            for row in cursor.fetchall():
                pos = 0
                while True:
                    match = foreign_key_re.search(row[1], pos)
                    # Idiom fix: compare to None with "is", not "==".
                    if match is None:
                        break
                    pos = match.end()
                    constraints.append(match.groups())
        for my_fieldname, other_table, other_field in constraints:
            other_field_index = self._name_to_index(cursor, other_table)[other_field]
            my_field_index = my_field_dict[my_fieldname]
            relations[my_field_index] = (other_field_index, other_table)
        return relations

    def get_indexes(self, cursor, table_name):
        """
        Returns a dictionary of fieldname -> infodict for the given table,
        where each infodict is in the format:
            {'primary_key': boolean representing whether it's the primary key,
             'unique': boolean representing whether it's a unique index}
        """
        cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name))
        indexes = {}
        for row in cursor.fetchall():
            # SHOW INDEX columns: row[1] is Non_unique, row[2] is Key_name,
            # row[4] is Column_name.
            indexes[row[4]] = {'primary_key': (row[2] == 'PRIMARY'), 'unique': not bool(row[1])}
        return indexes
| Python |
"""_mysql_exceptions: Exception classes for _mysql and MySQLdb.
These classes are dictated by the DB API v2.0:
http://www.python.org/topics/database/DatabaseAPI-2.0.html
"""
from exceptions import Exception, StandardError, Warning
class MySQLError(StandardError):
"""Exception related to operation with MySQL."""
class Warning(Warning, MySQLError):
"""Exception raised for important warnings like data truncations
while inserting, etc."""
class Error(MySQLError):
"""Exception that is the base class of all other error exceptions
(not Warning)."""
class InterfaceError(Error):
"""Exception raised for errors that are related to the database
interface rather than the database itself."""
class DatabaseError(Error):
"""Exception raised for errors that are related to the
database."""
class DataError(DatabaseError):
"""Exception raised for errors that are due to problems with the
processed data like division by zero, numeric value out of range,
etc."""
class OperationalError(DatabaseError):
"""Exception raised for errors that are related to the database's
operation and not necessarily under the control of the programmer,
e.g. an unexpected disconnect occurs, the data source name is not
found, a transaction could not be processed, a memory allocation
error occurred during processing, etc."""
class IntegrityError(DatabaseError):
"""Exception raised when the relational integrity of the database
is affected, e.g. a foreign key check fails, duplicate key,
etc."""
class InternalError(DatabaseError):
"""Exception raised when the database encounters an internal
error, e.g. the cursor is not valid anymore, the transaction is
out of sync, etc."""
class ProgrammingError(DatabaseError):
"""Exception raised for programming errors, e.g. table not found
or already exists, syntax error in the SQL statement, wrong number
of parameters specified, etc."""
class NotSupportedError(DatabaseError):
"""Exception raised in case a method or database API was used
which is not supported by the database, e.g. requesting a
.rollback() on a connection that does not support transaction or
has transactions turned off."""
del Exception, StandardError
| Python |
import copy
from django.db.backends.creation import BaseDatabaseCreation
from django.db.backends.mysql.creation import DatabaseCreation as MysqlDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
    """Table-creation support for the zxJDBC MySQL backend."""
    def __init__(self, *args, **kwargs):
        super(DatabaseCreation, self).__init__(*args, **kwargs)
        # Start from the stock MySQL backend's type map, but store
        # IPAddressField as a plain char(15): driving a database-native
        # address type through JDBC is a pain.
        # NOTE(review): the links below discuss the PostgreSQL JDBC driver;
        # this comment block appears to have been copied from the PostgreSQL
        # backend -- confirm their relevance for MySQL:
        # http://archives.postgresql.org/pgsql-jdbc/2007-08/msg00089.php
        # http://oak.cats.ohiou.edu/~rf358197/jdbc/2/
        self.data_types = copy.copy(MysqlDatabaseCreation.data_types)
        self.data_types['IPAddressField'] = 'char(15)'
| Python |
"""
MySQL database backend for Django/Jython
"""
try:
from com.ziclix.python.sql import zxJDBC as Database
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading zxJDBC module: %s" % e)
from django.db.backends import BaseDatabaseWrapper, BaseDatabaseFeatures, BaseDatabaseValidation
from django.db.backends import BaseDatabaseOperations
#from django.db.backends.mysql.base import DatabaseOperations as MysqlDatabaseOperations
from django.db.backends.mysql.client import DatabaseClient
#from django.db.backends.mysql.introspection import DatabaseIntrospection
from doj.backends.zxjdbc.mysql.creation import DatabaseCreation
from doj.backends.zxjdbc.mysql.introspection import DatabaseIntrospection
from doj.backends.zxjdbc.common import zxJDBCOperationsMixin, zxJDBCFeaturesMixin
from doj.backends.zxjdbc.common import zxJDBCCursorWrapper, set_default_isolation_level
from doj.backends.zxjdbc.common import zxJDBCDatabaseWrapper
from com.ziclix.python.sql.handler import MySQLDataHandler
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
class DatabaseFeatures(zxJDBCFeaturesMixin, BaseDatabaseFeatures):
    # Feature flags matching Django's stock MySQL backend behaviour.
    update_can_self_select = False
    related_fields_match_type = True
class MysqlDatabaseOperations(BaseDatabaseOperations):
def date_extract_sql(self, lookup_type, field_name):
# http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
def date_trunc_sql(self, lookup_type, field_name):
fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
try:
i = fields.index(lookup_type) + 1
except ValueError:
sql = field_name
else:
format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
return sql
def drop_foreignkey_sql(self):
return "DROP FOREIGN KEY"
def force_no_ordering(self):
"""
"ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
columns. If no ordering would otherwise be applied, we don't want any
implicit sorting going on.
"""
return ["NULL"]
def fulltext_search_sql(self, field_name):
return 'MATCH (%s) AGAINST (%%s IN BOOLEAN MODE)' % field_name
def no_limit_value(self):
# 2**64 - 1, as recommended by the MySQL documentation
return 18446744073709551615L
def quote_name(self, name):
if name.startswith("`") and name.endswith("`"):
return name # Quoting once is enough.
return "`%s`" % name
def random_function_sql(self):
return 'RAND()'
def sql_flush(self, style, tables, sequences):
# NB: The generated SQL below is specific to MySQL
# 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
# to clear all tables of all data
if tables:
sql = ['SET FOREIGN_KEY_CHECKS = 0;']
for table in tables:
sql.append('%s %s;' % (style.SQL_KEYWORD('TRUNCATE'), style.SQL_FIELD(self.quote_name(table))))
sql.append('SET FOREIGN_KEY_CHECKS = 1;')
# 'ALTER TABLE table AUTO_INCREMENT = 1;'... style SQL statements
# to reset sequence indices
sql.extend(["%s %s %s %s %s;" % \
(style.SQL_KEYWORD('ALTER'),
style.SQL_KEYWORD('TABLE'),
style.SQL_TABLE(self.quote_name(sequence['table'])),
style.SQL_KEYWORD('AUTO_INCREMENT'),
style.SQL_FIELD('= 1'),
) for sequence in sequences])
return sql
else:
return []
def value_to_db_datetime(self, value):
if value is None:
return None
# MySQL doesn't support tz-aware datetimes
if value.tzinfo is not None:
raise ValueError("MySQL backend does not support timezone-aware datetimes.")
# MySQL doesn't support microseconds
return unicode(value.replace(microsecond=0))
def value_to_db_time(self, value):
if value is None:
return None
# MySQL doesn't support tz-aware datetimes
if value.tzinfo is not None:
raise ValueError("MySQL backend does not support timezone-aware datetimes.")
# MySQL doesn't support microseconds
return unicode(value.replace(microsecond=0))
def year_lookup_bounds(self, value):
# Again, no microseconds
first = '%s-01-01 00:00:00'
second = '%s-12-31 23:59:59.99'
return [first % value, second % value]
class DatabaseOperations(zxJDBCOperationsMixin, MysqlDatabaseOperations):
    """MySQL operations over zxJDBC; the mixin contributes all that is needed."""
class DatabaseWrapper(zxJDBCDatabaseWrapper):
    """MySQL database wrapper that connects through the zxJDBC (Jython) driver."""
    # JDBC driver class and URL template consumed by zxJDBCDatabaseWrapper.
    driver_class_name = 'com.mysql.jdbc.Driver'
    jdbc_url_pattern = \
        "jdbc:mysql://%(HOST)s%(PORT)s/%(NAME)s"
    # Django lookup name -> SQL operator fragment.  The BINARY keyword forces
    # case-sensitive matching; the plain LIKE/REGEXP variants are
    # case-insensitive under MySQL's default collations.
    operators = {
        'exact': '= %s',
        'iexact': 'LIKE %s',
        'contains': 'LIKE BINARY %s',
        'icontains': 'LIKE %s',
        'regex': 'REGEXP BINARY %s',
        'iregex': 'REGEXP %s',
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': 'LIKE BINARY %s',
        'endswith': 'LIKE BINARY %s',
        'istartswith': 'LIKE %s',
        'iendswith': 'LIKE %s',
    }
    def __init__(self, *args, **kwargs):
        # Build the standard Django backend helper objects.
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        self.features = DatabaseFeatures(self)
        # NOTE(review): DatabaseOperations is constructed without the wrapper,
        # unlike the other helpers -- presumably its methods need no
        # connection; confirm against the base-class signature.
        self.ops = DatabaseOperations()
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = BaseDatabaseValidation(self)
    def _cursor(self):
        # Lazily open the JDBC connection on first cursor request.
        if self.connection is None:
            self.connection = self.new_connection()
            # make transactions transparent to all cursors
            set_default_isolation_level(self.connection, innodb_binlog=True)
        real_cursor = self.connection.cursor()
        # Use the MySQL DataHandler for better compatibility:
        real_cursor.datahandler = MySQLDataHandler(real_cursor.datahandler)
        return CursorWrapper(real_cursor)
class CursorWrapper(zxJDBCCursorWrapper):
    """
    Cursor wrapper that rolls back the current transaction whenever a
    database error occurs: a MySQL connection stays unusable until the
    failed transaction has been rolled back.
    """
    def execute(self, *args, **kwargs):
        try:
            super(CursorWrapper, self).execute(*args, **kwargs)
        except Database.Error:
            # MySQL connections become unusable after an exception
            # occurs, unless the current transaction is rollback'ed.
            self.connection.rollback()
            raise
    def executemany(self, *args, **kwargs):
        # Same rollback-on-error protocol as execute().
        try:
            super(CursorWrapper, self).executemany(*args, **kwargs)
        except Database.Error:
            # MySQL connections become unusable after an exception
            # occurs, unless the current transaction is rollback'ed.
            self.connection.rollback()
            raise
| Python |
# Empty file. And this comment is to keep patch/diff happy
| Python |
"""
Custom Query class for MS SQL Server.
Derives from: django.db.models.sql.query.Query
"""
from datetime import datetime
# Maps a sort direction to its opposite; used to build the reversed ORDER BY
# required by the SQL Server 2000 TOP-based limit/offset emulation below.
REV_ODIR = {
    'ASC': 'DESC',
    'DESC': 'ASC'
}
# SQL Server 2000 emulation of LIMIT+OFFSET: take TOP %(limit)s rows from the
# reversed ordering of the original query, then re-reverse to restore the
# requested order.
SQL_SERVER_8_LIMIT_QUERY = \
"""SELECT *
FROM (
SELECT TOP %(limit)s *
FROM (
%(orig_sql)s
ORDER BY %(ord)s
) AS %(table)s
ORDER BY %(rev_ord)s
) AS %(table)s
ORDER BY %(ord)s"""
# SQL Server 2000 emulation of OFFSET without LIMIT: skip the leading rows by
# excluding every key returned by the original (TOP offset) query.
SQL_SERVER_8_NO_LIMIT_QUERY = \
"""SELECT *
FROM %(table)s
WHERE %(key)s NOT IN (
%(orig_sql)s
ORDER BY %(ord)s
)"""
# Strategies for handling limit+offset emulation:
USE_ROW_NUMBER = 0 # For SQL Server >= 2005
USE_TOP_HMARK = 1 # For SQL Server 2000 when both limit and offset are provided
USE_TOP_LMARK = 2 # For SQL Server 2000 when offset but no limit is provided
# Cache. Maps default query class to new MS SQL query class.
_classes = {}
# Gets the base class for all Django queries.
# Django's subquery items (InsertQuery, DeleteQuery, etc.) will then inherit
# from this custom class.
def query_class(QueryClass):
"""
Returns a custom django.db.models.sql.query.Query subclass that is
appropriate for MS SQL Server.
"""
global _classes
try:
return _classes[QueryClass]
except KeyError:
pass
class PyOdbcSSQuery(QueryClass):
from sql_server.pyodbc import aggregates
aggregates_module = aggregates
        def __init__(self, *args, **kwargs):
            """Initialize ordering bookkeeping and hook INSERT SQL generation."""
            super(PyOdbcSSQuery, self).__init__(*args, **kwargs)
            # Filled in by modify_query(): whether the fallback ordering is
            # descending, and the (column/alias, direction) pairs to emit.
            self.default_reverse_ordering = False
            self._ord = []
            # If we are an insert query, monkeypatch the "as_sql" method
            from django.db.models.sql.subqueries import InsertQuery
            if isinstance(self, InsertQuery):
                self._orig_as_sql = self.as_sql
                self.as_sql = self._insert_as_sql
        def _insert_as_sql(self, *args, **kwargs):
            """Helper method for monkeypatching Django InsertQuery's as_sql.

            Wraps the original SQL so explicit values may be written to an
            IDENTITY (auto-increment) primary key, which SQL Server only
            allows while IDENTITY_INSERT is switched ON for the table.
            """
            meta = self.get_meta()
            quoted_table = self.connection.ops.quote_name(meta.db_table)
            # Get (sql, params) from original InsertQuery.as_sql
            sql, params = self._orig_as_sql(*args, **kwargs)
            if meta.pk.db_column in self.columns and meta.pk.__class__.__name__ == "AutoField":
                # Only the identity column and no params: let the server
                # generate the value instead of inserting one explicitly.
                if len(self.columns) == 1 and not params:
                    sql = "INSERT INTO %s DEFAULT VALUES" % quoted_table
                else:
                    sql = "SET IDENTITY_INSERT %s ON;\n%s;\nSET IDENTITY_INSERT %s OFF" % \
                        (quoted_table, sql, quoted_table)
            return sql, params
        def __reduce__(self):
            """
            Enable pickling for this class (normal pickling handling doesn't
            work as Python can only pickle module-level classes by default).

            Pickles as a call to unpickle_query_class() with the dynamic
            class's base, plus this instance's state.
            """
            if hasattr(QueryClass, '__getstate__'):
                assert hasattr(QueryClass, '__setstate__')
                data = self.__getstate__()
            else:
                data = self.__dict__
            return (unpickle_query_class, (QueryClass,), data)
def convert_values(self, value, field):
"""
Coerce the value returned by the database backend into a consistent
type that is compatible with the field type.
In our case, cater for the fact that SQL Server < 2008 has no
separate Date and Time data types.
TODO: See how we'll handle this for SQL Server >= 2008
"""
if value is None:
return None
if field and field.get_internal_type() == 'DateTimeField':
return value
elif field and field.get_internal_type() == 'DateField':
value = value.date() # extract date
elif field and field.get_internal_type() == 'TimeField' or (isinstance(value, datetime) and value.year == 1900 and value.month == value.day == 1):
value = value.time() # extract time
# Some cases (for example when select_related() is used) aren't
# caught by the DateField case above and date fields arrive from
# the DB as datetime instances.
# Implement a workaround stealing the idea from the Oracle
# backend. It's not perfect so the same warning applies (i.e. if a
# query results in valid date+time values with the time part set
# to midnight, this workaround can surprise us by converting them
# to the datetime.date Python type).
elif isinstance(value, datetime) and value.hour == value.minute == value.second == value.microsecond == 0:
value = value.date()
# Force floats to the correct type
elif value is not None and field and field.get_internal_type() == 'FloatField':
value = float(value)
return value
        def resolve_columns(self, row, fields=()):
            """
            Run convert_values() over every column of a result row; the
            extra-select columns come first and carry no field object.
            """
            index_start = len(self.extra_select.keys())
            values = [self.convert_values(v, None) for v in row[:index_start]]
            # map(None, ...) is the Python 2 None-padded zip, so a length
            # mismatch between remaining row values and fields still visits
            # every item (the missing side arrives as None).
            for value, field in map(None, row[index_start:], fields):
                values.append(self.convert_values(value, field))
            return tuple(values)
        def modify_query(self, strategy, ordering, out_cols):
            """
            Helper method, called from _as_sql()
            Sets the value of the self._ord and self.default_reverse_ordering
            attributes.
            Can modify the values of the out_cols list argument and the
            self.ordering_aliases attribute.
            """
            self.default_reverse_ordering = False
            self._ord = []
            cnt = 0
            extra_select_aliases = [k.strip('[]') for k in self.extra_select.keys()]
            for ord_spec_item in ordering:
                if ord_spec_item.endswith(' ASC') or ord_spec_item.endswith(' DESC'):
                    parts = ord_spec_item.split()
                    col, odir = ' '.join(parts[:-1]), parts[-1]
                    if col not in self.ordering_aliases and col.strip('[]') not in extra_select_aliases:
                        # Positional (1-based) column reference: alias the
                        # select column so ORDER BY can name it.
                        if col.isdigit():
                            cnt += 1
                            n = int(col)-1
                            alias = 'OrdAlias%d' % cnt
                            out_cols[n] = '%s AS [%s]' % (out_cols[n], alias)
                            self._ord.append((alias, odir))
                        elif col in out_cols:
                            if strategy == USE_TOP_HMARK:
                                cnt += 1
                                n = out_cols.index(col)
                                alias = 'OrdAlias%d' % cnt
                                out_cols[n] = '%s AS %s' % (col, alias)
                                self._ord.append((alias, odir))
                            else:
                                self._ord.append((col, odir))
                        elif strategy == USE_TOP_HMARK:
                            # Special case: '_order' column created by Django
                            # when Meta.order_with_respect_to is used
                            if col.split('.')[-1] == '[_order]' and odir == 'DESC':
                                self.default_reverse_ordering = True
                            cnt += 1
                            alias = 'OrdAlias%d' % cnt
                            self._ord.append((alias, odir))
                            self.ordering_aliases.append('%s AS [%s]' % (col, alias))
                        else:
                            self._ord.append((col, odir))
                    else:
                        self._ord.append((col, odir))
            if strategy == USE_ROW_NUMBER and not self._ord and 'RAND()' in ordering:
                self._ord.append(('RAND()',''))
            if strategy == USE_TOP_HMARK and not self._ord:
                # No usable ordering: fall back to the primary key, adding it
                # to the select list when it is not there yet.
                # XXX:
                #meta = self.get_meta()
                meta = self.model._meta
                qn = self.quote_name_unless_alias
                pk_col = '%s.%s' % (qn(meta.db_table), qn(meta.pk.db_column or meta.pk.column))
                if pk_col not in out_cols:
                    out_cols.append(pk_col)
        def _as_sql(self, strategy):
            """
            Helper method, called from as_sql()
            Similar to django/db/models/sql/query.py:Query.as_sql() but without
            the ordering and limits code.
            Returns SQL that hasn't an order-by clause.
            """
            # get_columns needs to be called before get_ordering to populate
            # _select_alias.
            out_cols = self.get_columns(True)
            ordering, ordering_group_by = self.get_ordering()
            if strategy == USE_ROW_NUMBER:
                if not ordering:
                    meta = self.get_meta()
                    qn = self.quote_name_unless_alias
                    # Special case: pk not in out_cols, use random ordering.
                    #
                    if '%s.%s' % (qn(meta.db_table), qn(meta.pk.db_column or meta.pk.column)) not in self.get_columns():
                        ordering = ['RAND()']
                        # XXX: Maybe use group_by field for ordering?
                        #if self.group_by:
                            #ordering = ['%s.%s ASC' % (qn(self.group_by[0][0]),qn(self.group_by[0][1]))]
                    else:
                        ordering = ['%s.%s ASC' % (qn(meta.db_table), qn(meta.pk.db_column or meta.pk.column))]
            if strategy in (USE_TOP_HMARK, USE_ROW_NUMBER):
                self.modify_query(strategy, ordering, out_cols)
            if strategy == USE_ROW_NUMBER:
                # Number every row of the unordered query so as_sql() can
                # range-filter on the [rn] alias to emulate OFFSET/LIMIT.
                ord = ', '.join(['%s %s' % pair for pair in self._ord])
                self.ordering_aliases.append('(ROW_NUMBER() OVER (ORDER BY %s)) AS [rn]' % ord)
            # This must come after 'select' and 'ordering' -- see docstring of
            # get_from_clause() for details.
            from_, f_params = self.get_from_clause()
            qn = self.quote_name_unless_alias
            where, w_params = self.where.as_sql(qn=qn)
            having, h_params = self.having.as_sql(qn=qn)
            params = []
            for val in self.extra_select.itervalues():
                params.extend(val[1])
            result = ['SELECT']
            if self.distinct:
                result.append('DISTINCT')
            if strategy == USE_TOP_LMARK:
                # Row-skipping strategy: select only the first low_mark pk
                # values; as_sql() then excludes them with NOT IN.
                # XXX:
                #meta = self.get_meta()
                meta = self.model._meta
                result.append('TOP %s %s' % (self.low_mark, self.quote_name_unless_alias(meta.pk.db_column or meta.pk.column)))
            else:
                if strategy == USE_TOP_HMARK and self.high_mark is not None:
                    result.append('TOP %s' % self.high_mark)
                result.append(', '.join(out_cols + self.ordering_aliases))
            result.append('FROM')
            result.extend(from_)
            params.extend(f_params)
            if where:
                result.append('WHERE %s' % where)
                params.extend(w_params)
            if self.extra_where:
                if not where:
                    result.append('WHERE')
                else:
                    result.append('AND')
                result.append(' AND '.join(self.extra_where))
            grouping, gb_params = self.get_grouping()
            if grouping:
                if ordering:
                    # If the backend can't group by PK (i.e., any database
                    # other than MySQL), then any fields mentioned in the
                    # ordering clause needs to be in the group by clause.
                    if not self.connection.features.allows_group_by_pk:
                        for col, col_params in ordering_group_by:
                            if col not in grouping:
                                grouping.append(str(col))
                                gb_params.extend(col_params)
                else:
                    ordering = self.connection.ops.force_no_ordering()
                result.append('GROUP BY %s' % ', '.join(grouping))
                params.extend(gb_params)
            if having:
                result.append('HAVING %s' % having)
                params.extend(h_params)
            params.extend(self.extra_params)
            return ' '.join(result), tuple(params)
        def as_sql(self, with_limits=True, with_col_aliases=False):
            """
            Creates the SQL for this query. Returns the SQL string and list of
            parameters.
            If 'with_limits' is False, any limit/offset information is not included
            in the query.

            Picks one of three limit/offset emulation strategies depending on
            the server version and which of low_mark/high_mark are set.
            """
            # The do_offset flag indicates whether we need to construct
            # the SQL needed to use limit/offset w/SQL Server.
            do_offset = with_limits and (self.high_mark is not None or self.low_mark != 0)
            # If no offsets, just return the result of the base class
            # `as_sql`.
            if not do_offset:
                return super(PyOdbcSSQuery, self).as_sql(with_limits=False,
                        with_col_aliases=with_col_aliases)
            # Shortcut for the corner case when high_mark value is 0:
            if self.high_mark == 0:
                return "", ()
            self.pre_sql_setup()
            # XXX:
            #meta = self.get_meta()
            meta = self.model._meta
            qn = self.quote_name_unless_alias
            fallback_ordering = '%s.%s' % (qn(meta.db_table), qn(meta.pk.db_column or meta.pk.column))
            # SQL Server 2000, offset+limit case
            if self.connection.ops.sql_server_ver < 2005 and self.high_mark is not None:
                orig_sql, params = self._as_sql(USE_TOP_HMARK)
                if self._ord:
                    ord = ', '.join(['%s %s' % pair for pair in self._ord])
                    rev_ord = ', '.join(['%s %s' % (col, REV_ODIR[odir]) for col, odir in self._ord])
                else:
                    if not self.default_reverse_ordering:
                        ord = '%s ASC' % fallback_ordering
                        rev_ord = '%s DESC' % fallback_ordering
                    else:
                        ord = '%s DESC' % fallback_ordering
                        rev_ord = '%s ASC' % fallback_ordering
                sql = SQL_SERVER_8_LIMIT_QUERY % {
                    'limit': self.high_mark - self.low_mark,
                    'orig_sql': orig_sql,
                    'ord': ord,
                    'rev_ord': rev_ord,
                    # XXX:
                    'table': qn(meta.db_table),
                }
                return sql, params
            # SQL Server 2005
            if self.connection.ops.sql_server_ver >= 2005:
                sql, params = self._as_sql(USE_ROW_NUMBER)
                # Construct the final SQL clause, using the initial select SQL
                # obtained above.
                result = ['SELECT * FROM (%s) AS X' % sql]
                # Place WHERE condition on `rn` for the desired range.
                if self.high_mark is None:
                    # NOTE(review): mutates high_mark (to 2**63 - 1) as a side
                    # effect of generating SQL.
                    self.high_mark = 9223372036854775807
                result.append('WHERE X.rn BETWEEN %d AND %d' % (self.low_mark+1, self.high_mark))
                return ' '.join(result), params
            # SQL Server 2000, offset without limit case
            # get_columns needs to be called before get_ordering to populate
            # select_alias.
            self.get_columns(with_col_aliases)
            ordering, ordering_group_by = self.get_ordering()
            if ordering:
                ord = ', '.join(ordering)
            else:
                # We need to define an ordering clause since none was provided
                ord = fallback_ordering
            orig_sql, params = self._as_sql(USE_TOP_LMARK)
            sql = SQL_SERVER_8_NO_LIMIT_QUERY % {
                'orig_sql': orig_sql,
                'ord': ord,
                'table': qn(meta.db_table),
                'key': qn(meta.pk.db_column or meta.pk.column),
            }
            return sql, params
_classes[QueryClass] = PyOdbcSSQuery
return PyOdbcSSQuery
def unpickle_query_class(QueryClass):
    """
    Utility function, called by Python's unpickling machinery, that handles
    unpickling of our custom Query subclasses.

    Recreates (or fetches from the cache) the dynamic subclass for
    ``QueryClass`` and returns an uninitialized instance; pickle then
    restores the instance state itself.
    """
    dynamic_class = query_class(QueryClass)
    return dynamic_class.__new__(dynamic_class)
unpickle_query_class.__safe_for_unpickling__ = True
| Python |
from django.db.backends import BaseDatabaseIntrospection
from com.ziclix.python.sql import zxJDBC as Database
# Sentinel type code (no real JDBC type uses it) marking IDENTITY columns so
# data_types_reverse can map them to Django's AutoField.
SQL_AUTOFIELD = -777555
class DatabaseIntrospection(BaseDatabaseIntrospection):
    """Introspect an MS SQL Server database through zxJDBC (Jython)."""
    # Map type codes to Django Field types.
    data_types_reverse = {
        SQL_AUTOFIELD: 'AutoField',
        Database.BIGINT: 'IntegerField',
        Database.BIT: 'BooleanField',
        Database.CHAR: 'CharField',
        Database.DECIMAL: 'DecimalField',
        Database.DOUBLE: 'FloatField',
        Database.FLOAT: 'FloatField',
        #Database.SQL_GUID: 'TextField',
        Database.INTEGER: 'IntegerField',
        #Database.LONGVARBINARY: ,
        #Database.LONGVARCHAR: ,
        Database.NUMERIC: 'DecimalField',
        Database.REAL: 'FloatField',
        Database.SMALLINT: 'SmallIntegerField',
        Database.TINYINT: 'SmallIntegerField',
        # NOTE(review): DATETIME -> DateField while TIMESTAMP -> DateTimeField;
        # verify against zxJDBC's java.sql.Types constants before relying on it.
        Database.DATETIME: 'DateField',
        Database.TIME: 'TimeField',
        Database.TIMESTAMP: 'DateTimeField',
        #Database.SQL_VARBINARY: ,
        Database.VARCHAR: 'TextField',
        Database.NCHAR: 'CharField',
        Database.LONGNVARCHAR: 'TextField',
        Database.NVARCHAR: 'TextField',
    }
    def get_table_list(self, cursor):
        """
        Returns a list of table names in the current database.
        """
        # TABLES: http://msdn2.microsoft.com/en-us/library/ms186224.aspx
        cursor.execute("SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE = 'BASE TABLE'")
        return [row[0] for row in cursor.fetchall()]
    def _is_auto_field(self, cursor, table_name, column_name):
        """
        Checks whether column is Identity

        Returns the (truthy/falsy) COLUMNPROPERTY IsIdentity value.
        """
        # COLUMNPROPERTY: http://msdn2.microsoft.com/en-us/library/ms174968.aspx
        # NOTE(review): the quoted table name is passed as a parameter to
        # OBJECT_ID(); confirm the quote style produced by ops.quote_name is
        # the one OBJECT_ID expects.
        cursor.execute("SELECT COLUMNPROPERTY(OBJECT_ID(%s), %s, 'IsIdentity')",
                         (self.connection.ops.quote_name(table_name), column_name))
        return cursor.fetchall()[0][0]
    def get_table_description(self, cursor, table_name, identity_check=True):
        """Returns a description of the table, with DB-API cursor.description interface.

        The 'identity_check' parameter has been added to the function argspec.
        If set to True, the function will check each of the table's fields for the
        IDENTITY property (the IDENTITY property is the MSSQL equivalent to an AutoField).

        When a field is found with an IDENTITY property, it is given a custom field number
        of SQL_AUTOFIELD, which maps to the 'AutoField' value in the DATA_TYPES_REVERSE dict.
        """
        # map zxJDBC's cursor.columns to db-api cursor description
        # columns(String catalog, String schemaPattern, String tableNamePattern, String columnNamePattern) gives a description of table columns available in the specified catalog
        # To view metadata provided by the cursor, each metadata method must be called, then the cursor must be used to retrieve the data: http://java-connect.info/New.Riders-Jython.for.Java.Programmers/_chapter%2011.htm
        cursor.columns(None, None, table_name, None)
        # Rearranged as (name, type_code, display_size, internal_size,
        # precision, scale, null_ok).  NOTE(review): c[6] (COLUMN_SIZE) is
        # used for both internal_size and precision -- confirm intentional.
        columns = [[c[3], c[4], None, c[6], c[6], c[8], c[10]] for c in cursor.fetchall()]
        items = []
        for column in columns:
            if identity_check and self._is_auto_field(cursor, table_name, column[0]):
                column[1] = SQL_AUTOFIELD
            # Short NVARCHAR columns are treated as CHAR-like (CharField).
            if column[1] == Database.NVARCHAR and column[3] < 4000:
                column[1] = Database.NCHAR
            items.append(column)
        return items
    def _name_to_index(self, cursor, table_name):
        """
        Returns a dictionary of {field_name: field_index} for the given table.
        Indexes are 0-based.
        """
        return dict([(d[0], i) for i, d in enumerate(self.get_table_description(cursor, table_name, identity_check=False))])
    def get_relations(self, cursor, table_name):
        """
        Returns a dictionary of {field_index: (field_index_other_table, other_table)}
        representing all relationships to the given table. Indexes are 0-based.
        """
        # CONSTRAINT_COLUMN_USAGE: http://msdn2.microsoft.com/en-us/library/ms174431.aspx
        # CONSTRAINT_TABLE_USAGE: http://msdn2.microsoft.com/en-us/library/ms179883.aspx
        # REFERENTIAL_CONSTRAINTS: http://msdn2.microsoft.com/en-us/library/ms179987.aspx
        # TABLE_CONSTRAINTS: http://msdn2.microsoft.com/en-us/library/ms181757.aspx
        table_index = self._name_to_index(cursor, table_name)
        sql = """
SELECT e.COLUMN_NAME AS column_name,
c.TABLE_NAME AS referenced_table_name,
d.COLUMN_NAME AS referenced_column_name
FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS a
INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS b
ON a.CONSTRAINT_NAME = b.CONSTRAINT_NAME
INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_TABLE_USAGE AS c
ON b.UNIQUE_CONSTRAINT_NAME = c.CONSTRAINT_NAME
INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS d
ON c.CONSTRAINT_NAME = d.CONSTRAINT_NAME
INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS e
ON a.CONSTRAINT_NAME = e.CONSTRAINT_NAME
WHERE a.TABLE_NAME = %s AND a.CONSTRAINT_TYPE = 'FOREIGN KEY'"""
        cursor.execute(sql, (table_name,))
        return dict([(table_index[item[0]], (self._name_to_index(cursor, item[1])[item[2]], item[1]))
                for item in cursor.fetchall()])
    def get_indexes(self, cursor, table_name):
        """
        Returns a dictionary of fieldname -> infodict for the given table,
        where each infodict is in the format:
        {'primary_key': boolean representing whether it's the primary key,
        'unique': boolean representing whether it's a unique index,
        'db_index': boolean representing whether it's a non-unique index}
        """
        # CONSTRAINT_COLUMN_USAGE: http://msdn2.microsoft.com/en-us/library/ms174431.aspx
        # TABLE_CONSTRAINTS: http://msdn2.microsoft.com/en-us/library/ms181757.aspx
        pk_uk_sql = """
SELECT b.COLUMN_NAME, a.CONSTRAINT_TYPE
FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS a
INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS b
ON a.CONSTRAINT_NAME = b.CONSTRAINT_NAME AND a.TABLE_NAME = b.TABLE_NAME
WHERE a.TABLE_NAME = %s AND (CONSTRAINT_TYPE = 'PRIMARY KEY' OR CONSTRAINT_TYPE = 'UNIQUE')"""
        field_names = [item[0] for item in self.get_table_description(cursor, table_name, identity_check=False)]
        indexes, results = {}, {}
        cursor.execute(pk_uk_sql, (table_name,))
        data = cursor.fetchall()
        if data:
            results.update(data)
        if self.connection.ops.sql_server_ver >= 2005:
            # non-unique, non-compound indexes, only in SS2005?
            ix_sql = """
SELECT DISTINCT c.name
FROM sys.columns c
INNER JOIN sys.index_columns ic
ON ic.object_id = c.object_id AND ic.column_id = c.column_id
INNER JOIN sys.indexes ix
ON ix.object_id = ic.object_id AND ix.index_id = ic.index_id
INNER JOIN sys.tables t
ON t.object_id = ix.object_id
WHERE ix.object_id IN (
SELECT ix.object_id
FROM sys.indexes ix
GROUP BY ix.object_id, ix.index_id
HAVING count(1) = 1)
AND ix.is_primary_key = 0
AND ix.is_unique_constraint = 0
AND t.name = %s"""
            cursor.execute(ix_sql, (table_name,))
            for column in [r[0] for r in cursor.fetchall()]:
                if column not in results:
                    results[column] = 'IX'
        # Translate the collected constraint type per column into the
        # primary_key/unique/db_index booleans Django expects.
        for field in field_names:
            val = results.get(field, None)
            indexes[field] = dict(primary_key=(val=='PRIMARY KEY'), unique=(val=='UNIQUE'), db_index=(val=='IX'))
        return indexes
    #def get_collations_list(self, cursor):
    # """
    # Returns list of available collations and theirs descriptions.
    # """
    # # http://msdn2.microsoft.com/en-us/library/ms184391.aspx
    # # http://msdn2.microsoft.com/en-us/library/ms179886.aspx
    #
    # cursor.execute("SELECT name, description FROM ::fn_helpcollations()")
    # return [tuple(row) for row in cursor.fetchall()]
| Python |
from django.db.models.sql import compiler
from datetime import datetime
# Maps a sort direction to its opposite; used to build the reversed ORDER BY
# required by the SQL Server 2000 TOP-based limit/offset emulation below.
REV_ODIR = {
    'ASC': 'DESC',
    'DESC': 'ASC'
}
# SQL Server 2000 emulation of LIMIT+OFFSET: take TOP %(limit)s rows from the
# reversed ordering of the original query, then re-reverse to restore the
# requested order.
SQL_SERVER_8_LIMIT_QUERY = \
"""SELECT *
FROM (
SELECT TOP %(limit)s *
FROM (
%(orig_sql)s
ORDER BY %(ord)s
) AS %(table)s
ORDER BY %(rev_ord)s
) AS %(table)s
ORDER BY %(ord)s"""
# SQL Server 2000 emulation of OFFSET without LIMIT: skip the leading rows by
# excluding every key returned by the original (TOP offset) query.
SQL_SERVER_8_NO_LIMIT_QUERY = \
"""SELECT *
FROM %(table)s
WHERE %(key)s NOT IN (
%(orig_sql)s
ORDER BY %(ord)s
)"""
# Strategies for handling limit+offset emulation:
USE_ROW_NUMBER = 0 # For SQL Server >= 2005
USE_TOP_HMARK = 1 # For SQL Server 2000 when both limit and offset are provided
USE_TOP_LMARK = 2 # For SQL Server 2000 when offset but no limit is provided
class SQLCompiler(compiler.SQLCompiler):
    """SELECT compiler adding SQL Server limit/offset emulation."""
    def resolve_columns(self, row, fields=()):
        """
        Run Query.convert_values() over every column of a result row; the
        extra-select columns come first and carry no field object.
        """
        index_start = len(self.query.extra_select.keys())
        values = [self.query.convert_values(v, None, connection=self.connection) for v in row[:index_start]]
        # map(None, ...) is the Python 2 None-padded zip, so a length
        # mismatch between remaining row values and fields is tolerated.
        for value, field in map(None, row[index_start:], fields):
            values.append(self.query.convert_values(value, field, connection=self.connection))
        return tuple(values)
    def modify_query(self, strategy, ordering, out_cols):
        """
        Helper method, called from _as_sql()
        Sets the value of the self._ord and self.default_reverse_ordering
        attributes.
        Can modify the values of the out_cols list argument and the
        self.query.ordering_aliases attribute.
        """
        self.default_reverse_ordering = False
        self._ord = []
        cnt = 0
        extra_select_aliases = [k.strip('[]') for k in self.query.extra_select.keys()]
        for ord_spec_item in ordering:
            if ord_spec_item.endswith(' ASC') or ord_spec_item.endswith(' DESC'):
                parts = ord_spec_item.split()
                col, odir = ' '.join(parts[:-1]), parts[-1]
                if col not in self.query.ordering_aliases and col.strip('[]') not in extra_select_aliases:
                    # Positional (1-based) column reference: alias the select
                    # column so ORDER BY can name it.
                    if col.isdigit():
                        cnt += 1
                        n = int(col)-1
                        alias = 'OrdAlias%d' % cnt
                        out_cols[n] = '%s AS [%s]' % (out_cols[n], alias)
                        self._ord.append((alias, odir))
                    elif col in out_cols:
                        if strategy == USE_TOP_HMARK:
                            cnt += 1
                            n = out_cols.index(col)
                            alias = 'OrdAlias%d' % cnt
                            out_cols[n] = '%s AS %s' % (col, alias)
                            self._ord.append((alias, odir))
                        else:
                            self._ord.append((col, odir))
                    elif strategy == USE_TOP_HMARK:
                        # Special case: '_order' column created by Django
                        # when Meta.order_with_respect_to is used
                        if col.split('.')[-1] == '[_order]' and odir == 'DESC':
                            self.default_reverse_ordering = True
                        cnt += 1
                        alias = 'OrdAlias%d' % cnt
                        self._ord.append((alias, odir))
                        self.query.ordering_aliases.append('%s AS [%s]' % (col, alias))
                    else:
                        self._ord.append((col, odir))
                else:
                    self._ord.append((col, odir))
        if strategy == USE_ROW_NUMBER and not self._ord and 'RAND()' in ordering:
            self._ord.append(('RAND()',''))
        if strategy == USE_TOP_HMARK and not self._ord:
            # No usable ordering: fall back to the primary key, adding it to
            # the select list when it is not there yet.
            # XXX:
            #meta = self.get_meta()
            meta = self.query.model._meta
            qn = self.quote_name_unless_alias
            pk_col = '%s.%s' % (qn(meta.db_table), qn(meta.pk.db_column or meta.pk.column))
            if pk_col not in out_cols:
                out_cols.append(pk_col)
    def _as_sql(self, strategy):
        """
        Helper method, called from as_sql()
        Similar to django/db/models/sql/query.py:Query.as_sql() but without
        the ordering and limits code.
        Returns SQL that hasn't an order-by clause.
        """
        # get_columns needs to be called before get_ordering to populate
        # _select_alias.
        out_cols = self.get_columns(True)
        ordering, ordering_group_by = self.get_ordering()
        if strategy == USE_ROW_NUMBER:
            if not ordering:
                meta = self.query.get_meta()
                qn = self.quote_name_unless_alias
                # Special case: pk not in out_cols, use random ordering.
                #
                if '%s.%s' % (qn(meta.db_table), qn(meta.pk.db_column or meta.pk.column)) not in self.get_columns():
                    ordering = ['RAND()']
                    # XXX: Maybe use group_by field for ordering?
                    #if self.group_by:
                        #ordering = ['%s.%s ASC' % (qn(self.group_by[0][0]),qn(self.group_by[0][1]))]
                else:
                    ordering = ['%s.%s ASC' % (qn(meta.db_table), qn(meta.pk.db_column or meta.pk.column))]
        if strategy in (USE_TOP_HMARK, USE_ROW_NUMBER):
            self.modify_query(strategy, ordering, out_cols)
        if strategy == USE_ROW_NUMBER:
            # Number every row of the unordered query so as_sql() can
            # range-filter on the [rn] alias to emulate OFFSET/LIMIT.
            ord = ', '.join(['%s %s' % pair for pair in self._ord])
            self.query.ordering_aliases.append('(ROW_NUMBER() OVER (ORDER BY %s)) AS [rn]' % ord)
        # This must come after 'select' and 'ordering' -- see docstring of
        # get_from_clause() for details.
        from_, f_params = self.get_from_clause()
        qn = self.quote_name_unless_alias
        where, w_params = self.query.where.as_sql(qn, self.connection)
        having, h_params = self.query.having.as_sql(qn, self.connection)
        params = []
        for val in self.query.extra_select.itervalues():
            params.extend(val[1])
        result = ['SELECT']
        if self.query.distinct:
            result.append('DISTINCT')
        if strategy == USE_TOP_LMARK:
            # Row-skipping strategy: select only the first low_mark pk
            # values; as_sql() then excludes them with NOT IN.
            # XXX:
            #meta = self.get_meta()
            meta = self.query.model._meta
            result.append('TOP %s %s' % (self.query.low_mark, self.quote_name_unless_alias(meta.pk.db_column or meta.pk.column)))
        else:
            if strategy == USE_TOP_HMARK and self.query.high_mark is not None:
                result.append('TOP %s' % self.query.high_mark)
            result.append(', '.join(out_cols + self.query.ordering_aliases))
        result.append('FROM')
        result.extend(from_)
        params.extend(f_params)
        if where:
            result.append('WHERE %s' % where)
            params.extend(w_params)
        grouping, gb_params = self.get_grouping()
        if grouping:
            if ordering:
                # If the backend can't group by PK (i.e., any database
                # other than MySQL), then any fields mentioned in the
                # ordering clause needs to be in the group by clause.
                if not self.connection.features.allows_group_by_pk:
                    for col, col_params in ordering_group_by:
                        if col not in grouping:
                            grouping.append(str(col))
                            gb_params.extend(col_params)
            else:
                ordering = self.connection.ops.force_no_ordering()
            result.append('GROUP BY %s' % ', '.join(grouping))
            params.extend(gb_params)
        if having:
            result.append('HAVING %s' % having)
            params.extend(h_params)
        return ' '.join(result), tuple(params)
    def as_sql(self, with_limits=True, with_col_aliases=False):
        """
        Creates the SQL for this query. Returns the SQL string and list of
        parameters.
        If 'with_limits' is False, any limit/offset information is not included
        in the query.

        Picks one of three limit/offset emulation strategies depending on the
        server version and which of low_mark/high_mark are set.
        """
        # The do_offset flag indicates whether we need to construct
        # the SQL needed to use limit/offset w/SQL Server.
        do_offset = with_limits and (self.query.high_mark is not None or self.query.low_mark != 0)
        # If no offsets, just return the result of the base class
        # `as_sql`.
        if not do_offset:
            return super(SQLCompiler, self).as_sql(with_limits=False,
                    with_col_aliases=with_col_aliases)
        # Shortcut for the corner case when high_mark value is 0:
        if self.query.high_mark == 0:
            return "", ()
        self.pre_sql_setup()
        # XXX:
        #meta = self.get_meta()
        meta = self.query.model._meta
        qn = self.quote_name_unless_alias
        fallback_ordering = '%s.%s' % (qn(meta.db_table), qn(meta.pk.db_column or meta.pk.column))
        # SQL Server 2000, offset+limit case
        if self.connection.ops.sql_server_ver < 2005 and self.query.high_mark is not None:
            orig_sql, params = self._as_sql(USE_TOP_HMARK)
            if self._ord:
                ord = ', '.join(['%s %s' % pair for pair in self._ord])
                rev_ord = ', '.join(['%s %s' % (col, REV_ODIR[odir]) for col, odir in self._ord])
            else:
                if not self.default_reverse_ordering:
                    ord = '%s ASC' % fallback_ordering
                    rev_ord = '%s DESC' % fallback_ordering
                else:
                    ord = '%s DESC' % fallback_ordering
                    rev_ord = '%s ASC' % fallback_ordering
            sql = SQL_SERVER_8_LIMIT_QUERY % {
                'limit': self.query.high_mark - self.query.low_mark,
                'orig_sql': orig_sql,
                'ord': ord,
                'rev_ord': rev_ord,
                # XXX:
                'table': qn(meta.db_table),
            }
            return sql, params
        # SQL Server 2005
        if self.connection.ops.sql_server_ver >= 2005:
            sql, params = self._as_sql(USE_ROW_NUMBER)
            # Construct the final SQL clause, using the initial select SQL
            # obtained above.
            result = ['SELECT * FROM (%s) AS X' % sql]
            # Place WHERE condition on `rn` for the desired range.
            if self.query.high_mark is None:
                # NOTE(review): mutates the query's high_mark (to 2**63 - 1)
                # as a side effect of generating SQL.
                self.query.high_mark = 9223372036854775807
            result.append('WHERE X.rn BETWEEN %d AND %d' % (self.query.low_mark+1, self.query.high_mark))
            return ' '.join(result), params
        # SQL Server 2000, offset without limit case
        # get_columns needs to be called before get_ordering to populate
        # select_alias.
        self.get_columns(with_col_aliases)
        ordering, ordering_group_by = self.get_ordering()
        if ordering:
            ord = ', '.join(ordering)
        else:
            # We need to define an ordering clause since none was provided
            ord = fallback_ordering
        orig_sql, params = self._as_sql(USE_TOP_LMARK)
        sql = SQL_SERVER_8_NO_LIMIT_QUERY % {
            'orig_sql': orig_sql,
            'ord': ord,
            'table': qn(meta.db_table),
            'key': qn(meta.pk.db_column or meta.pk.column),
        }
        return sql, params
class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
    def as_sql(self):
        """
        Build the INSERT statement, handling SQL Server IDENTITY columns:
        explicit pk values are wrapped in IDENTITY_INSERT ON/OFF, and an
        insert of nothing but the identity column becomes DEFAULT VALUES.
        """
        # We don't need quote_name_unless_alias() here, since these are all
        # going to be column names (so we can avoid the extra overhead).
        qn = self.connection.ops.quote_name
        opts = self.query.model._meta
        result = ['INSERT INTO %s' % qn(opts.db_table)]
        result.append('(%s)' % ', '.join([qn(c) for c in self.query.columns]))
        if self.return_id and self.connection.features.can_return_id_from_insert:
            # OUTPUT clause returns the generated identity value directly.
            output = 'OUTPUT inserted.%s' % qn(opts.pk.column)
            result.append(output)
        values = [self.placeholder(*v) for v in self.query.values]
        result.append('VALUES (%s)' % ', '.join(values))
        params = self.query.params
        sql = ' '.join(result)
        meta = self.query.get_meta()
        if meta.has_auto_field:
            # db_column is None if not explicitly specified by model field
            auto_field_column = meta.auto_field.db_column or meta.auto_field.column
            if auto_field_column in self.query.columns:
                quoted_table = self.connection.ops.quote_name(meta.db_table)
                # Only the identity column and no params: let the server
                # generate the value instead of inserting one explicitly.
                if len(self.query.columns) == 1 and not params:
                    sql = "INSERT INTO %s DEFAULT VALUES" % quoted_table
                else:
                    sql = "SET IDENTITY_INSERT %s ON;\n%s;\nSET IDENTITY_INSERT %s OFF" % \
                        (quoted_table, sql, quoted_table)
        return sql, params
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
    """DELETE compiler: stock Django behaviour on this backend's SQLCompiler."""
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
    """UPDATE compiler: stock Django behaviour on this backend's SQLCompiler."""
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
    """Aggregate compiler: stock Django behaviour on this backend's SQLCompiler."""
class SQLDateCompiler(compiler.SQLDateCompiler, SQLCompiler):
    """Date-lookup compiler: stock Django behaviour on this backend's SQLCompiler."""
| Python |
from django.db.backends import BaseDatabaseClient
import os
import sys
class DatabaseClient(BaseDatabaseClient):
    """Launch the command-line SQL shell: osql on Windows, isql elsewhere."""
    if os.name=='nt':
        executable_name = 'osql'
    else:
        executable_name = 'isql'
    def runshell(self):
        """Spawn the interactive shell with this connection's settings."""
        settings_dict = self.connection.settings_dict
        # DATABASE_OPTIONS entries override the top-level settings.
        user = settings_dict['DATABASE_OPTIONS'].get('user', settings_dict['DATABASE_USER'])
        password = settings_dict['DATABASE_OPTIONS'].get('passwd', settings_dict['DATABASE_PASSWORD'])
        if os.name=='nt':
            db = settings_dict['DATABASE_OPTIONS'].get('db', settings_dict['DATABASE_NAME'])
            server = settings_dict['DATABASE_OPTIONS'].get('host', settings_dict['DATABASE_HOST'])
            # NOTE(review): 'port' is collected but never added to args.
            port = settings_dict['DATABASE_OPTIONS'].get('port', settings_dict['DATABASE_PORT'])
            defaults_file = settings_dict['DATABASE_OPTIONS'].get('read_default_file')
            args = [self.executable_name]
            if server:
                args += ["-S", server]
            if user:
                args += ["-U", user]
            if password:
                args += ["-P", password]
            else:
                args += ["-E"] # Try trusted connection instead
            if db:
                args += ["-d", db]
            if defaults_file:
                args += ["-i", defaults_file]
        else:
            # Unix: hand isql a single shell command string (shell=True below).
            dsn = settings_dict['DATABASE_OPTIONS'].get('dsn', settings_dict['DATABASE_ODBC_DSN'])
            args = ['%s -v %s %s %s' % (self.executable_name, dsn, user, password)]
        # XXX: This works only with Python >= 2.4 because subprocess was added
        # in that release
        import subprocess
        try:
            subprocess.call(args, shell=True)
        except KeyboardInterrupt:
            pass
| Python |
from java.lang import Class
from java.lang import System
from java.io import PrintWriter
from java.sql import DriverManager
from java.sql import Connection
from java.sql import SQLException
# Here are the dbcp-specific classes.
# Note that they are only used in the setupDriver
# method. In normal use, your classes interact
# only with the standard JDBC API
from org.apache.commons.pool.impl import GenericObjectPool
from org.apache.commons.dbcp import PoolableConnectionFactory
from org.apache.commons.dbcp import BasicDataSource
from org.apache.commons.dbcp import DataSourceConnectionFactory
import time
class ManualPoolingDriver(object):
    """
    Wires up an Apache Commons DBCP connection pool around a JDBC URL and
    registers it with the DBCP PoolingDriver so connections can later be
    obtained via the "jdbc:apache:commons:dbcp:<pool_name>" URL.
    """
    def __init__(self, connectURI, username, password, pool_name):
        self.connectionPool = GenericObjectPool(None)
        self._pool_name = pool_name
        source = BasicDataSource()
        source.setUrl(connectURI)
        source.setUsername(username)
        source.setPassword(password)
        source.setInitialSize(1)  # Number of connections to start with
        source.setMinIdle(5)      # Allow a bottom of 5 idle connections
        source.setMaxActive(10)   # Max of 10 database connection
        source.setDefaultTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED)
        source.setMinEvictableIdleTimeMillis(500)
        self.connectionFactory = DataSourceConnectionFactory(source)
        # Now we'll create the PoolableConnectionFactory, which wraps
        # the "real" Connections created by the ConnectionFactory with
        # the classes that implement the pooling functionality.
        self.poolableConnectionFactory = PoolableConnectionFactory(self.connectionFactory,
                                                                   self.connectionPool,
                                                                   None,
                                                                   None,
                                                                   False,
                                                                   True)
        # Finally, we create the PoolingDriver itself...
        Class.forName("org.apache.commons.dbcp.PoolingDriver")
        driver = DriverManager.getDriver("jdbc:apache:commons:dbcp:")
        # ...and register our pool with it.
        driver.registerPool(self._pool_name, self.connectionPool)
        # Now we can just use the connect string "jdbc:apache:commons:dbcp:<pool_name>"
        # to access our pool of Connections.
    def printDriverStats(self):
        # Debug helper: dump active/idle connection counts for this pool.
        driver = DriverManager.getDriver("jdbc:apache:commons:dbcp:")
        connectionPool = driver.getConnectionPool(self._pool_name)
        print "NumActive: " + str(connectionPool.getNumActive())
        print "NumIdle: " + str(connectionPool.getNumIdle())
    def shutdownDriver(self):
        # Closes the pool and releases its connections.
        driver = DriverManager.getDriver("jdbc:apache:commons:dbcp:")
        driver.closePool(self._pool_name)
| Python |
from django.db.backends import BaseDatabaseOperations
import query
import datetime
import time
import decimal
class DatabaseOperations(BaseDatabaseOperations):
    """
    SQL Server-specific implementations of the operations Django's ORM
    delegates to the backend: identifier quoting, date handling, flush
    and reseed SQL, and Python<->database value conversions.
    """
    compiler_module = "doj.backends.zxjdbc.sql_server.compiler"
    def __init__(self, connection):
        super(DatabaseOperations, self).__init__()
        self.connection = connection
        # Cached server version (2000/2005/2008); resolved lazily on first
        # use because it requires a round trip to the database.
        self._ss_ver = None
    def _get_sql_server_ver(self):
        """
        Returns the version of the SQL Server in use:
        """
        if self._ss_ver is not None:
            return self._ss_ver
        cur = self.connection.cursor()
        cur.execute("SELECT CAST(SERVERPROPERTY('ProductVersion') as varchar)")
        # Major version 10 => 2008, 9 => 2005, anything older => 2000.
        ver_code = int(cur.fetchone()[0].split('.')[0])
        if ver_code >= 10:
            self._ss_ver = 2008
        elif ver_code == 9:
            self._ss_ver = 2005
        else:
            self._ss_ver = 2000
        return self._ss_ver
    sql_server_ver = property(_get_sql_server_ver)
    def date_extract_sql(self, lookup_type, field_name):
        """
        Given a lookup_type of 'year', 'month', 'day' or 'week_day', returns
        the SQL that extracts a value from the given date field field_name.
        """
        if lookup_type == 'week_day':
            return "DATEPART(dw, %s)" % field_name
        else:
            return "DATEPART(%s, %s)" % (lookup_type, field_name)
    def date_trunc_sql(self, lookup_type, field_name):
        """
        Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
        truncates the given date field field_name to a DATE object with only
        the given specificity.
        """
        if lookup_type == 'year':
            return "Convert(datetime, Convert(varchar, DATEPART(year, %s)) + '/01/01')" % field_name
        if lookup_type == 'month':
            return "Convert(datetime, Convert(varchar, DATEPART(year, %s)) + '/' + Convert(varchar, DATEPART(month, %s)) + '/01')" % (field_name, field_name)
        if lookup_type == 'day':
            # Style 112 is ISO yyyymmdd, which drops the time component.
            return "Convert(datetime, Convert(varchar(12), %s, 112))" % field_name
    def field_cast_sql(self, db_type):
        """
        Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary
        to cast it before using it in a WHERE statement. Note that the
        resulting string should contain a '%s' placeholder for the column being
        searched against.
        """
        if db_type:
            # ntext columns cannot be compared directly before SQL Server 2005.
            if self.sql_server_ver < 2005 and db_type.lower() == 'ntext':
                return 'CAST(%s as nvarchar)'
            elif 'datetime' == db_type.lower():
                # We need to convert date and datetime columns into
                # ODBC canonical format.
                # See: http://msdn.microsoft.com/en-us/library/ms187928.aspx
                return "CONVERT(varchar, %s, 120)"
            elif 'smalldatetime' == db_type.lower():
                # Same canonical format, truncated to the date part only.
                return "SUBSTRING(CONVERT(varchar, %s, 120), 1, 10)"
        return '%s'
    def fulltext_search_sql(self, field_name):
        """
        Returns the SQL WHERE clause to use in order to perform a full-text
        search of the given field_name. Note that the resulting string should
        contain a '%s' placeholder for the value being searched against.
        """
        return 'CONTAINS(%s, %%s)' % field_name
    def last_insert_id(self, cursor, table_name, pk_name):
        """
        Given a cursor object that has just performed an INSERT statement into
        a table that has an auto-incrementing ID, returns the newly created ID.
        This method also receives the table name and the name of the primary-key
        column.
        """
        # TODO: Check how the `last_insert_id` is being used in the upper layers
        # in context of multithreaded access, compare with other backends
        # IDENT_CURRENT: http://msdn2.microsoft.com/en-us/library/ms175098.aspx
        # SCOPE_IDENTITY: http://msdn2.microsoft.com/en-us/library/ms190315.aspx
        # @@IDENTITY: http://msdn2.microsoft.com/en-us/library/ms187342.aspx
        # IDENT_CURRENT is not limited by scope and session; it is limited to
        # a specified table. IDENT_CURRENT returns the value generated for
        # a specific table in any session and any scope.
        # SCOPE_IDENTITY and @@IDENTITY return the last identity values that
        # are generated in any table in the current session. However,
        # SCOPE_IDENTITY returns values inserted only within the current scope;
        # @@IDENTITY is not limited to a specific scope.
        table_name = self.quote_name(table_name)
        cursor.execute("SELECT CAST(IDENT_CURRENT(%s) as bigint)", [table_name])
        return cursor.fetchone()[0]
    def fetch_returned_insert_id(self, cursor):
        """
        Given a cursor object that has just performed an INSERT/OUTPUT statement
        into a table that has an auto-incrementing ID, returns the newly created
        ID.
        """
        return cursor.fetchone()[0]
    def lookup_cast(self, lookup_type):
        # Case-insensitive lookups compare against an upper-cased column.
        if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
            return "UPPER(%s)"
        return "%s"
    def query_class(self, DefaultQueryClass):
        """
        Given the default Query class, returns a custom Query class
        to use for this backend. Returns None if a custom Query isn't used.
        See also BaseDatabaseFeatures.uses_custom_query_class, which regulates
        whether this method is called at all.
        """
        return query.query_class(DefaultQueryClass)
    def quote_name(self, name):
        """
        Returns a quoted version of the given table, index or column name. Does
        not quote the given name if it's already been quoted.
        """
        if 'CONVERT(' in name:
            # SQL Server has a fragile parser. If we've already applied CONVERT
            # on a column, treat this column as pre-quoted.
            # No - it doesn't make any sense. Don't think too hard about this.
            return name
        if name.startswith('[') and name.endswith(']'):
            return name  # Quoting once is enough.
        return '[%s]' % name
    def random_function_sql(self):
        """
        Returns a SQL expression that returns a random value.
        """
        return "RAND()"
    def last_executed_query(self, cursor, sql, params):
        """
        Returns a string of the query last executed by the given cursor, with
        placeholders replaced with actual values.
        `sql` is the raw query containing placeholders, and `params` is the
        sequence of parameters. These are used by default, but this method
        exists for database backends to provide a better implementation
        according to their own quoting schemes.
        """
        # CursorWrapper records the raw sql/params, use those instead of the
        # already-rewritten arguments Django passes in.
        return super(DatabaseOperations, self).last_executed_query(cursor, cursor.last_sql, cursor.last_params)
    def no_limit_value(self):
        # SQL Server has no "LIMIT -1"-style sentinel; None means unlimited.
        return None
    def regex_lookup(self, lookup_type):
        """
        Returns the string to use in a query when performing regular expression
        lookups (using "regex" or "iregex"). The resulting string should
        contain a '%s' placeholder for the column being searched against.
        If the feature is not supported (or part of it is not supported), a
        NotImplementedError exception can be raised.
        """
        # Relies on a user-defined dbo.regex() function being installed on
        # the server; third argument toggles case-insensitivity.
        if lookup_type == 'regex':
            ignore_case = 0
        else:
            ignore_case = 1
        return "dbo.regex(%%s, %%s, %s) = 1" % ignore_case
    #def savepoint_create_sql(self, sid):
    #    """
    #    Returns the SQL for starting a new savepoint. Only required if the
    #    "uses_savepoints" feature is True. The "sid" parameter is a string
    #    for the savepoint id.
    #    """
    #    return "SAVE TRANSACTION %s" % sid
    #def savepoint_commit_sql(self, sid):
    #    """
    #    Returns the SQL for committing the given savepoint.
    #    """
    #    return "COMMIT TRANSACTION %s" % sid
    #def savepoint_rollback_sql(self, sid):
    #    """
    #    Returns the SQL for rolling back the given savepoint.
    #    """
    #    return "ROLLBACK TRANSACTION %s" % sid
    def sql_flush(self, style, tables, sequences):
        """
        Returns a list of SQL statements required to remove all data from
        the given database tables (without actually removing the tables
        themselves).
        The `style` argument is a Style object as returned by either
        color_style() or no_style() in django.core.management.color.
        """
        if tables:
            # Cannot use TRUNCATE on tables that are referenced by a FOREIGN KEY
            # So must use the much slower DELETE
            from django.db import connection
            cursor = connection.cursor()
            # Try to minimize the risks of the braindead inconsistency in
            # DBCC CHECKIDENT(table, RESEED, n) behavior: the reseed value
            # means different things depending on whether rows exist.
            seqs = []
            for seq in sequences:
                cursor.execute("SELECT COUNT(*) FROM %s" % self.quote_name(seq["table"]))
                rowcnt = cursor.fetchone()[0]
                elem = {}
                if rowcnt:
                    elem['start_id'] = 0
                else:
                    elem['start_id'] = 1
                elem.update(seq)
                seqs.append(elem)
            # Collect every FK/CHECK constraint so it can be disabled while
            # the DELETEs run and re-enabled afterwards.
            cursor.execute("SELECT TABLE_NAME, CONSTRAINT_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_TYPE not in ('PRIMARY KEY','UNIQUE')")
            fks = cursor.fetchall()
            qn = self.quote_name
            #turn constraints off
            sql_list = ['ALTER TABLE %s NOCHECK CONSTRAINT %s;' % \
                    (qn(fk[0]), qn(fk[1])) for fk in fks]
            sql_list.extend(['%s %s %s;' % (style.SQL_KEYWORD('DELETE'), style.SQL_KEYWORD('FROM'),
                             style.SQL_FIELD(qn(table)) ) for table in tables])
            # Then reset the counters on each table.
            sql_list.extend(['%s %s (%s, %s, %s) %s %s;' % (
                style.SQL_KEYWORD('DBCC'),
                style.SQL_KEYWORD('CHECKIDENT'),
                style.SQL_FIELD(self.quote_name(seq["table"])),
                style.SQL_KEYWORD('RESEED'),
                style.SQL_FIELD('%d' % seq['start_id']),
                style.SQL_KEYWORD('WITH'),
                style.SQL_KEYWORD('NO_INFOMSGS'),
                ) for seq in seqs])
            #Turn constraints back on
            sql_list.extend(['ALTER TABLE %s CHECK CONSTRAINT %s;' % \
                    (qn(fk[0]), qn(fk[1])) for fk in fks])
            return sql_list
        else:
            return []
    #def sequence_reset_sql(self, style, model_list):
    #    """
    #    Returns a list of the SQL statements required to reset sequences for
    #    the given models.
    #
    #    The `style` argument is a Style object as returned by either
    #    color_style() or no_style() in django.core.management.color.
    #    """
    #    from django.db import models
    #    output = []
    #    for model in model_list:
    #        for f in model._meta.local_fields:
    #            if isinstance(f, models.AutoField):
    #                output.append(...)
    #                break # Only one AutoField is allowed per model, so don't bother continuing.
    #        for f in model._meta.many_to_many:
    #            output.append(...)
    #    return output
    def start_transaction_sql(self):
        """
        Returns the SQL statement required to start a transaction.
        """
        return "BEGIN TRANSACTION"
    def sql_for_tablespace(self, tablespace, inline=False):
        """
        Returns the SQL that will be appended to tables or rows to define
        a tablespace. Returns '' if the backend doesn't use tablespaces.
        """
        return "ON %s" % self.quote_name(tablespace)
    def prep_for_like_query(self, x):
        """Prepares a value for use in a LIKE query."""
        from django.utils.encoding import smart_unicode
        # http://msdn2.microsoft.com/en-us/library/ms179859.aspx
        # '[' must be escaped as well as the usual wildcards, since T-SQL
        # LIKE treats [...] as a character class.
        return smart_unicode(x).\
            replace('\\', '\\\\').\
            replace('[', '[[]').\
            replace('%', '[%]').\
            replace('_', '[_]')
    def prep_for_iexact_query(self, x):
        """
        Same as prep_for_like_query(), but called for "iexact" matches, which
        need not necessarily be implemented using "LIKE" in the backend.
        """
        return x
    def value_to_db_datetime(self, value):
        """
        Transform a datetime value to an object compatible with what is expected
        by the backend driver for datetime columns.
        """
        if value is None:
            return None
        # SQL Server doesn't support microseconds
        return value.replace(microsecond=0)
    def value_to_db_time(self, value):
        """
        Transform a time value to an object compatible with what is expected
        by the backend driver for time columns.
        """
        if value is None:
            return None
        # SQL Server doesn't support microseconds
        if isinstance(value, basestring):
            return datetime.datetime(*(time.strptime(value, '%H:%M:%S')[:6]))
        # Times are stored as datetimes anchored to 1900-01-01 (see
        # convert_values for the reverse mapping).
        return datetime.datetime(1900, 1, 1, value.hour, value.minute, value.second)
    def year_lookup_bounds(self, value):
        """
        Returns a two-elements list with the lower and upper bound to be used
        with a BETWEEN operator to query a field value using a year lookup
        `value` is an int, containing the looked-up year.
        """
        first = '%s-01-01 00:00:00'
        # SQL Server doesn't support microseconds
        last = '%s-12-31 23:59:59'
        return [first % value, last % value]
    def value_to_db_decimal(self, value, max_digits, decimal_places):
        """
        Transform a decimal.Decimal value to an object compatible with what is
        expected by the backend driver for decimal (numeric) columns.
        """
        if value is None:
            return None
        if isinstance(value, decimal.Decimal):
            # Quantize with the column's precision so the string sent to the
            # driver never exceeds max_digits.
            context = decimal.getcontext().copy()
            context.prec = max_digits
            return u"%.*f" % (decimal_places, value.quantize(decimal.Decimal(".1") ** decimal_places, context=context))
        else:
            return u"%.*f" % (decimal_places, value)
    def convert_values(self, value, field):
        """
        Coerce the value returned by the database backend into a consistent
        type that is compatible with the field type.
        In our case, cater for the fact that SQL Server < 2008 has no
        separate Date and Time data types.
        TODO: See how we'll handle this for SQL Server >= 2008
        """
        if value is None:
            return None
        if field and field.get_internal_type() == 'DateTimeField':
            return value
        elif field and field.get_internal_type() == 'DateField':
            value = value.date()  # extract date
        # Times come back as datetimes anchored at 1900-01-01 (see
        # value_to_db_time), so that date also marks "this is a time".
        elif field and field.get_internal_type() == 'TimeField' or (isinstance(value, datetime.datetime) and value.year == 1900 and value.month == value.day == 1):
            value = value.time()  # extract time
        # Some cases (for example when select_related() is used) aren't
        # caught by the DateField case above and date fields arrive from
        # the DB as datetime instances.
        # Implement a workaround stealing the idea from the Oracle
        # backend. It's not perfect so the same warning applies (i.e. if a
        # query results in valid date+time values with the time part set
        # to midnight, this workaround can surprise us by converting them
        # to the datetime.date Python type).
        elif isinstance(value, datetime.datetime) and value.hour == value.minute == value.second == value.microsecond == 0:
            value = value.date()
        # Force floats to the correct type
        elif value is not None and field and field.get_internal_type() == 'FloatField':
            value = float(value)
        return value
| Python |
from django.db.backends.creation import BaseDatabaseCreation
import base64
from django.utils.hashcompat import md5_constructor
import random
class DataTypesWrapper(dict):
    """
    Column-type mapping that synthesizes the two "positive integer" column
    definitions on demand.

    SQL Server models a positive integer as a plain int/smallint plus a
    CHECK constraint. Constraint names must be unique database-wide, so a
    random suffix is baked into each generated name — this keeps repeated
    schema creation (e.g. the regression tests) from colliding.
    """
    def __getitem__(self, item):
        if item not in ('PositiveIntegerField', 'PositiveSmallIntegerField'):
            # Everything else is a plain dict lookup.
            return super(DataTypesWrapper, self).__getitem__(item)
        coltype = {'PositiveIntegerField': 'int',
                   'PositiveSmallIntegerField': 'smallint'}[item]
        # Random base64-ish suffix to keep the CHECK constraint name unique.
        suffix = base64.b64encode(md5_constructor(str(random.random())).hexdigest(), '__')[:6]
        return '%(fldtype)s CONSTRAINT [CK_%(fldtype)s_pos_%(unique)s_%%(column)s] CHECK ([%%(column)s] >= 0)' % {
            'fldtype': coltype, 'unique': suffix}
class DatabaseCreation(BaseDatabaseCreation):
    """Schema creation/destruction helpers for the SQL Server backend."""
    # This dictionary maps Field objects to their associated MS SQL column
    # types, as strings. Column-type strings can contain format strings; they'll
    # be interpolated against the values of Field.__dict__ before being output.
    # If a column type is set to None, it won't be included in the output.
    #
    # Any format strings starting with "qn_" are quoted before being used in the
    # output (the "qn_" prefix is stripped before the lookup is performed.
    data_types = DataTypesWrapper({
    #data_types = {
        'AutoField':         'int IDENTITY (1, 1)',
        'BooleanField':      'bit',
        'CharField':         'nvarchar(%(max_length)s)',
        'CommaSeparatedIntegerField': 'nvarchar(%(max_length)s)',
        'DateField':         'datetime',
        'DateTimeField':     'datetime',
        'DecimalField':      'numeric(%(max_digits)s, %(decimal_places)s)',
        'FileField':         'nvarchar(%(max_length)s)',
        'FilePathField':     'nvarchar(%(max_length)s)',
        'FloatField':        'double precision',
        'IntegerField':      'int',
        'IPAddressField':    'nvarchar(15)',
        'NullBooleanField':  'bit',
        'OneToOneField':     'int',
        #'PositiveIntegerField': 'integer CONSTRAINT [CK_int_pos_%(column)s] CHECK ([%(column)s] >= 0)',
        #'PositiveSmallIntegerField': 'smallint CONSTRAINT [CK_smallint_pos_%(column)s] CHECK ([%(column)s] >= 0)',
        'SlugField':         'nvarchar(%(max_length)s)',
        'SmallIntegerField': 'smallint',
        'TextField':         'nvarchar(max)',
        'TimeField':         'datetime',
    #}
    })
    def _destroy_test_db(self, test_database_name, verbosity):
        "Internal implementation - remove the test db tables."
        cursor = self.connection.cursor()
        self.set_autocommit()
        #time.sleep(1) # To avoid "database is being accessed by other users" errors.
        # Kick every other session off the database so DROP cannot fail with
        # "database is in use".
        cursor.execute("ALTER DATABASE %s SET SINGLE_USER WITH ROLLBACK IMMEDIATE " % \
                self.connection.ops.quote_name(test_database_name))
        cursor.execute("DROP DATABASE %s" % \
                self.connection.ops.quote_name(test_database_name))
        self.connection.close()
| Python |
from django.db.models.sql.aggregates import *
class StdDev(Aggregate):
    """Standard-deviation aggregate mapped onto T-SQL STDEV/STDEVP."""
    is_computed = True

    def __init__(self, col, sample=False, **extra):
        super(StdDev, self).__init__(col, **extra)
        # STDEV estimates from a sample; STDEVP is the population form.
        if sample:
            self.sql_function = 'STDEV'
        else:
            self.sql_function = 'STDEVP'
class Variance(Aggregate):
    """Variance aggregate mapped onto T-SQL VAR/VARP."""
    is_computed = True

    def __init__(self, col, sample=False, **extra):
        super(Variance, self).__init__(col, **extra)
        # VAR estimates from a sample; VARP is the population form.
        if sample:
            self.sql_function = 'VAR'
        else:
            self.sql_function = 'VARP'
class Avg(Aggregate):
    """AVG aggregate with an explicit FLOAT cast."""
    is_computed = True
    sql_function = 'AVG'
    # SQL Server's AVG over an integer column yields an integer; cast the
    # field to FLOAT so averages keep their fractional part.
    sql_template = '%(function)s(Convert(FLOAT, %(field)s))'
| Python |
"""
jTDS/MSSQL2005 database backend for Django.
"""
try:
# Force the database driver to load
from java.lang import Class
cls = Class.forName("net.sourceforge.jtds.jdbc.Driver").newInstance()
from pool import ManualPoolingDriver
from com.ziclix.python.sql import zxJDBC as Database
from com.ziclix.python.sql import zxJDBC
from com.ziclix.python.sql import PyStatement, PyExtendedCursor, PyCursor, PyConnection
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading zxJDBC module: %s" % e)
from django.db.backends import BaseDatabaseWrapper, BaseDatabaseFeatures, BaseDatabaseValidation
from doj.backends.zxjdbc.common import zxJDBCDatabaseWrapper
from django.db.backends.signals import connection_created
from django.conf import settings
# unchecked imports
from operations import DatabaseOperations
from client import DatabaseClient
from creation import DatabaseCreation
from introspection import DatabaseIntrospection
import os
import warnings
# Promote use of the deprecated DATABASE_ODBC_* settings to a hard error so
# misconfiguration is caught at import time.
warnings.filterwarnings('error', 'The DATABASE_ODBC.+ is deprecated',DeprecationWarning, __name__, 0)
# Collation used to build the case-(in)sensitive LIKE operators on
# DatabaseWrapper; may be overridden via DATABASE_OPTIONS['collation'].
collation = 'Latin1_General_CI_AS'
if 'collation' in settings.DATABASE_OPTIONS:
    collation = settings.DATABASE_OPTIONS['collation']
# Re-export the driver's exception classes under the names Django expects.
DatabaseError = zxJDBC.DatabaseError
IntegrityError = zxJDBC.IntegrityError
class DatabaseFeatures(BaseDatabaseFeatures):
    """Feature flags for the zxJDBC SQL Server backend."""
    # Use the custom SqlServerQuery class (see query.query_class).
    uses_custom_query_class = True
    # zxJDBC materializes result sets; chunked reads are not supported.
    can_use_chunked_reads = False
    # INSERT ... OUTPUT inserted.<pk> lets us read back the identity value.
    can_return_id_from_insert = True
    #uses_savepoints = True
class DatabaseWrapper(zxJDBCDatabaseWrapper):
    """
    Django database wrapper for SQL Server over the jTDS JDBC driver,
    pooled through Apache Commons DBCP (see ManualPoolingDriver).
    """
    MARS_Connection = False
    unicode_results = False
    # Collations: http://msdn2.microsoft.com/en-us/library/ms184391.aspx
    #             http://msdn2.microsoft.com/en-us/library/ms179886.aspx
    # T-SQL LIKE: http://msdn2.microsoft.com/en-us/library/ms179859.aspx
    # Full-Text search: http://msdn2.microsoft.com/en-us/library/ms142571.aspx
    #   CONTAINS: http://msdn2.microsoft.com/en-us/library/ms187787.aspx
    #   FREETEXT: http://msdn2.microsoft.com/en-us/library/ms176078.aspx
    operators = {
        # Since '=' is used not only for string comparision there is no way
        # to make it case (in)sensitive. It will simply fallback to the
        # database collation.
        'exact': '= %s',
        'iexact': "= UPPER(%s)",
        'contains': "LIKE %s ESCAPE '\\' COLLATE " + collation,
        'icontains': "LIKE UPPER(%s) ESCAPE '\\' COLLATE "+ collation,
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': "LIKE %s ESCAPE '\\' COLLATE " + collation,
        'endswith': "LIKE %s ESCAPE '\\' COLLATE " + collation,
        'istartswith': "LIKE UPPER(%s) ESCAPE '\\' COLLATE " + collation,
        'iendswith': "LIKE UPPER(%s) ESCAPE '\\' COLLATE " + collation,
        # TODO: remove, keep native T-SQL LIKE wildcards support
        # or use a "compatibility layer" and replace '*' with '%'
        # and '.' with '_'
        'regex': 'LIKE %s COLLATE ' + collation,
        'iregex': 'LIKE %s COLLATE ' + collation,
        # TODO: freetext, full-text contains...
    }
    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        # Tracks the last database a pool was registered for, so the pool
        # is only rebuilt when the target database changes.
        self._LAST_DATABASE_NAME = None
        self._db_count = 0
        self.connection = None
        if 'OPTIONS' in self.settings_dict:
            self.MARS_Connection = self.settings_dict['OPTIONS'].get('MARS_Connection', False)
            self.unicode_results = self.settings_dict['OPTIONS'].get('unicode_results', False)
        self.features = DatabaseFeatures(self)
        self.ops = DatabaseOperations(self)
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = BaseDatabaseValidation(self)
    def _cursor(self):
        """
        Returns a CursorWrapper, creating (and configuring) the pooled
        connection on first use.
        """
        new_conn = False
        settings_dict = self.settings_dict
        if self.connection is None:
            new_conn = True
            # Prefer a container-supplied JNDI connection when available.
            self.connection = self.new_jndi_connection()
            if self.connection is None:
                pool_name = self._register_driver()
                if not settings_dict['NAME']:
                    from django.core.exceptions import ImproperlyConfigured
                    raise ImproperlyConfigured('You need to specify NAME in your Django settings file.')
                url='jdbc:apache:commons:dbcp:%s' % pool_name
                self.connection = Database.connect(url, None, None, 'org.apache.commons.dbcp.PoolingDriver')
            connection_created.send(sender=self.__class__)
        cursor = self.connection.cursor()
        if new_conn:
            # Set date format for the connection.
            cursor.execute("SET DATEFORMAT ymd; SET DATEFIRST 7")
            # SQL Server violates the SQL standard w.r.t handling NULL values in UNIQUE columns.
            # We work around this by creating schema bound views on tables with with nullable unique columns
            # but we need to modify the cursor to abort if the view has problems.
            # See http://blogs.msdn.com/sqlcat/archive/2005/12/20/506138.aspx
            cursor.execute("SET ARITHABORT ON")
            cursor.execute("SET CONCAT_NULL_YIELDS_NULL ON")
            cursor.execute("SET QUOTED_IDENTIFIER ON")
            cursor.execute("SET ANSI_NULLS ON")
            cursor.execute("SET ANSI_PADDING ON")
            cursor.execute("SET ANSI_WARNINGS ON")
            cursor.execute("SET NUMERIC_ROUNDABORT OFF")
            cursor.execute("SET TRANSACTION ISOLATION LEVEL SERIALIZABLE")
            # jTDS can't execute some sql like CREATE DATABASE etc. in
            # Multi-statement, so we need to commit the above SQL sentences to
            # avoid this
        return CursorWrapper(cursor)
    def _register_driver(self):
        # Configure the pooled connection driver
        # If the target database hasn't changed, reuse the existing pool.
        if self._LAST_DATABASE_NAME == self.settings_dict['NAME']:
            return "jdbc_pool_%s" % self._db_count
        self._db_count += 1
        pool_name = "jdbc_pool_%s" % self._db_count
        db_dict = {
            'NAME': self.settings_dict['NAME'],
            'HOST': self.settings_dict['HOST'] or 'localhost',
            'PORT': self.settings_dict['PORT'] or 1433,
        }
        self.driver = ManualPoolingDriver("jdbc:jtds:sqlserver://%(HOST)s:%(PORT)s/%(NAME)s" % db_dict,
                                          self.settings_dict['USER'],
                                          self.settings_dict['PASSWORD'],
                                          pool_name,
                                          )
        self._LAST_DATABASE_NAME = self.settings_dict['NAME']
        return pool_name
class CursorWrapper(object):
    """
    Adapts a zxJDBC cursor to the DB-API flavour Django expects.

    Responsibilities: translate '%s' placeholders into the '?' markers
    zxJDBC understands, normalise parameter values, convert zxJDBC Row
    objects into plain tuples, and remember the last statement executed
    so last_executed_query() can report it.
    """
    def __init__(self, cursor):
        self.cursor = cursor
        self.last_sql = ''      # raw SQL, before placeholder rewriting
        self.last_params = ()   # parameters after normalisation

    def format_sql(self, sql, n_params=None):
        """Rewrite '%s' placeholders as zxJDBC-style '?' markers."""
        if n_params is not None:
            # Interpolate exactly n_params question marks.
            return sql % (('?',) * n_params)
        if '%s' in sql:
            return sql.replace('%s', '?')
        return sql

    def format_params(self, params):
        """Coerce parameter values into types the JDBC driver accepts."""
        converted = []
        for value in params:
            if isinstance(value, (unicode, str)):
                converted.append(value)
            elif isinstance(value, bool):
                # jTDS wants integer bits rather than Python booleans.
                if value:
                    converted.append(1)
                else:
                    converted.append(0)
            else:
                converted.append(value)
        return tuple(converted)

    def execute(self, sql, params=()):
        # Remember the raw statement/params for last_executed_query().
        self.last_sql = sql
        sql = self.format_sql(sql, len(params))
        params = self.format_params(params)
        self.last_params = params
        return self.cursor.execute(sql, params)

    def executemany(self, sql, params_list):
        sql = self.format_sql(sql)
        if not params_list:
            # zxjdbc's cursor.executemany() doesn't support an empty
            # param_list for a parameterized statement, so bail out.
            if '?' in sql:
                return
        else:
            params_list = [self.format_params(row) for row in params_list]
        return self.cursor.executemany(sql, params_list)

    def format_results(self, rows):
        """
        Convert a zxJDBC Row into a plain tuple (Rows are not sliceable),
        decoding data coming from the database if needed.
        """
        return tuple(rows)

    def fetchone(self):
        row = self.cursor.fetchone()
        if row is None:
            # NOTE(review): DB-API would return None here; this backend's
            # callers rely on the empty-list sentinel instead.
            return []
        return self.format_results(row)

    def fetchmany(self, chunk):
        return [self.format_results(row) for row in self.cursor.fetchmany(chunk)]

    def fetchall(self):
        return [self.format_results(row) for row in self.cursor.fetchall()]

    def __getattr__(self, attr):
        # Only reached when normal attribute lookup fails; delegate
        # everything else to the underlying zxJDBC cursor.
        if attr in self.__dict__:
            return self.__dict__[attr]
        return getattr(self.cursor, attr)

    def __iter__(self):
        return iter(self.cursor)
| Python |
"""Custom Query class for MS SQL Serever."""
# query_class returns the base class to use for Django queries.
# The custom 'SqlServerQuery' class derives from django.db.models.sql.query.Query
# which is passed in as "QueryClass" by Django itself.
#
# SqlServerQuery overrides:
# ...insert queries to add "SET IDENTITY_INSERT" if needed.
# ...select queries to emulate LIMIT/OFFSET for sliced queries.
#DEBUG=True
# Flip to True to dump generated SQL and pagination state to stdout.
DEBUG=False
import string
# Cache. Maps default query class to new SqlServer query class.
_classes = {}
from com.ziclix.python.sql import PyStatement, PyExtendedCursor, PyCursor
from java.sql import Types
def query_class(QueryClass):
"""Return a custom Query subclass for SQL Server."""
class SqlServerQuery(QueryClass):
def __init__(self, *args, **kwargs):
super(SqlServerQuery, self).__init__(*args, **kwargs)
# If we are an insert query, wrap "as_sql"
if self.__class__.__name__ == "InsertQuery":
self._parent_as_sql = self.as_sql
self.as_sql = self._insert_as_sql
def __reduce__(self):
"""
Enable pickling for this class (normal pickling handling doesn't
work as Python can only pickle module-level classes by default).
"""
if hasattr(QueryClass, '__getstate__'):
assert hasattr(QueryClass, '__setstate__')
data = self.__getstate__()
else:
data = self.__dict__
return (unpickle_query_class, (QueryClass,), data)
def get_primary_keys(self):
return set([f for f in self.model._meta.fields if f.primary_key])
def resolve_columns(self, row, fields=()):
"""
Cater for the fact that SQL Server has no separate Date and Time
data types.
"""
from django.db.models.fields import DateField, DateTimeField, \
TimeField, BooleanField, NullBooleanField
values = []
for value, field in map(None, row, fields):
if value is not None:
if isinstance(field, DateTimeField):
# DateTimeField subclasses DateField so must be checked
# first.
pass # do nothing
elif isinstance(field, DateField):
value = value.date() # extract date
elif isinstance(field, TimeField):
value = value.time() # extract time
elif isinstance(field, (BooleanField, NullBooleanField)):
if value in (1,'t','True','1',True):
value = True
else:
value = False
values.append(value)
return values
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
"""
# By default, just build the result and params with the superclass results
sql, params = super(SqlServerQuery, self).as_sql(with_limits=False,
with_col_aliases=with_col_aliases)
# Now comes the tricky part, we need to specialize the query to work against SQL Server 2k
# Stuff to watch for
if DEBUG:
print "SQL [%s] " % sql
print "Params [%s] " % str(params)
print "High mark [%s] " % self.high_mark
print "Low mark [%s] " % self.low_mark
print "Distinct [%s] " % self.distinct
print "With Limits [%s] " % with_limits
print "With col aliases [%s] " % with_col_aliases
print "Columns [%s] " % self.get_columns(with_col_aliases)
print "Ordering [%s] " % self.get_ordering()
if self.high_mark or self.low_mark:
# Ok, we do a lot of finagling here just to get pagination
cstmt = self._setup_pagination(sql, params, with_limits, with_col_aliases, \
self.low_mark, self.high_mark)
sql, params = PyStatement(cstmt, '', PyStatement.STATEMENT_PREPARED), ()
return sql, params
def _insert_as_sql(self, *args, **kwargs):
meta = self.get_meta()
quoted_table = self.connection.ops.quote_name(meta.db_table)
# Get (sql,params) from original InsertQuery.as_sql
sql, params = self._parent_as_sql(*args,**kwargs)
if (meta.pk.attname in self.columns) and (meta.pk.__class__.__name__ == "AutoField"):
# check if only have pk and default value
if len(self.columns)==1 and len(params)==0:
sql = "INSERT INTO %s DEFAULT VALUES" % quoted_table
else:
sql = "SET IDENTITY_INSERT %s ON;%s;SET IDENTITY_INSERT %s OFF" %\
(quoted_table, sql, quoted_table)
return sql, params
def clone(self, klass=None, **kwargs):
# Just use the parent clone - don't specialize any queries
result = super(SqlServerQuery, self).clone(klass, **kwargs)
return result
def execute_sql(self, result_type='multi'):
    """
    Run the query against the database and returns the result(s). The
    return value is a single data item if result_type is SINGLE, or an
    iterator over the results if the result_type is MULTI.
    result_type is either MULTI (use fetchmany() to retrieve all rows),
    SINGLE (only retrieve a single row), or None (no results expected, but
    the cursor is returned, since it's used by subclasses such as
    InsertQuery).

    Unlike the default implementation, paginated queries arrive here as a
    PyStatement (built by _setup_pagination) and are executed against the
    raw zxJDBC cursor; the full result set is materialised into a list so
    the backing #temp_table can be dropped before returning.
    """
    from django.db.models.sql.constants import MULTI, SINGLE, GET_ITERATOR_CHUNK_SIZE
    if self.high_mark and self.high_mark <= self.low_mark:
        # Short circuit if we're slicing nothing
        return []
    # Pull in these imports from main Django DB
    # code base, but we can't import at the top level
    # or else we get circular imports
    from django.db.models.sql.datastructures import EmptyResultSet
    from django.db.models.sql.query import empty_iter
    try:
        sql, params = self.as_sql()
        if not sql:
            raise EmptyResultSet
    except EmptyResultSet:
        # Nothing to run: MULTI callers expect an iterator, everyone
        # else gets None.
        if result_type == MULTI:
            return empty_iter()
        else:
            return
    cursor = self.connection.cursor()
    if isinstance(sql, PyStatement):
        # We sometimes need to override with a PyStatement because
        # it's the only way to implement paginated results.
        # Unwrap the Django cursor layers until we reach the raw zxJDBC
        # PyCursor the prepared statement must be executed against.
        pycur = cursor
        while not isinstance(pycur, PyCursor):
            pycur = pycur.cursor
        sql.execute(pycur, None, None)
    else:
        if DEBUG:
            print sql, params
        cursor.execute(sql, params)
    if not result_type:
        # Caller (e.g. InsertQuery) wants the bare cursor back.
        return cursor
    if result_type == SINGLE:
        if self.ordering_aliases:
            # Trim the synthetic ordering columns off the end of the row.
            return cursor.fetchone()[:-len(self.ordering_aliases)]
        return cursor.fetchone()
    # The MULTI case.
    if self.ordering_aliases:
        from django.db.models.sql.query import order_modified_iter
        result = order_modified_iter(cursor, len(self.ordering_aliases),
                                     self.connection.features.empty_fetchmany_value)
    else:
        result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
                      self.connection.features.empty_fetchmany_value)
    # Need to serialize all the results because we don't want to maintain
    # state between requests
    result = list(result)
    # Force the PyStatement to close if we ever created one
    if isinstance(sql, PyStatement):
        sql.close()
        # Drop the temp table
        cur = self.connection.cursor()
        cur.execute("drop table #temp_table")
        cur.close()
    return result
def get_ordering(self):
    """
    Same as the parent implementation, except that ordering aliases are
    discarded for DISTINCT queries, where they would corrupt the
    projected column list.
    """
    ordering = super(SqlServerQuery, self).get_ordering()
    if self.distinct and self.ordering_aliases:
        self.ordering_aliases = []
    return ordering
def _setup_pagination(self, sql, params, with_limits, with_col_aliases, \
        low_mark, high_mark):
    # Ok, got the column labels, now extract the type information by running the query *twice*
    # Yes, horribly inefficient, but how are you really going to handle all the bizarre corner
    # cases for SQL mangling?
    shim_sql = self._get_temp_table_cols(sql, params)
    # Ok, so we need to obtain the raw JDBC connection, create a prepared statement
    # and append the ORDERING_
    cursor = self.connection.cursor()
    # Dig the underlying java.sql.Connection out of the zxJDBC wrappers.
    jconn = cursor.cursor.connection.__connection__
    # "returnpage" is the stored procedure installed by the sql2kext
    # management command; it emulates LIMIT/OFFSET on SQL Server 2000.
    cstmt = jconn.prepareCall("returnpage(?, ?, ?, ?)");
    cstmt.setString('@query', shim_sql)
    cstmt.setString('@orderby', "djy_sql2k_sort_id ASC")
    # The *only* ordering alias we care about during pagination since we are
    # forcing the output of the original SQL select to go into
    # the IDENTITY-ordered #temp_table created by _get_temp_table_cols.
    self.ordering_aliases = ['djy_sql2k_sort_id']
    from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE
    # ReturnPage takes 1-based, inclusive row numbers; Django marks are
    # 0-based slice bounds, hence the +1 adjustments below.
    if low_mark and high_mark:
        low, high = low_mark +1, high_mark
    elif low_mark:
        # We limit the upper bound to GET_ITERATOR_CHUNK_SIZE number of records or
        # else we risk SQL2k throwing us an instance of java.sql.DataTruncation
        low, high = low_mark +1, GET_ITERATOR_CHUNK_SIZE+(low_mark+1)
    elif high_mark:
        low, high = 1, high_mark
    else:
        raise RuntimeError, "Can't paginate when we have no low or high marks!"
    cstmt.setInt('@startrow', low)
    cstmt.setInt('@endrow', high)
    if DEBUG:
        print "Shim SQL : ", shim_sql
        print "Low mark : ", low
        print "High mark: ", high
    return cstmt
def _get_temp_table_cols(self, sql, params):
    '''
    Run *sql* once through raw JDBC purely to discover the result set's
    column metadata, create a matching #temp_table (plus an IDENTITY
    djy_sql2k_sort_id column recording insertion order), fill it with the
    query results, and return the SELECT used to read the rows back.

    I'm sure there's a *good* way of doing this, but here's a bad way of doing it that works. :)
    '''
    cursor = self.connection.cursor()
    # Unwrap the Django cursor layers down to the raw zxJDBC PyCursor.
    pycur = cursor
    while not isinstance(pycur, PyCursor):
        pycur = pycur.cursor
    jconn = pycur.connection.__connection__
    if params:
        # JDBC prepared statements use '?' placeholders, not DB-API '%s'.
        j_sql = sql.replace("%s", '?')
    else:
        j_sql = sql
    j_pstmt = jconn.prepareStatement(j_sql)
    if params:
        # handle parameters
        from base import coerce_sql2k_type
        for i in range(len(params)):
            param_obj = coerce_sql2k_type(params[i])
            # JDBC parameter indexes are 1-based.
            j_pstmt.setObject(i+1, param_obj)
    if DEBUG:
        print "JDBC statement: ", j_sql
        print "JDBC Params: ", params
    j_pstmt.execute()
    rset = j_pstmt.getResultSet()
    meta = rset.getMetaData()
    col_count = meta.getColumnCount()
    # Generate the (table_alias, col_alias) tuple list
    col_tuples = extract_colnames(j_sql)
    col_defs = []
    # Collect one definition dict per result column (1-based in JDBC).
    for col_num in range(1, col_count+1):
        col_dict = {}
        col_dict['table_alias'] = col_tuples[col_num-1][0]
        col_dict['label'] = meta.getColumnLabel(col_num)
        col_dict['name'] = meta.getColumnName(col_num).replace("-", '__')
        col_dict['sql_type'] = meta.getColumnTypeName(col_num)
        col_dict['prec'] = meta.getPrecision(col_num)
        col_dict['scale'] = meta.getScale(col_num)
        col_dict['nullable'] = meta.isNullable(col_num) == 1
        if col_dict['sql_type'] == 'ntext' and col_dict['prec'] > 8000:
            # This looks like a dodgy declaration - just force it to be blank
            # and let SQL Server use the default size
            col_dict['prec'] = ''
        col_defs.append(col_dict)
    rset.close()
    j_pstmt.close()
    # Now - reconstitute the table defintion based on the column definition
    col_sql = []
    # Note that we have _0xdj_ between the table alias and colname. Use that to coerce the values back
    reverse_types = {
        'int': "%(table_alias)s_0xdj_%(name)s int " ,
        'bit': '%(table_alias)s_0xdj_%(name)s bit ',
        'datetime': "%(table_alias)s_0xdj_%(name)s %(sql_type)s " ,
        'smalldatetime': "%(table_alias)s_0xdj_%(name)s %(sql_type)s " ,
        'numeric': "%(table_alias)s_0xdj_%(name)s %(sql_type)s (%(prec)s, %(scale)s) " ,
        'double': "%(table_alias)s_0xdj_%(name)s double precision " ,
        'smallint': "%(table_alias)s_0xdj_%(name)s int " ,
        'nvarchar': "%(table_alias)s_0xdj_%(name)s %(sql_type)s (%(prec)s) COLLATE SQL_Latin1_General_CP1_CI_AS " ,
        'ntext': "%(table_alias)s_0xdj_%(name)s %(sql_type)s (%(prec)s) COLLATE SQL_Latin1_General_CP1_CI_AS " ,
    }
    for cdef in col_defs:
        # Key on the first word of the reported type (e.g. "int identity").
        key = cdef['sql_type'].split()[0]
        value = reverse_types[key]
        if key == 'ntext' and cdef['prec'] == '':
            # Drop the brackets around the ntext size declaration
            value = "%(table_alias)s_0xdj_%(name)s %(sql_type)s COLLATE SQL_Latin1_General_CP1_CI_AS "
        fragment = value % cdef
        if cdef['nullable']:
            fragment += "NULL "
        else:
            fragment += "NOT NULL "
        col_sql.append(fragment)
    # djy_sql2k_sort_id is an IDENTITY column: it records insertion order
    # so the pagination proc can sort rows back into query order.
    table_sql = '''
    CREATE TABLE #temp_table (
    djy_sql2k_sort_id int IDENTITY (1, 1) NOT NULL,
    %s
    )
    ''' % ', \n'.join(col_sql)
    create_cur = self.connection.cursor()
    if DEBUG:
        print table_sql
    create_cur.execute(table_sql)
    create_cur.close()
    shim_cur = self.connection.cursor()
    shim_col_names = ', '.join(["%s_0xdj_%s" % (cdef['table_alias'], cdef['name']) for cdef in col_defs])
    # Second execution of the original query, this time INSERTing into
    # the temp table so rows get stamped with the sort id.
    shim_sql = "insert into #temp_table (%s) %s" % (shim_col_names, sql)
    if DEBUG:
        print "Insertion SQL: ", shim_sql
        print "Insertion Params: ", params
    shim_cur.execute(shim_sql, params)
    shim_cur.close()
    select_sql = "select %s, djy_sql2k_sort_id from #temp_table" % shim_col_names
    if DEBUG:
        print "Select SQL: ", select_sql
    return select_sql
_classes[QueryClass] = SqlServerQuery
return SqlServerQuery
def extract_colnames(j_sql):
    '''
    Return 2-tuples of (table_alias, col_name) for the columns selected by
    *j_sql*.  Assumes the uppercase keywords "SELECT ", "DISTINCT " and
    " FROM" that Django's SQL compiler emits.
    '''
    j_sql = j_sql.replace("SELECT ",'').strip()
    j_sql = j_sql.replace("DISTINCT ",'').strip()
    # Bug fix: str.find returns -1 when " FROM" is absent, and slicing
    # with [: -1] silently chopped the last character of the column list.
    # Only truncate when the keyword is actually present.
    from_idx = j_sql.find(" FROM")
    if from_idx != -1:
        j_sql = j_sql[:from_idx]
    return _tuplize(_tokenize(j_sql))
def _tokenize(input):
'''
Tokenize input using brackets as a stack and commas to denote terminators
'''
stack = 0
buffer = ''
results = []
for ch in input:
if ch == ',' and stack == 0:
results.append(buffer.strip())
buffer = ''
continue
elif ch == '(':
stack += 1
elif ch == ')':
stack -= 1
buffer += ch
results.append(buffer)
return results
def _tuplize(col_names):
result = []
for cname in col_names:
if ' AS ' in cname:
col_alias = cname.split(' AS ')[-1].strip()
else:
col_alias= cname.strip()
tuple = []
if '.' in col_alias:
for part in col_alias.split("."):
if part.startswith("[") and part.endswith("]"):
tuple.append(part[1:-1])
else:
tuple.append(part)
else:
tuple = ['', col_alias]
result.append(tuple)
return result
| Python |
from django.db.backends import BaseDatabaseIntrospection
from com.ziclix.python.sql import zxJDBC as Database
SQL_AUTOFIELD = -777555
class DatabaseIntrospection(BaseDatabaseIntrospection):
    '''
    Schema introspection for SQL Server via zxJDBC: maps JDBC type codes
    to Django field class names and reads table/column/constraint data
    from the INFORMATION_SCHEMA views (plus sys.* on SQL Server 2005).
    '''
    # JDBC type code -> Django field class name.  SQL_AUTOFIELD is our own
    # sentinel for IDENTITY columns (see get_table_description).
    data_types_reverse = {
        SQL_AUTOFIELD: 'AutoField',
        Database.BIGINT: 'IntegerField',
        Database.BIT: 'BooleanField',
        Database.CHAR: 'CharField',
        Database.DECIMAL: 'DecimalField',
        Database.DOUBLE: 'FloatField',
        Database.FLOAT: 'FloatField',
        #Database.SQL_GUID: 'TextField',
        Database.INTEGER: 'IntegerField',
        #Database.LONGVARBINARY: ,
        #Database.LONGVARCHAR: ,
        Database.NUMERIC: 'DecimalField',
        Database.REAL: 'FloatField',
        Database.SMALLINT: 'SmallIntegerField',
        Database.TINYINT: 'SmallIntegerField',
        # NOTE(review): DATETIME maps to DateField rather than
        # DateTimeField (TIMESTAMP below covers that) - confirm intended.
        Database.DATETIME: 'DateField',
        Database.TIME: 'TimeField',
        Database.TIMESTAMP: 'DateTimeField',
        #Database.SQL_VARBINARY: ,
        Database.VARCHAR: 'TextField',
        Database.NCHAR: 'CharField',
        Database.LONGNVARCHAR: 'TextField',
        Database.NVARCHAR: 'TextField',
    }
    def get_table_list(self, cursor):
        """
        Returns a list of table names in the current database.
        """
        # TABLES: http://msdn2.microsoft.com/en-us/library/ms186224.aspx
        cursor.execute("SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE = 'BASE TABLE'")
        return [row[0] for row in cursor.fetchall()]
    def _is_auto_field(self, cursor, table_name, column_name):
        """
        Checks whether column is Identity
        """
        # COLUMNPROPERTY: http://msdn2.microsoft.com/en-us/library/ms174968.aspx
        # NOTE(review): the table name is passed to OBJECT_ID() already
        # bracket-quoted via quote_name - verify OBJECT_ID accepts that form.
        cursor.execute("SELECT COLUMNPROPERTY(OBJECT_ID(%s), %s, 'IsIdentity')",
            (self.connection.ops.quote_name(table_name), column_name))
        return cursor.fetchall()[0][0]
    def get_table_description(self, cursor, table_name, identity_check=True):
        """Returns a description of the table, with DB-API cursor.description interface.
        The 'auto_check' parameter has been added to the function argspec.
        If set to True, the function will check each of the table's fields for the
        IDENTITY property (the IDENTITY property is the MSSQL equivalent to an AutoField).
        When a field is found with an IDENTITY property, it is given a custom field number
        of SQL_AUTOFIELD, which maps to the 'AutoField' value in the DATA_TYPES_REVERSE dict.
        """
        # cursor.columns() is zxJDBC's DatabaseMetaData.getColumns; the
        # indexes pulled below are name, type code, size, etc.
        columns = [[c[3], c[4], None, c[6], c[6], c[8], c[10]] for c in cursor.columns(table=table_name)]
        items = []
        for column in columns:
            if identity_check and self._is_auto_field(cursor, table_name, column[0]):
                column[1] = SQL_AUTOFIELD
            # presumably: NVARCHAR shorter than 4000 should introspect as
            # a sized CharField (NCHAR mapping) rather than TextField -
            # verify against data_types_reverse.
            if column[1] == Database.NVARCHAR and column[3] < 4000:
                column[1] = Database.NCHAR
            items.append(column)
        return items
    def _name_to_index(self, cursor, table_name):
        """
        Returns a dictionary of {field_name: field_index} for the given table.
        Indexes are 0-based.
        """
        return dict([(d[0], i) for i, d in enumerate(self.get_table_description(cursor, table_name, identity_check=False))])
    def get_relations(self, cursor, table_name):
        """
        Returns a dictionary of {field_index: (field_index_other_table, other_table)}
        representing all relationships to the given table. Indexes are 0-based.
        """
        # CONSTRAINT_COLUMN_USAGE: http://msdn2.microsoft.com/en-us/library/ms174431.aspx
        # CONSTRAINT_TABLE_USAGE: http://msdn2.microsoft.com/en-us/library/ms179883.aspx
        # REFERENTIAL_CONSTRAINTS: http://msdn2.microsoft.com/en-us/library/ms179987.aspx
        # TABLE_CONSTRAINTS: http://msdn2.microsoft.com/en-us/library/ms181757.aspx
        table_index = self._name_to_index(cursor, table_name)
        sql = """
        SELECT e.COLUMN_NAME AS column_name,
        c.TABLE_NAME AS referenced_table_name,
        d.COLUMN_NAME AS referenced_column_name
        FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS a
        INNER JOIN INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS AS b
        ON a.CONSTRAINT_NAME = b.CONSTRAINT_NAME
        INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_TABLE_USAGE AS c
        ON b.UNIQUE_CONSTRAINT_NAME = c.CONSTRAINT_NAME
        INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS d
        ON c.CONSTRAINT_NAME = d.CONSTRAINT_NAME
        INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS e
        ON a.CONSTRAINT_NAME = e.CONSTRAINT_NAME
        WHERE a.TABLE_NAME = %s AND a.CONSTRAINT_TYPE = 'FOREIGN KEY'"""
        cursor.execute(sql, (table_name,))
        return dict([(table_index[item[0]], (self._name_to_index(cursor, item[1])[item[2]], item[1]))
                     for item in cursor.fetchall()])
    def get_indexes(self, cursor, table_name):
        """
        Returns a dictionary of fieldname -> infodict for the given table,
        where each infodict is in the format:
        {'primary_key': boolean representing whether it's the primary key,
        'unique': boolean representing whether it's a unique index,
        'db_index': boolean representing whether it's a non-unique index}
        """
        # CONSTRAINT_COLUMN_USAGE: http://msdn2.microsoft.com/en-us/library/ms174431.aspx
        # TABLE_CONSTRAINTS: http://msdn2.microsoft.com/en-us/library/ms181757.aspx
        pk_uk_sql = """
        SELECT b.COLUMN_NAME, a.CONSTRAINT_TYPE
        FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS a
        INNER JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS b
        ON a.CONSTRAINT_NAME = b.CONSTRAINT_NAME AND a.TABLE_NAME = b.TABLE_NAME
        WHERE a.TABLE_NAME = %s AND (CONSTRAINT_TYPE = 'PRIMARY KEY' OR CONSTRAINT_TYPE = 'UNIQUE')"""
        field_names = [item[0] for item in self.get_table_description(cursor, table_name, identity_check=False)]
        # results maps column name -> 'PRIMARY KEY' / 'UNIQUE' / 'IX'
        indexes, results = {}, {}
        cursor.execute(pk_uk_sql, (table_name,))
        data = cursor.fetchall()
        if data:
            results.update(data)
        if self.connection.sqlserver_version >= 2005:
            # non-unique, non-compound indexes, only in SS2005?
            ix_sql = """
            SELECT DISTINCT c.name
            FROM sys.columns c
            INNER JOIN sys.index_columns ic
            ON ic.object_id = c.object_id AND ic.column_id = c.column_id
            INNER JOIN sys.indexes ix
            ON ix.object_id = ic.object_id AND ix.index_id = ic.index_id
            INNER JOIN sys.tables t
            ON t.object_id = ix.object_id
            WHERE ix.object_id IN (
            SELECT ix.object_id
            FROM sys.indexes ix
            GROUP BY ix.object_id, ix.index_id
            HAVING count(1) = 1)
            AND ix.is_primary_key = 0
            AND ix.is_unique_constraint = 0
            AND t.name = %s"""
            cursor.execute(ix_sql, (table_name,))
            for column in [r[0] for r in cursor.fetchall()]:
                # Constraints win over plain indexes; only record 'IX' for
                # columns not already marked PK/UNIQUE.
                if column not in results:
                    results[column] = 'IX'
        for field in field_names:
            val = results.get(field, None)
            indexes[field] = dict(primary_key=(val=='PRIMARY KEY'), unique=(val=='UNIQUE'), db_index=(val=='IX'))
        return indexes
| Python |
'''
We define a custom command to install the stored procedures into MSSQL2K
'''
from django.core.management.base import NoArgsCommand
from django.core.management.color import no_style
from optparse import make_option
import sys
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list
help = "Install the stored procedures required to make SQL Server 2000 play nice"
def handle_noargs(self, **options):
from django.db import connection, transaction, models
from django.conf import settings
from django.core.management.sql import custom_sql_for_model, emit_post_sync_signal
verbosity = int(options.get('verbosity', 1))
interactive = options.get('interactive')
show_traceback = options.get('traceback', False)
self.style = no_style()
cursor = connection.cursor()
print "SQL Server 2000: Installing pagination stored procedure"
cursor.execute(RETURN_PAGE_SQL)
print "SQL Server 2000: Installing regular expression support"
cursor.execute(REGEX_FUNC)
transaction.commit_unless_managed()
# T-SQL source for the ReturnPage stored procedure: emulates LIMIT/OFFSET on
# SQL Server 2000 by building a keyset-style WHERE clause from @OrderBy and
# using SET ROWCOUNT to skip to @StartRow and fetch through @EndRow.
# Installed by this command's handle_noargs; invoked from query.py's
# _setup_pagination as "returnpage(?, ?, ?, ?)".
RETURN_PAGE_SQL = '''
CREATE PROCEDURE ReturnPage @query varchar(2000), @OrderBy varchar(2000),
@StartRow int, @EndRow int
AS
BEGIN
declare @ColList varchar(2000);
declare @Where varchar(2000);
declare @i int;
declare @i2 int;
declare @tmp varchar(2000);
declare @dec varchar(2000);
declare @f varchar(100);
declare @d varchar(100);
declare @Symbol char(2);
declare @SQL varchar(5000);
declare @Sort varchar(2000);
set @Sort = @OrderBy + ', '
set @dec = ''
set @Where = ''
set @SQL = ''
set @i = charindex(',' , @Sort)
while @i != 0
begin
set @tmp = left(@Sort,@i-1)
set @i2 = charindex(' ', @tmp)
set @f = ltrim(rtrim(left(@tmp,@i2-1)))
set @d = ltrim(rtrim(substring(@tmp,@i2+1,100)))
set @Sort = rtrim(ltrim(substring(@Sort,@i+1,100)))
set @i = charindex(',', @Sort)
set @symbol = case when @d = 'ASC' then '>' else '<' end +
case when @i=0 then '=' else '' end
set @dec = @dec + 'declare @' + @f + ' sql_variant; '
set @ColList = isnull(replace(replace(@colList,'>','='),'<','=') + ' and ','') +
@f + @Symbol + ' @' + @f
set @Where = @Where + ' OR (' + @ColList + ') '
set @SQL = @SQL + ', @' + @f + '= ' + @f
end
set @SQL = @dec + ' ' +
'SET ROWCOUNT ' + convert(varchar(10), @StartRow) + '; ' +
'SELECT ' + substring(@SQL,3,7000) + ' from (' + @query + ') a ORDER BY ' +
@OrderBy + '; ' + 'SET ROWCOUNT ' +
convert(varchar(10), 1 + @EndRow - @StartRow) + '; ' +
'select * from (' + @query + ') a WHERE ' +
substring(@Where,4,7000) + ' ORDER BY ' + @OrderBy + '; SET ROWCOUNT 0;'
exec(@SQL)
END
'''
# T-SQL source for dbo.regex: a scalar UDF that tests @source against a
# VBScript regular expression via the sp_OA* OLE automation procedures.
# Returns bit 1 on match; 0 on no match or on any OLE failure (each @hr
# check bails out with 0 rather than raising).  Backs regex/iregex lookups
# (see DatabaseOperations.regex_lookup).
REGEX_FUNC = '''
CREATE FUNCTION dbo.regex
(
@source varchar(5000),
@regexp varchar(1000),
@ignorecase bit = 0
)
RETURNS bit
AS
BEGIN
DECLARE @hr integer
DECLARE @objRegExp integer
DECLARE @objMatches integer
DECLARE @objMatch integer
DECLARE @count integer
DECLARE @results bit
EXEC @hr = sp_OACreate 'VBScript.RegExp', @objRegExp OUTPUT
IF @hr <> 0 BEGIN
SET @results = 0
RETURN @results
END
EXEC @hr = sp_OASetProperty @objRegExp, 'Pattern', @regexp
IF @hr <> 0 BEGIN
SET @results = 0
RETURN @results
END
EXEC @hr = sp_OASetProperty @objRegExp, 'Global', false
IF @hr <> 0 BEGIN
SET @results = 0
RETURN @results
END
EXEC @hr = sp_OASetProperty @objRegExp, 'IgnoreCase', @ignorecase
IF @hr <> 0 BEGIN
SET @results = 0
RETURN @results
END
EXEC @hr = sp_OAMethod @objRegExp, 'Test', @results OUTPUT, @source
IF @hr <> 0 BEGIN
SET @results = 0
RETURN @results
END
EXEC @hr = sp_OADestroy @objRegExp
IF @hr <> 0 BEGIN
SET @results = 0
RETURN @results
END
RETURN @results
END
'''
| Python |
from java.lang import Class
from java.lang import System
from java.io import PrintWriter
from java.sql import DriverManager
from java.sql import Connection
from java.sql import SQLException
# Here are the dbcp-specific classes.
# Note that they are only used in the setupDriver
# method. In normal use, your classes interact
# only with the standard JDBC API
from org.apache.commons.pool.impl import GenericObjectPool
from org.apache.commons.dbcp import PoolableConnectionFactory
from org.apache.commons.dbcp import BasicDataSource
from org.apache.commons.dbcp import DataSourceConnectionFactory
import time
class ManualPoolingDriver(object):
    '''
    Registers a named Commons-DBCP connection pool (Jython/Java) so callers
    can connect with the URL "jdbc:apache:commons:dbcp:<pool_name>".
    '''
    def __init__(self, connectURI, username, password, pool_name):
        # Object pool backing the PoolableConnectionFactory below.
        self.connectionPool = GenericObjectPool(None)
        self._pool_name = pool_name
        # NOTE(review): BasicDataSource is itself a pooling data source;
        # layering it under another PoolableConnectionFactory/pool looks
        # redundant - confirm this double layer is intentional.
        source = BasicDataSource()
        source.setUrl(connectURI)
        source.setUsername(username)
        source.setPassword(password)
        source.setInitialSize(1) # Number of connections to start with
        source.setMinIdle(5) # Allow a bottom of 5 idle connections
        source.setMaxActive(10) # Max of 10 database connection
        source.setDefaultTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED)
        source.setMinEvictableIdleTimeMillis(500)
        self.connectionFactory = DataSourceConnectionFactory(source)
        # Now we'll create the PoolableConnectionFactory, which wraps
        # the "real" Connections created by the ConnectionFactory with
        # the classes that implement the pooling functionality.
        self.poolableConnectionFactory = PoolableConnectionFactory(self.connectionFactory,
                                                                   self.connectionPool,
                                                                   None,
                                                                   None,
                                                                   False,
                                                                   True)
        # Finally, we create the PoolingDriver itself...
        Class.forName("org.apache.commons.dbcp.PoolingDriver")
        driver = DriverManager.getDriver("jdbc:apache:commons:dbcp:")
        # ...and register our pool with it.
        driver.registerPool(self._pool_name, self.connectionPool)
        # Now we can just use the connect string "jdbc:apache:commons:dbcp:<pool_name>"
        # to access our pool of Connections.
    def printDriverStats(self):
        # Dump active/idle counts for this named pool to stdout (debugging aid).
        driver = DriverManager.getDriver("jdbc:apache:commons:dbcp:")
        connectionPool = driver.getConnectionPool(self._pool_name)
        print "NumActive: " + str(connectionPool.getNumActive())
        print "NumIdle: " + str(connectionPool.getNumIdle())
    def shutdownDriver(self):
        # Close and deregister the named pool.
        driver = DriverManager.getDriver("jdbc:apache:commons:dbcp:")
        driver.closePool(self._pool_name)
| Python |
from django.db.backends import BaseDatabaseOperations
import datetime
import time
import query
class DatabaseOperations(BaseDatabaseOperations):
    '''
    SQL generation helpers for SQL Server 2000, including emulation of
    features the engine lacks natively (regex lookups via dbo.regex,
    LIMIT/OFFSET via the ReturnPage proc, date truncation via CONVERT).
    '''
    # Define the parts of an ODBC date string
    # so we can do substring operations to match
    # ("YYYY-MM-DD ..." -> 1-based (start, length) pairs).
    DATE_PARTS = {'year': (1,4),
                  'month': (6,2),
                  'day': (9,2)}
    def regex_lookup(self, lookup_type):
        """
        Returns the string to use in a query when performing regular expression
        lookups (using "regex" or "iregex"). The resulting string should
        contain a '%s' placeholder for the column being searched against.
        If the feature is not supported (or part of it is not supported), a
        NotImplementedError exception can be raised.
        """
        # dbo.regex is the UDF installed by the sql2kext management command.
        if lookup_type == 'regex':
            ignore_case = 0
        else:
            ignore_case = 1
        return "dbo.regex(%%s, %%s, %s) = 1" % ignore_case
    def start_transaction_sql(self):
        """
        Returns the SQL statement required to start a transaction.
        """
        return "BEGIN TRANSACTION"
    def date_extract_sql(self, lookup_type, field_name):
        # The column arrives as an ODBC-format date string (see
        # field_cast_sql), so the part can be sliced out positionally.
        start, end = self.DATE_PARTS[lookup_type]
        return "CONVERT(INT, SUBSTRING(%s, %s, %s))" % (self.quote_name(field_name), start, end)
    def _unquote_fieldname(self, fieldname):
        '''
        Try to unquote the fieldname so that SQL Server doesn't assign a
        weird semi-random name to the converted column.
        We *only* return the column name part though - we drop the table name.
        This method is really only used by the date_trunc_sql method and isn't meant
        for any other use.
        '''
        assert fieldname.startswith('[') and fieldname.endswith(']')
        # Take the last dotted part and strip its surrounding brackets.
        short_name = fieldname.split('.')[-1][1:-1]
        return short_name
    def date_trunc_sql(self, lookup_type, field_name):
        # Truncate a datetime column to year/month/day granularity by
        # rebuilding it from its DATEPARTs, aliased back to the bare
        # column name.
        quoted_field_name = self.quote_name(field_name)
        short_name = self.quote_name(self._unquote_fieldname(quoted_field_name))
        sql_dict = {'name': quoted_field_name, 'short_name': short_name}
        if lookup_type == 'year':
            return "CONVERT(datetime, CONVERT(varchar, DATEPART(year, %(name)s)) + '/01/01') AS %(short_name)s" % sql_dict
        if lookup_type == 'month':
            return "CONVERT(datetime, CONVERT(varchar, DATEPART(year, %(name)s)) + '/' + CONVERT(varchar, DATEPART(month, %(name)s)) + '/01') AS %(short_name)s" %\
                sql_dict
        if lookup_type == 'day':
            return "CONVERT(datetime, CONVERT(varchar(12), %(name)s)) AS %(short_name)s" % sql_dict
    def last_insert_id(self, cursor, table_name, pk_name):
        # NOTE(review): the bracket-quoted table name is passed as a bind
        # parameter to IDENT_CURRENT - confirm the server resolves the
        # quoted form correctly.
        cursor.execute("SELECT CAST(IDENT_CURRENT(%s) AS bigint)", [self.quote_name(table_name)])
        return cursor.fetchone()[0]
    def no_limit_value(self):
        # No "unlimited" marker is needed; pagination is handled elsewhere.
        return None
    def prep_for_like_query(self, x):
        """Prepares a value for use in a LIKE query."""
        # Escape LIKE metacharacters.  NOTE(review): backslash escaping is
        # only honoured by SQL Server with an explicit ESCAPE clause -
        # verify the generated LIKE uses one.
        from django.utils.encoding import smart_unicode
        return (
            smart_unicode(x).\
                replace("\\", "\\\\").\
                replace("%", "\%").\
                replace("_", "\_").\
                replace("[", "\[").\
                replace("]", "\]")
            )
    def query_class(self, DefaultQueryClass):
        # Hand back the SQL Server specialised query class factory.
        return query.query_class(DefaultQueryClass)
    def quote_name(self, name):
        if 'CONVERT(' in name:
            # SQL Server has a fragile parser. If we'v already applied CONVERT on a column, treat this
            # column as pre-quoted. No - it doesn't make any sense. Don't think too hard about this.
            return name
        if name.startswith('[') and name.endswith(']'):
            return name # already quoted
        return '[%s]' % name
    def random_function_sql(self):
        return 'RAND()'
    def sql_flush(self, style, tables, sequences):
        """
        Returns a list of SQL statements required to remove all data from
        the given database tables (without actually removing the tables
        themselves).
        The `style` argument is a Style object as returned by either
        color_style() or no_style() in django.core.management.color.
        Originally taken from django-pyodbc project.
        """
        if not tables:
            return list()
        qn = self.quote_name
        # Cannot use TRUNCATE on tables that are referenced by a FOREIGN KEY; use DELETE instead.
        # (which is slow)
        from django.db import connection
        cursor = connection.cursor()
        # Try to minimize the risks of the braindeaded inconsistency in
        # DBCC CHEKIDENT(table, RESEED, n) behavior.
        # Reseed to 0 for tables that had rows, 1 for empty ones.
        seqs = []
        for seq in sequences:
            cursor.execute("SELECT COUNT(*) FROM %s" % qn(seq["table"]))
            rowcnt = cursor.fetchone()[0]
            elem = dict()
            if rowcnt:
                elem['start_id'] = 0
            else:
                elem['start_id'] = 1
            elem.update(seq)
            seqs.append(elem)
        cursor.execute("SELECT TABLE_NAME, CONSTRAINT_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS")
        fks = cursor.fetchall()
        sql_list = list()
        # Turn off constraints.
        sql_list.extend(['ALTER TABLE %s NOCHECK CONSTRAINT %s;' % (
            qn(fk[0]), qn(fk[1])) for fk in fks])
        # Delete data from tables.
        sql_list.extend(['%s %s %s;' % (
            style.SQL_KEYWORD('DELETE'),
            style.SQL_KEYWORD('FROM'),
            style.SQL_FIELD(qn(t))
            ) for t in tables])
        # Reset the counters on each table.
        sql_list.extend(['%s %s (%s, %s, %s) %s %s;' % (
            style.SQL_KEYWORD('DBCC'),
            style.SQL_KEYWORD('CHECKIDENT'),
            style.SQL_FIELD(qn(seq["table"])),
            style.SQL_KEYWORD('RESEED'),
            style.SQL_FIELD('%d' % seq['start_id']),
            style.SQL_KEYWORD('WITH'),
            style.SQL_KEYWORD('NO_INFOMSGS'),
            ) for seq in seqs])
        # Turn constraints back on.
        sql_list.extend(['ALTER TABLE %s CHECK CONSTRAINT %s;' % (
            qn(fk[0]), qn(fk[1])) for fk in fks])
        return sql_list
    def tablespace_sql(self, tablespace, inline=False):
        return "ON %s" % self.quote_name(tablespace)
    def value_to_db_datetime(self, value):
        if value is None:
            return None
        if value.tzinfo is not None:
            raise ValueError("SQL Server 2005 does not support timezone-aware datetimes.")
        # SQL Server 2005 doesn't support microseconds
        return value.replace(microsecond=0)
    def value_to_db_time(self, value):
        # MS SQL 2005 doesn't support microseconds
        #...but it also doesn't really suport bare times
        if value is None:
            return None
        return value.replace(microsecond=0)
    def value_to_db_decimal(self, value, max_digits, decimal_places):
        if value is None or value == '':
            return None
        return value # Should be a decimal type (or string)
    def year_lookup_bounds(self, value):
        """
        Returns a two-elements list with the lower and upper bound to be used
        with a BETWEEN operator to query a field value using a year lookup
        `value` is an int, containing the looked-up year.
        """
        first = '%s-01-01 00:00:00'
        second = '%s-12-31 23:59:59'
        return [first % value, second % value]
    def field_cast_sql(self, db_type):
        """
        Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary
        to cast it before using it in a WHERE statement. Note that the
        resulting string should contain a '%s' placeholder for the column being
        searched against.
        """
        if db_type is None:
            return '%s'
        if 'DATETIME' == db_type.upper():
            # We need to convert date and datetime columns into
            # ODBC canonical format.
            # See: http://msdn.microsoft.com/en-us/library/ms187928.aspx
            return "CONVERT(varchar, %s, 120)"
        elif 'SMALLDATETIME' == db_type.upper():
            # Same conversion, trimmed to the date-only prefix.
            return "SUBSTRING(CONVERT(varchar, %s, 120), 1, 10)"
        return '%s'
| Python |
from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
'''
Overloaded bits of the database creation code
'''
# Django field class -> SQL Server 2000 column type.  The %(...)s tokens
# are interpolated from the field's attributes by the base creation
# machinery (e.g. max_length, max_digits, decimal_places, column).
data_types = {
    'AutoField': 'int IDENTITY (1, 1)',
    'BooleanField': 'bit',
    'CharField': 'nvarchar(%(max_length)s)',
    'CommaSeparatedIntegerField': 'nvarchar(%(max_length)s)',
    'DateField': 'smalldatetime',
    'DateTimeField': 'datetime',
    'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
    'FileField': 'nvarchar(%(max_length)s)',
    'FilePathField': 'nvarchar(%(max_length)s)',
    'FloatField': 'double precision',
    'IntegerField': 'int',
    'IPAddressField': 'nvarchar(15)',
    'NullBooleanField': 'bit',
    'OneToOneField': 'int',
    'PositiveIntegerField': 'int CHECK ([%(column)s] >= 0)',
    'PositiveSmallIntegerField': 'smallint CHECK ([%(column)s] >= 0)',
    'SlugField': 'nvarchar(%(max_length)s)',
    'SmallIntegerField': 'smallint',
    'TextField': 'ntext',
    'TimeField': 'datetime',
}
def __init__(self, connection):
    """
    Track the extra state needed to emulate deferrable constraints on
    SQL Server 2000 across the whole schema-creation run.
    """
    super(DatabaseCreation, self).__init__(connection)
    # Unique-but-nullable fields are handled specially, so remember them.
    self.unique_fields = []
    # We need to keep track of all seen models and created models for
    # ourself so that we can properly generate all the constraint triggers.
    self._seen_models = set()
    self._created_models = set()
    # SQL for triggers already emitted, to avoid duplicates.
    self._trigger_sql = set()
def create_test_db(self, verbosity=1, autoclobber=False):
    """
    Create the test database as usual, then immediately install the
    SQL Server 2000 helper objects (ReturnPage proc, dbo.regex) via the
    sql2kext management command so tests can rely on them.
    """
    result = super(DatabaseCreation, self).create_test_db(verbosity, autoclobber)
    # Force the SQL2k command to run now.
    from jtds.mssql2kext.management.commands import sql2kext
    sql2kext.Command().handle_noargs()
    return result
def _destroy_test_db(self, test_database_name, verbosity):
    """
    Drop the test database.  SET SINGLE_USER WITH ROLLBACK IMMEDIATE
    first kicks off any lingering connections so the DROP cannot fail
    with "database in use".
    """
    cursor = self.connection.cursor()
    # DDL below must not run inside a transaction.
    if not self.connection.connection.autocommit:
        self.connection.connection.commit()
    self.connection.connection.autocommit = True
    cursor.execute("ALTER DATABASE %s SET SINGLE_USER WITH ROLLBACK IMMEDIATE " % self.connection.ops.quote_name(test_database_name))
    cursor.execute("DROP DATABASE %s" %self.connection.ops.quote_name(test_database_name))
    self.connection.close()
def sql_for_many_to_many(self, model, style):
    """
    We need to inject the trigger code for a model after all the tables for this application have been
    created.
    The code goes in this method only because it's known that the syncdb command in
    django.core.management.commands.syncdb call this last.
    A better option would be to have a signal hook after all models have been
    created, but before the the applications are signalled so that the database
    backend can respond to creation prior to individual applications respond.

    The generated INSTEAD OF DELETE triggers null out nullable foreign
    keys (both outbound and inbound) before performing the actual delete,
    emulating deferrable constraints on SQL Server 2000.
    """
    final_output = super(DatabaseCreation, self).sql_for_many_to_many(model, style)
    from django.db import models
    opts = model._meta
    app_label = opts.app_label
    # Resolve the application module owning this model.
    app = [app for app in models.get_apps() if app.__name__.split('.')[-2] == app_label][0]
    app_model_set = set(models.get_models(app))
    # Wait until the app_model_set is finished loading
    if app_model_set != (app_model_set & self._seen_models | set([model])):
        return final_output
    # This is the last model - we can safely assume it's ok to
    # inject all the constraint trigger code now
    # Map each model to its list of foreign-key fields.
    model_fkeys = {}
    for model in app_model_set:
        opts = model._meta
        model_fkeys[model] = []
        for f in opts.local_fields:
            if f.rel:
                model_fkeys[model].append(f)
    qn = self.connection.ops.quote_name
    for model, model_keys in model_fkeys.items():
        sql_block = []
        # For each model, we want the list of all foreign keys
        # to clear out references to other objects
        # and to clear all references
        tmpl = '''UPDATE %(table)s SET %(this_to_rel)s = NULL where %(this_pkey)s in (SELECT %(this_pkey)s from deleted)'''
        opts = model._meta
        table = opts.db_table
        this_pkey = [f for f in opts.local_fields if f.primary_key][0].column
        # Outbound FKs: null our own nullable references before deleting.
        for model_f in model_keys:
            sql_dict = {'table': qn(table),
                        'this_to_rel': qn(model_f.column),
                        'this_pkey': qn(this_pkey),}
            if model_f.null:
                sql_block.append(tmpl % sql_dict)
        # Generate all inbound relationships and clear the foreign keys
        for inbound_model in app_model_set:
            inbound_rels = [(inbound_model, f) for f in model_fkeys[inbound_model] if f.rel.to == model]
            for in_model, in_f in inbound_rels:
                tmpl = '''UPDATE %(other_table)s SET %(fkey)s = NULL where %(fkey)s in (SELECT %(this_pkey)s from deleted)'''
                rel_opts = in_model._meta
                other_table = rel_opts.db_table
                sql_dict = {'other_table': qn(other_table),
                            'fkey': qn(in_f.column),
                            'this_pkey': qn(this_pkey),
                            }
                if in_f.null:
                    sql_block.append(tmpl % sql_dict)
        # Trigger name is made unique-ish with a hash of the table name.
        trigger_name = '%s_%x' % (table, abs(hash(table)))
        instead_of_sql = """
        CREATE TRIGGER %(instead_trigger_name)s ON %(table)s
        INSTEAD OF DELETE
        AS
        BEGIN
        %(sql)s
        DELETE FROM %(table)s WHERE %(this_pkey)s IN (SELECT %(this_pkey)s FROM deleted)
        print '%(escaped_sql)s'
        END
        ;
        """ % {
            'instead_trigger_name': qn('instead_%s' % trigger_name),
            'table': qn(table),
            'sql': '\n'.join([' %s' % stmt for stmt in sql_block]),
            'escaped_sql': ('\n'.join([' %s' % stmt for stmt in sql_block])).replace("'", "\\'"),
            'this_pkey': qn(this_pkey),
            }
        if instead_of_sql not in self._trigger_sql:
            # We only want to generate the instead trigger if there is an actual
            # code block
            if len(sql_block) <> 0:
                self._trigger_sql.add(instead_of_sql)
                final_output.append(instead_of_sql)
    return final_output
def sql_for_pending_references(self, model, style, pending_references):
    """
    Return SQL for the pending references to *model*.

    SQL Server 2000 emulates deferrable constraints with INSTEAD OF /
    AFTER DELETE triggers (generated in sql_for_many_to_many): nullable
    foreign keys are set to NULL before the row is actually deleted.
    The reference SQL itself is unchanged from the default
    implementation.

    Note that SQL Server 2000 will automatically delete triggers that are
    bound to tables when the table is dropped.
    """
    # The previous version took an unused shallow copy of
    # pending_references here; nothing ever read it, so it (and the
    # 'copy' import) have been removed.
    return super(DatabaseCreation, self).sql_for_pending_references(model, style, pending_references)
def sql_create_model(self, model, style, known_models=set()):
    '''
    Returns the SQL required to create a single model, as a tuple of:
    (list_of_sql, pending_references_dict)
    overload this to create a view with SCHEMABINDING applied to the original table
    to support fields marked as unique and nullable
    The key differences between this and the super class implementation is that
    we do not generate unique constriants for nullable field types, or
    unique_together fieldsets.
    '''
    # NOTE(review): known_models=set() is a mutable default argument shared
    # across calls -- this mirrors Django's own signature, so it is kept.
    self._seen_models.update(known_models)
    self._created_models.add(model)
    from django.db import models
    opts = model._meta
    final_output = []
    table_output = []
    pending_references = {}
    qn = self.connection.ops.quote_name
    for f in opts.local_fields:
        col_type = f.db_type()
        tablespace = f.db_tablespace or opts.db_tablespace
        if col_type is None:
            # Skip ManyToManyFields, because they're not represented as
            # database columns in this table.
            continue
        # Make the definition (e.g. 'foo VARCHAR(30)') for this field.
        field_output = [style.SQL_FIELD(qn(f.column)),
                        style.SQL_COLTYPE(col_type)]
        field_output.append(style.SQL_KEYWORD('%sNULL' % (not f.null and 'NOT ' or '')))
        if f.primary_key:
            field_output.append(style.SQL_KEYWORD('PRIMARY KEY'))
        elif f.unique:
            if not f.null:
                field_output.append(style.SQL_KEYWORD('UNIQUE'))
            # Nullable unique fields get no inline constraint; they are
            # tracked here and enforced by the SCHEMABINDING view hack.
            self.unique_fields.append(f)
        if tablespace and f.unique:
            # We must specify the index tablespace inline, because we
            # won't be generating a CREATE INDEX statement for this field.
            field_output.append(self.connection.ops.tablespace_sql(tablespace, inline=True))
        if f.rel:
            ref_output, pending = self.sql_for_inline_foreign_key_references(f, known_models, style)
            if pending:
                # NOTE(review): list.append() returns None, so 'pr' is
                # always None; only the setdefault side effect matters.
                pr = pending_references.setdefault(f.rel.to, []).append((model, f))
            else:
                field_output.extend(ref_output)
        table_output.append(' '.join(field_output))
    if opts.order_with_respect_to:
        # Extra hidden column used by Django for order_with_respect_to.
        table_output.append(style.SQL_FIELD(qn('_order')) + ' ' + \
            style.SQL_COLTYPE(models.IntegerField().db_type()) + ' ' + \
            style.SQL_KEYWORD('NULL'))
    for field_constraints in opts.unique_together:
        contraint_fields = [opts.get_field(f) for f in field_constraints]
        null_allowed = [f for f in contraint_fields if f.null]
        # Only do an inline UNIQUE constraint if none of the unique_together columns
        # allow nulls. Otherwise - let the schemabinding hack build the unique index
        if len(null_allowed) == 0:
            table_output.append(style.SQL_KEYWORD('UNIQUE') + ' (%s)' % \
                ", ".join([style.SQL_FIELD(qn(opts.get_field(f).column)) for f in field_constraints]))
    full_statement = [style.SQL_KEYWORD('CREATE TABLE') + ' ' + style.SQL_TABLE(qn(opts.db_table)) + ' (']
    for i, line in enumerate(table_output): # Combine and add commas.
        full_statement.append('    %s%s' % (line, i < len(table_output)-1 and ',' or ''))
    full_statement.append(')')
    if opts.db_tablespace:
        full_statement.append(self.connection.ops.tablespace_sql(opts.db_tablespace))
    full_statement.append(';')
    final_output.append('\n'.join(full_statement))
    if self.unique_fields:
        # Emit the SCHEMABINDING view/index hack for UNIQUE+NULL support.
        final_output.extend(self._create_schemabinding_view(style, opts))
    if opts.has_auto_field:
        # Add any extra SQL needed to support auto-incrementing primary keys.
        auto_column = opts.auto_field.db_column or opts.auto_field.name
        autoinc_sql = self.connection.ops.autoinc_sql(opts.db_table, auto_column)
        if autoinc_sql:
            for stmt in autoinc_sql:
                final_output.append(stmt)
    return final_output, pending_references
def _create_schemabinding_view(self, style, opts):
    '''
    Walk the list of unique_fields and generate a view to enforce
    uniqueness on
    '''
    # Do a quick check to see if we have nullable and unique fields
    # defined
    if len([f for f in self.unique_fields if f.null and f.unique]) == 0:
        return []
    sql_stmts = []
    #sql_stmts.append("-- Start SCHEMABINDING hack for %s" % style.SQL_TABLE(qn(db_table)))
    db_table, local_fields = opts.db_table, opts.local_fields
    qn = self.connection.ops.quote_name
    # Build a SCHEMABINDING view over all columns, restricted to rows where
    # at least one nullable column is NOT NULL; unique indexes on the view
    # then enforce uniqueness while still allowing multiple NULLs.
    d = {'view_name': style.SQL_TABLE(qn("%s_vw" % db_table)),
         'fields': ', \n'.join(["    %s" % style.SQL_FIELD(qn(f.column)) for f in local_fields]),
         'table_name': style.SQL_TABLE(qn(db_table)),
         'null_parts': ' OR\n'.join(['%s IS NOT NULL' % style.SQL_FIELD(qn(f.column)) for f in local_fields if f.null]),
         }
    sql_parts = []
    sql_parts.append("CREATE VIEW %(view_name)s WITH SCHEMABINDING " % d)
    sql_parts.append(" AS")
    sql_parts.append(" SELECT")
    sql_parts.append(" %(fields)s" % d)
    sql_parts.append(" FROM")
    sql_parts.append("    [dbo].%(table_name)s" % d)
    sql_parts.append(" WHERE")
    sql_parts.append("    %(null_parts)s" % d)
    sql_stmts.append('\n'.join(sql_parts))
    sql_parts = []
    # Now create all the indices
    unique_nullable = [f for f in local_fields if f.null and f.unique]
    for i, f in enumerate(unique_nullable):
        d = {'vidx_name': style.SQL_TABLE(qn("%s_vidx_%s" % (db_table, i))),
             'idx_name': style.SQL_TABLE(qn("%s_idx_%s" % (db_table, i))),
             'table_name': style.SQL_TABLE(qn(db_table)),
             'view_name': style.SQL_TABLE(qn("%s_vw" % db_table)),
             'col_name': style.SQL_FIELD(qn(f.column)),
             }
        # The first index on the view must be UNIQUE CLUSTERED; subsequent
        # ones are plain UNIQUE indexes.
        if i == 0:
            sql_stmts.append("CREATE UNIQUE CLUSTERED INDEX %(vidx_name)s on %(view_name)s (%(col_name)s);" % d)
        else:
            sql_stmts.append("CREATE UNIQUE INDEX %(vidx_name)s on %(view_name)s (%(col_name)s);" % d)
        sql_stmts.append("CREATE INDEX %(idx_name)s on %(table_name)s (%(col_name)s);" % d)
    # To synthesize unique_together over fields where NULLs are allowed,
    # we create a view per unique_together clause
    for fc_idx, field_constraints in enumerate(opts.unique_together):
        fields = [opts.get_field(f) for f in field_constraints]
        # NOTE(review): unique_together_fields is computed but never used.
        unique_together_fields = set([f for f in opts.local_fields if f.null]).intersection(set(fields))
        null_bits = ['%s IS NOT NULL' % style.SQL_FIELD(qn(f.column)) for f in fields if f.null]
        if len(null_bits) == 0:
            # No NULLable columns, skip this
            continue
        d = {'view_name': style.SQL_TABLE(qn("%s_%s_utvw" % (db_table, fc_idx))),
             'fields': ', \n'.join([style.SQL_FIELD(qn(f.column)) for f in fields]),
             'table_name': style.SQL_TABLE(qn(db_table)),
             'null_parts': ' OR\n'.join(null_bits),
             }
        sql_parts = []
        sql_parts.append("CREATE VIEW %(view_name)s WITH SCHEMABINDING " % d)
        sql_parts.append(" AS")
        sql_parts.append(" SELECT")
        sql_parts.append(" %(fields)s" % d)
        sql_parts.append(" FROM")
        sql_parts.append("    [dbo].%(table_name)s" % d)
        sql_parts.append(" WHERE")
        sql_parts.append("    %(null_parts)s" % d)
        sql_stmts.append('\n'.join(sql_parts))
        d = {'vidx_name': style.SQL_TABLE(qn("%s_utidx_%s" % (db_table, fc_idx))),
             'view_name': style.SQL_TABLE(qn("%s_%s_utvw" % (db_table, fc_idx))),
             'table_name': style.SQL_TABLE(qn(db_table)),
             'col_names': ', '.join([style.SQL_FIELD(qn(f.column)) for f in fields]),
             }
        # Create a unique clustered index on the VIEW to enforce uniqueness
        # Note that the view itself will filter out the NULLable column
        sql_stmts.append("CREATE UNIQUE CLUSTERED INDEX %(vidx_name)s on %(view_name)s (%(col_names)s);" % d)
        # Now, finally create a NON-unique index across the unique_together fields on the TABLE
        # to provide index speed
        d = {'idx_name': style.SQL_TABLE(qn("%s_%s_ut_idx" % (db_table, fc_idx))),
             'table_name': style.SQL_TABLE(qn(db_table)),
             'col_name': ', '.join([style.SQL_FIELD(qn(f.column)) for f in fields]),
             }
        sql_stmts.append("CREATE INDEX %(idx_name)s on %(table_name)s (%(col_name)s);" % d)
    #sql_stmts.append("-- END SCHEMABINDING hack for %s" % style.SQL_TABLE(qn(db_table)))
    """
    Now for some closure magic. We just grab the first field in the local_fields list
    and obtain the post_create_sql code, substituting in a lambda function if nothing
    is available.
    We apply a closure and extends the post_create_sql method with the SQL we've just
    generated to synthesize proper UNIQUE+NULL capability
    """
    # We need to bind the sql_stmts to the first field
    field = opts.local_fields[0]
    def wrap_statements(old_post_create_sql, stmts):
        # Chain any pre-existing post_create_sql hook with our statements.
        def closure(style, db_table):
            result = []
            if old_post_create_sql:
                result.extend([sql for sql in old_post_create_sql(style, db_table)])
            result.extend(stmts)
            return result
        return closure
    old_func = getattr(field, 'post_create_sql', lambda x, y : [])
    field.post_create_sql = wrap_statements(old_func, sql_stmts)
    # The statements are emitted later via post_create_sql, not here.
    return []
# Stored procedure code
| Python |
# Ad-hoc Jython smoke-test script: calls the "returnpage" stored procedure
# through a pooled jTDS connection and dumps the typed result set rows.
from pprint import pprint
try:
    # Force the database driver to load
    from java.lang import Class
    cls = Class.forName("net.sourceforge.jtds.jdbc.Driver").newInstance()
    from jtds.mssql2k.pool import ManualPoolingDriver
    from com.ziclix.python.sql import zxJDBC as Database
except ImportError, e:
    from django.core.exceptions import ImproperlyConfigured
    raise ImproperlyConfigured("Error loading zxJDBC module: %s" % e)
# Hard-coded connection parameters for the local test database.
db_dict = {
    'DATABASE_NAME': 'test_zxjdbc.jtds',
    'DATABASE_HOST': 'localhost',
    'DATABASE_PORT': 1433,
}
pool_name = 'jdbc_pool'
print "Registering driver for : %s to [%s]" % (str(db_dict), pool_name)
# Register a commons-dbcp pooled driver under pool_name ('sa'/'sa' creds).
driver = ManualPoolingDriver("jdbc:jtds:sqlserver://%(DATABASE_HOST)s:%(DATABASE_PORT)s/%(DATABASE_NAME)s" % db_dict,
                             'sa',
                             'sa',
                             pool_name,
                             )
from java.sql import DriverManager
from java.sql import Types
# Connect through the pool registered above and invoke the procedure.
url = 'jdbc:apache:commons:dbcp:%s' % pool_name
conn = DriverManager.getConnection(url)
proc = conn.prepareCall("returnpage(?, ?, ?, ?)");
proc.setString('@query', "select * from foo_simple");
proc.setString('@orderby', "auto asc");
proc.setInt('@startrow', 5);
proc.setInt('@endrow', 12);
proc.execute()
rset = proc.getResultSet()
meta = rset.getMetaData()
# Reverse map of JDBC type codes to their names, printed for reference.
type_dict = {}
for key in dir(Types):
    type_dict[getattr(Types, key)] = key
pprint (type_dict)
# Map JDBC type codes to the matching typed getter on the result set.
getter = {1: rset.getString,
          2: rset.getLong,
          3: rset.getBigDecimal,
          4: rset.getInt,
          5: rset.getInt,
          6: rset.getFloat,
          7: rset.getFloat,
          8: rset.getDouble,
          12: rset.getString,
          16: rset.getBoolean,
          70: rset.getString,
          91: rset.getDate,
          92: rset.getTime,
          93: rset.getTimestamp,
          }
col_count = meta.getColumnCount()
# Choose the right getter per column up front (JDBC columns are 1-based).
col_getter = {}
for i in range(1, col_count+1):
    col_getter[i] = getter[meta.getColumnType(i)]
# Dump every row with the Python-side type of each column value.
while rset.next():
    for colnum in range(1, col_count+1):
        value = col_getter[colnum](colnum)
        print type(value), value,"|",
    print
print '-' * 20
| Python |
"""
jTDS/MSSQL database backend for Django.
Django uses this if the DATABASE_ENGINE setting is empty (None or empty string).
Each of these API functions, except connection.close(), raises
ImproperlyConfigured.
"""
try:
# Force the database driver to load
from java.lang import Class
cls = Class.forName("net.sourceforge.jtds.jdbc.Driver").newInstance()
from pool import ManualPoolingDriver
from com.ziclix.python.sql import zxJDBC as Database
from com.ziclix.python.sql import zxJDBC
from com.ziclix.python.sql import PyStatement, PyExtendedCursor, PyCursor, PyConnection
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading zxJDBC module: %s" % e)
from django.core.exceptions import ImproperlyConfigured
from django.db.backends import *
from django.db.backends import BaseDatabaseFeatures, BaseDatabaseValidation
from django.conf import settings
from pool import ManualPoolingDriver
from doj.backends.zxjdbc.common import zxJDBCDatabaseWrapper
from operations import DatabaseOperations
from introspection import DatabaseIntrospection
from creation import DatabaseCreation
# Default the collation when the project settings don't provide one; the
# case-insensitive LIKE operators defined below rely on this value.
if not hasattr(settings, "DATABASE_COLLATION"):
    settings.DATABASE_COLLATION = 'Latin1_General_CI_AS'
def complain(*args, **kwargs):
    """Stand-in callable used before DATABASE_ENGINE is configured."""
    raise ImproperlyConfigured("You haven't set the DATABASE_ENGINE setting yet.")
# Re-export zxJDBC's DB-API exception types under the names Django expects
# to find on a backend module.
DatabaseError = zxJDBC.DatabaseError
IntegrityError = zxJDBC.IntegrityError
class DatabaseClient(BaseDatabaseClient):
    # No interactive shell ("dbshell") support for this backend; invoking
    # it raises ImproperlyConfigured via complain().
    runshell = complain
class DatabaseWrapper(zxJDBCDatabaseWrapper):
    # jTDS JDBC driver configuration for SQL Server.
    jdbc_url_pattern = "jdbc:jtds:sqlserver://%(DATABASE_HOST)s%(DATABASE_PORT)s/%(DATABASE_NAME)s"
    driver_class_name = "net.sourceforge.jtds.jdbc.Driver"
    # Django lookup type -> SQL operator template.
    operators = {
        # Since '=' is used not only for string comparision there is no way
        # to make it case (in)sensitive. It will simply fallback to the
        # database collation.
        'exact': '= %s ',
        'iexact': "= UPPER(%s) ",
        'contains': "LIKE %s ESCAPE '\\' COLLATE " + settings.DATABASE_COLLATION,
        'icontains': "LIKE UPPER(%s) ESCAPE '\\' COLLATE " + settings.DATABASE_COLLATION,
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': "LIKE %s ESCAPE '\\' COLLATE " + settings.DATABASE_COLLATION,
        'endswith': "LIKE %s ESCAPE '\\' COLLATE " + settings.DATABASE_COLLATION,
        'istartswith': "LIKE UPPER(%s) ESCAPE '\\' COLLATE " + settings.DATABASE_COLLATION,
        'iendswith': "LIKE UPPER(%s) ESCAPE '\\' COLLATE " + settings.DATABASE_COLLATION,
    }
    def _register_driver(self):
        """Register a pooled DBCP driver for the configured database and
        return the pool name.  Re-registers only when DATABASE_NAME changes."""
        # Configure the pooled connection driver
        if self._LAST_DATABASE_NAME == settings.DATABASE_NAME:
            # Same database as last time: reuse the existing pool.
            return "jdbc_pool_%s" % self._db_count
        self._db_count += 1
        pool_name = "jdbc_pool_%s" % self._db_count
        db_dict = {
            'DATABASE_NAME': settings.DATABASE_NAME,
            'DATABASE_HOST': settings.DATABASE_HOST or 'localhost',
            'DATABASE_PORT': settings.DATABASE_PORT or 1433,
        }
        self.driver = ManualPoolingDriver("jdbc:jtds:sqlserver://%(DATABASE_HOST)s:%(DATABASE_PORT)s/%(DATABASE_NAME)s" % db_dict,
                                          settings.DATABASE_USER,
                                          settings.DATABASE_PASSWORD,
                                          pool_name,
                                          )
        self._LAST_DATABASE_NAME = settings.DATABASE_NAME
        return pool_name
    def _cursor(self, settings):
        '''
        Implementation specific cursor
        '''
        new_conn = False
        if self.connection is None:
            # TODO: Refactor this DBCP pool setup to zxJDBCCursorWrapper
            new_conn = True
            # Prefer a JNDI-supplied connection; fall back to the DBCP pool.
            self.connection = self.new_jndi_connection()
            if self.connection is None:
                pool_name = self._register_driver()
                if not settings.DATABASE_NAME:
                    from django.core.exceptions import ImproperlyConfigured
                    raise ImproperlyConfigured("You need to specify DATABASE_NAME in your Django settings file.")
                url = 'jdbc:apache:commons:dbcp:%s' % pool_name
                self.connection = Database.connect(url, None, None, 'org.apache.commons.dbcp.PoolingDriver')
        cursor = self.connection.cursor()
        if new_conn:
            # One-time session setup for a fresh connection.
            cursor.execute("SET DATEFORMAT ymd")
            # SQL Server violates the SQL standard w.r.t handling NULL values in UNIQUE columns.
            # We work around this by creating schema bound views on tables with with nullable unique columns
            # but we need to modify the cursor to abort if the view has problems.
            # See http://blogs.msdn.com/sqlcat/archive/2005/12/20/506138.aspx
            cursor.execute("SET ARITHABORT ON")
            cursor.execute("SET CONCAT_NULL_YIELDS_NULL ON")
            cursor.execute("SET QUOTED_IDENTIFIER ON")
            cursor.execute("SET ANSI_NULLS ON")
            cursor.execute("SET ANSI_PADDING ON")
            cursor.execute("SET ANSI_WARNINGS ON")
            cursor.execute("SET NUMERIC_ROUNDABORT OFF")
            cursor.execute("SET TRANSACTION ISOLATION LEVEL SERIALIZABLE")
            # jTDS can't execute some sql like CREATE DATABASE etc. in
            # Multi-statement, so we need to commit the above SQL sentences to
            # avoid this
        return CursorWrapper(cursor)
    def __init__(self, autocommit=False, **kwargs):
        super(DatabaseWrapper, self).__init__(autocommit=autocommit, **kwargs)
        # Pool bookkeeping used by _register_driver().
        self._LAST_DATABASE_NAME = None
        self.connection = None
        self._db_count = 0
        self.features = DatabaseFeatures()
        self.ops = DatabaseOperations()
        self.client = DatabaseClient() # XXX: No client is supported yet
        self.creation = DatabaseCreation(self) # Basic type declarations for creating tables
        self.introspection = DatabaseIntrospection(self)
        self.validation = BaseDatabaseValidation() # XXX: No real database validation yet
class DatabaseFeatures(BaseDatabaseFeatures):
    # Tell Django to ask this backend for a custom Query class (presumably
    # supplied elsewhere in this backend -- confirm against query module).
    uses_custom_query_class = True
class CursorWrapper(object):
    """
    A wrapper around the zxJDBC's cursor that takes in account some zxJDBC
    DB-API 2.0 implementation and common ODBC driver particularities.
    """
    def __init__(self, cursor):
        # The wrapped zxJDBC cursor; unknown attributes are delegated to it
        # via __getattr__.
        self.cursor = cursor

    def format_sql(self, sql):
        """Translate Django's '%s' placeholders into zxjdbc's '?' markers."""
        # zxjdbc uses '?' instead of '%s' as parameter placeholder.
        return sql.replace('%s', '?')

    def format_params(self, params):
        """Coerce each parameter into a jTDS-friendly type."""
        return tuple([coerce_sql2k_type(p) for p in params])

    def execute(self, sql, params=()):
        sql = self.format_sql(sql)
        params = self.format_params(params)
        return self.cursor.execute(sql, params)

    def executemany(self, sql, params_list):
        sql = self.format_sql(sql)
        # zxjdbc's cursor.executemany() doesn't support an empty param_list
        if not params_list:
            if '?' in sql:
                # Parameterized SQL with no parameter sets: nothing to run.
                # (Fixed: previously fell through and hit an undefined name
                # when params_list was empty and sql had no placeholders.)
                return
        else:
            params_list = [self.format_params(p) for p in params_list]
        return self.cursor.executemany(sql, params_list)

    def format_results(self, rows):
        """
        Decode data coming from the database if needed and convert rows to
        tuples (zxjdbc Rows are not sliceable).
        """
        return tuple([col for col in rows])

    def fetchone(self):
        row = self.cursor.fetchone()
        if row is not None:
            return self.format_results(row)
        return row

    def fetchmany(self, chunk):
        return [self.format_results(row) for row in self.cursor.fetchmany(chunk)]

    def fetchall(self):
        return [self.format_results(row) for row in self.cursor.fetchall()]

    def __getattr__(self, attr):
        # __getattr__ only fires when normal lookup fails, so delegate
        # straight to the underlying cursor.
        return getattr(self.cursor, attr)
def coerce_sql2k_type(p):
    '''
    Coerce a Python value into a jTDS-friendly type so that
    PreparedStatement::setObject() can work properly.

    Booleans become the integers 1/0; longs that fit in an int are
    narrowed (a long that doesn't fit raises RuntimeError); anything
    else passes through unchanged.
    '''
    if isinstance(p, bool):
        if p:
            return 1
        else:
            return 0
    elif isinstance(p, long):
        # zxJDBC doesn't like injecting long types, or maybe it
        # actually depends on the underlying SQL datatype..
        # Need to figure out a better fix for this
        if p == int(p):
            return int(p)
        else:
            # Fixed typo in the error message ("SQL Serer" -> "SQL Server").
            raise RuntimeError("SQL Server 2000 + jTDS can't seem to handle long values. Found : [%s]" % p)
    return p
| Python |
import copy
from django.db.backends.creation import BaseDatabaseCreation
from django.db.backends.postgresql.creation import DatabaseCreation as PostgresqlDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
    """Creation class for the PostgreSQL/zxJDBC backend.

    Identical to the stock PostgreSQL type map except that IPAddressField
    is stored as char(15): using the inet data type from JDBC is a pain.
    Per http://archives.postgresql.org/pgsql-jdbc/2007-08/msg00089.php the
    JDBC driver would need the extension from
    http://oak.cats.ohiou.edu/~rf358197/jdbc/2/ to handle inet.
    """
    def __init__(self, *args, **kwargs):
        super(DatabaseCreation, self).__init__(*args, **kwargs)
        # Take a private shallow copy so the stock backend's class-level
        # type map is never mutated.
        self.data_types = dict(PostgresqlDatabaseCreation.data_types)
        self.data_types['IPAddressField'] = 'char(15)'
| Python |
"""
PostgreSQL database backend for Django/Jython
"""
try:
from com.ziclix.python.sql import zxJDBC as Database
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading zxJDBC module: %s" % e)
from django.db.backends import BaseDatabaseFeatures, BaseDatabaseValidation
from django.db.backends.postgresql.operations import DatabaseOperations as PostgresqlDatabaseOperations
from django.db.backends.postgresql.client import DatabaseClient
from django.db.backends.postgresql.introspection import DatabaseIntrospection
from doj.backends.zxjdbc.postgresql.creation import DatabaseCreation
from doj.backends.zxjdbc.common import (
zxJDBCDatabaseWrapper, zxJDBCOperationsMixin, zxJDBCFeaturesMixin,
zxJDBCCursorWrapper, set_default_isolation_level)
from com.ziclix.python.sql.handler import PostgresqlDataHandler
from UserDict import DictMixin
# Re-export zxJDBC's DB-API exception types under the names Django expects
# to find on a backend module.
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
class DatabaseFeatures(zxJDBCFeaturesMixin, BaseDatabaseFeatures):
    # The zxJDBC mixin supplies everything this backend needs.
    pass
class DatabaseOperations(zxJDBCOperationsMixin, PostgresqlDatabaseOperations):
    pass # The mixin contains all that is needed
class SettingsModuleAsDict(DictMixin):
    """Adapt a settings module to the mapping protocol so code expecting a
    dict-like settings object can read/write module attributes."""
    def __init__(self, module):
        self.module = module
    def __getitem__(self, name):
        return getattr(self.module, name)
    def __setitem__(self, name, value):
        setattr(self.module, name, value)
    def __delitem__(self, name):
        # Use the delattr builtin instead of calling __delattr__ directly.
        delattr(self.module, name)
    def keys(self):
        return dir(self.module)
class DatabaseWrapper(zxJDBCDatabaseWrapper):
    # JDBC driver configuration consumed by the common zxJDBC wrapper.
    driver_class_name = "org.postgresql.Driver"
    jdbc_url_pattern = \
        "jdbc:postgresql://%(HOST)s%(PORT)s/%(NAME)s"
    # Django lookup type -> PostgreSQL operator template.
    operators = {
        'exact': '= %s',
        'iexact': 'ILIKE %s',
        'contains': 'LIKE %s',
        'icontains': 'ILIKE %s',
        'regex': '~ %s',
        'iregex': '~* %s',
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': 'LIKE %s',
        'endswith': 'LIKE %s',
        'istartswith': 'ILIKE %s',
        'iendswith': 'ILIKE %s',
    }
    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        self.features = DatabaseFeatures(self)
        self.ops = DatabaseOperations(self)
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = BaseDatabaseValidation(self)
    def _cursor(self):
        # Lazily open the underlying JDBC connection on first use.
        if self.connection is None:
            self.connection = self.new_connection()
            # make transactions transparent to all cursors
            set_default_isolation_level(self.connection)
        real_cursor = self.connection.cursor()
        # Use the PostgreSQL DataHandler for better compatibility:
        real_cursor.datahandler = PostgresqlDataHandler(real_cursor.datahandler)
        return CursorWrapper(real_cursor)
class CursorWrapper(zxJDBCCursorWrapper):
    """Cursor wrapper that rolls the transaction back whenever a statement
    fails: a PostgreSQL connection is unusable after an error until the
    current transaction has been rolled back."""

    def _run_guarded(self, operation, *args, **kwargs):
        # Shared error-recovery path for execute()/executemany().
        try:
            operation(*args, **kwargs)
        except Database.Error:
            self.connection.rollback()
            raise

    def execute(self, *args, **kwargs):
        self._run_guarded(super(CursorWrapper, self).execute, *args, **kwargs)

    def executemany(self, *args, **kwargs):
        self._run_guarded(super(CursorWrapper, self).executemany, *args, **kwargs)
import platform
# NOTE(review): python_version_tuple() returns strings, so this is a
# lexicographic comparison; it is correct for the single-digit minor
# versions of the Jython 2.x era this targets -- confirm if ever run on
# a runtime with a two-digit version component.
if tuple(platform.python_version_tuple()) < ('2', '5', '2'):
    # Workaround Jython bug http://bugs.jython.org/issue1499: PostgreSQL
    # datahandler should return Decimals instead of floats for NUMERIC/DECIMAL
    # columns
    OriginalPostgresqlDataHandler = PostgresqlDataHandler
    from java.sql import Types
    from decimal import Decimal
    class PostgresqlDataHandler(OriginalPostgresqlDataHandler):
        def getPyObject(self, set, col, type):
            if type in (Types.NUMERIC, Types.DECIMAL):
                value = set.getBigDecimal(col)
                if value is None:
                    return None
                else:
                    return Decimal(str(value))
            else:
                # Everything else keeps the original handler's behaviour.
                return OriginalPostgresqlDataHandler.getPyObject(
                    self, set, col, type)
| Python |
# Empty file. And this comment is to keep patch/diff happy
| Python |
# -*- coding: utf-8 -*-
import datetime
from java.sql import Connection
from com.ziclix.python.sql import zxJDBC
from django.db.backends import BaseDatabaseWrapper
class zxJDBCDatabaseWrapper(BaseDatabaseWrapper):
    """Common base for the zxJDBC backends: builds the JDBC URL from the
    Django settings and opens connections directly or via JNDI."""
    default_host = 'localhost'
    default_port = ''
    driver_class_name = None # Must be overriden
    jdbc_url_pattern = None # Must be overriden
    def __init__(self, *args, **kwargs):
        super(zxJDBCDatabaseWrapper, self).__init__(*args, **kwargs)
    def jdbc_url(self):
        # Fill the subclass's URL pattern with the normalized settings.
        return self.jdbc_url_pattern % self.settings_dict_postprocesed()
    def settings_dict_postprocesed(self):
        """Return a copy of settings_dict with HOST defaulted and PORT
        prefixed with ':' (or emptied) as the URL pattern expects."""
        settings_dict = self.settings_dict.copy() # Avoid messing with the
                                                  # original settings
        host, port = settings_dict['HOST'], settings_dict['PORT']
        if not host:
            settings_dict['HOST'] = self.default_host
        if port:
            settings_dict['PORT'] = ":%s" % port
        elif self.default_port:
            settings_dict['PORT'] = ":%s" % self.default_port
        return settings_dict
    def new_connection(self):
        """Open a new connection, preferring a JNDI data source when one is
        configured; raises ImproperlyConfigured if NAME is empty."""
        connection = self.new_jndi_connection()
        if not connection:
            settings_dict = self.settings_dict
            if settings_dict['NAME'] == '':
                from django.core.exceptions import ImproperlyConfigured
                raise ImproperlyConfigured(
                    "You need to specify DATABASE NAME in your Django settings file.")
            connection = zxJDBC.connect(self.jdbc_url(),
                                        settings_dict['USER'],
                                        settings_dict['PASSWORD'],
                                        self.driver_class_name,
                                        **settings_dict['OPTIONS'])
        return connection
    def new_jndi_connection(self):
        """
        Returns a zxJDBC Connection object obtained from a JNDI data source if
        the settings dictionary contains the JNDI_NAME entry on the
        DATABASE_OPTIONS dictionary, or None if it doesn't.
        """
        settings_dict = self.settings_dict
        if 'DATABASE_OPTIONS' not in settings_dict:
            return None
        if 'JNDI_NAME' not in settings_dict['DATABASE_OPTIONS']:
            return None
        name = settings_dict['DATABASE_OPTIONS']['JNDI_NAME']
        props = settings_dict['DATABASE_OPTIONS'].get('JNDI_CONTEXT_OPTIONS', {})
        # Default the JNDI endpoint to a Glassfish instance
        # running on localhost
        # jndi_endpoint = settings_dict['DATABASE_OPTIONS'].get('JNDI_ENDPOINT', 'localhost:3700')
        # jndi_ctx_factory = settings_dict['DATABASE_OPTIONS'].get('JNDI_INITIAL_CONTEXT_FACTORY', 'localhost:3700')
        # props = {'com.sun.appserv.iiop.endpoints':jndi_endpoint,
        #          Context.INITIAL_CONTEXT_FACTORY:jndi_ctx_factory}
        return zxJDBC.lookup(name, keywords=props)
class zxJDBCOperationsMixin(object):
    # zxJDBC accepts dates, times, datetimes and decimals natively, so the
    # value_to_db_* hooks are all identity functions.
    def value_to_db_date(self, value):
        return value

    def value_to_db_datetime(self, value):
        return value

    def value_to_db_time(self, value):
        return value

    def value_to_db_decimal(self, value, max_digits, decimal_places):
        return value

    def year_lookup_bounds(self, value):
        # [first instant of the year, last representable instant of the year]
        first = datetime.datetime(value, 1, 1)
        last = first.replace(month=12, day=31, hour=23, minute=59,
                             second=59, microsecond=999999)
        return [first, last]
class zxJDBCFeaturesMixin(object):
    # No string-to-datetime cast needed -- presumably because zxJDBC hands
    # back real datetime objects; confirm against the data handler.
    needs_datetime_string_cast = False
class zxJDBCCursorWrapper(object):
    """
    Adapts Django's "%s" parameter placeholders to zxJDBC's "?" markers
    before delegating to the wrapped cursor.
    """
    def __init__(self, cursor):
        self.cursor = cursor

    def execute(self, sql, params=()):
        placeholders = ('?',) * len(params)
        self.cursor.execute(sql % placeholders, params)

    def executemany(self, sql, param_list):
        # zxJDBC can't executemany() with an empty parameter list.
        if not param_list:
            return
        placeholders = ('?',) * len(param_list[0])
        self.cursor.executemany(sql % placeholders, param_list)

    def __getattr__(self, attr):
        # Delegate everything else to the underlying cursor.
        return getattr(self.cursor, attr)

    def __iter__(self):
        # Keep fetching rows via the cursor's next() until it yields None.
        return iter(self.next, None)
# Must be called by zxJDBC backends after instantiating a connection
def set_default_isolation_level(connection, innodb_binlog=False):
    """Set the default transaction isolation level on the raw JDBC
    connection: REPEATABLE READ when innodb_binlog is requested,
    READ COMMITTED otherwise."""
    if innodb_binlog:
        level = Connection.TRANSACTION_REPEATABLE_READ
    else:
        level = Connection.TRANSACTION_READ_COMMITTED
    connection.__connection__.setTransactionIsolation(level)
| Python |
"""
SQLite3 backend for Django/Jython.
"""
from django.db.backends import BaseDatabaseFeatures
from django.db.backends import BaseDatabaseOperations, BaseDatabaseValidation, util
from django.db.backends.sqlite3.client import DatabaseClient
from django.db.backends.sqlite3.creation import DatabaseCreation
from django.db.backends.sqlite3.introspection import DatabaseIntrospection
try:
from com.ziclix.python.sql import zxJDBC as Database
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading zxJDBC module: %s" % e)
try:
from org.sqlite import JDBC
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading SQLite3 JDBC driver: %s" % e)
from doj.backends.zxjdbc.common import (
zxJDBCDatabaseWrapper, zxJDBCOperationsMixin, zxJDBCFeaturesMixin,
zxJDBCCursorWrapper)
from org.sqlite import Function
# Re-export zxJDBC's DB-API exception types under the names Django expects.
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
# Copied from sqlite3 backend
class DatabaseFeatures(zxJDBCFeaturesMixin, BaseDatabaseFeatures):
    # SQLite cannot handle us only partially reading from a cursor's result set
    # and then writing the same rows to the database in another cursor. This
    # setting ensures we always read result sets fully into memory all in one
    # go.
    can_use_chunked_reads = False
# Copied from sqlite3 backend
class DatabaseOperations(zxJDBCOperationsMixin, BaseDatabaseOperations):
    def date_extract_sql(self, lookup_type, field_name):
        # SQLite has no extract(); defer to the django_extract user-defined
        # function registered on each connection.
        return 'django_extract("%s", %s)' % (lookup_type.lower(), field_name)

    def date_trunc_sql(self, lookup_type, field_name):
        # Likewise DATE_TRUNC is emulated by the django_date_trunc UDF.
        return 'django_date_trunc("%s", %s)' % (lookup_type.lower(), field_name)

    def drop_foreignkey_sql(self):
        # Nothing to drop: SQLite has no enforced FK constraints here.
        return ""

    def pk_default_value(self):
        return 'NULL'

    def quote_name(self, name):
        # Quoting once is enough.
        if name.startswith('"') and name.endswith('"'):
            return name
        return '"%s"' % name

    def no_limit_value(self):
        return -1

    def sql_flush(self, style, tables, sequences):
        # Plain DELETE FROM works on SQLite because constraints don't
        # exist, and auto-increment counters need no reset here.
        statements = []
        for table in tables:
            statements.append('%s %s %s;' % (style.SQL_KEYWORD('DELETE'),
                                             style.SQL_KEYWORD('FROM'),
                                             style.SQL_FIELD(self.quote_name(table))))
        return statements
# With the exception of _cursor and zxJDBCDatabaseWrapper properties, also
# copied from the sqlite3 backend:
class DatabaseWrapper(zxJDBCDatabaseWrapper):
    driver_class_name = 'org.sqlite.JDBC'
    # NOTE(review): this uses the DATABASE_NAME key while common.py's
    # settings_dict_postprocesed() works with NAME -- confirm which
    # settings layout this backend targets.
    jdbc_url_pattern = "jdbc:sqlite:%(DATABASE_NAME)s"
    # SQLite requires LIKE statements to include an ESCAPE clause if the value
    # being escaped has a percent or underscore in it.
    # See http://www.sqlite.org/lang_expr.html for an explanation.
    operators = {
        'exact': '= %s',
        'iexact': "LIKE %s ESCAPE '\\'",
        'contains': "LIKE %s ESCAPE '\\'",
        'icontains': "LIKE %s ESCAPE '\\'",
        'regex': 'REGEXP %s',
        'iregex': "REGEXP '(?i)' || %s",
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': "LIKE %s ESCAPE '\\'",
        'endswith': "LIKE %s ESCAPE '\\'",
        'istartswith': "LIKE %s ESCAPE '\\'",
        'iendswith': "LIKE %s ESCAPE '\\'",
    }
    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        self.features = DatabaseFeatures()
        self.ops = DatabaseOperations()
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = BaseDatabaseValidation()
    def _cursor(self):
        """Return a wrapped cursor, opening the connection and registering
        the SQLite user-defined functions on first use."""
        if self.connection is None:
            self.connection = self.new_connection()
            # set_default_isolation_level(self.connection) not working :(
            # Register extract, date_trunc, and regexp functions.
            _create_function(self.connection.__connection__,
                             "django_extract", 2, _sqlite_extract)
            _create_function(self.connection.__connection__,
                             "django_date_trunc", 2, _sqlite_date_trunc)
            _create_function(self.connection.__connection__,
                             "regexp", 2, _sqlite_regexp)
        return CursorWrapper(self.connection.cursor())
    def close(self):
        from django.conf import settings
        # If database is in memory, closing the connection destroys the
        # database. To prevent accidental data loss, ignore close requests on
        # an in-memory db.
        if self.settings_dict['DATABASE_NAME'] != ":memory:":
            # Fixed: this previously called BaseDatabaseWrapper.close(self),
            # but BaseDatabaseWrapper is never imported in this module and
            # raised NameError for on-disk databases. Delegate via super().
            super(DatabaseWrapper, self).close()
# This backend needs no extra cursor behaviour; reuse the common wrapper.
CursorWrapper = zxJDBCCursorWrapper
def _create_function(conn, name, num_args, py_func):
    """Expose *py_func* to SQLite as a user-defined function called *name*
    taking *num_args* string arguments."""
    class _FunctionAdapter(Function):
        def xFunc(self):
            assert self.args() == num_args
            arg_values = [self.value_string(idx) for idx in xrange(0, num_args)]
            self.result(py_func(*arg_values))
    Function.create(conn, name, _FunctionAdapter())
# Functions copied from sqlite3 backend:
def _sqlite_extract(lookup_type, dt):
    """Backs the django_extract UDF: return the *lookup_type* component
    (e.g. 'year') of the timestamp string *dt*, or None if unparsable."""
    try:
        parsed = util.typecast_timestamp(dt)
    except (ValueError, TypeError):
        return None
    return getattr(parsed, lookup_type)
def _sqlite_date_trunc(lookup_type, dt):
    """Backs the django_date_trunc UDF: truncate the timestamp string *dt*
    to the given precision ('year', 'month' or 'day'); None on a bad
    timestamp or an unsupported lookup type."""
    try:
        dt = util.typecast_timestamp(dt)
    except (ValueError, TypeError):
        return None
    if lookup_type == 'year':
        return "%i-01-01 00:00:00" % dt.year
    if lookup_type == 'month':
        return "%i-%02i-01 00:00:00" % (dt.year, dt.month)
    if lookup_type == 'day':
        return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
def _sqlite_regexp(re_pattern, re_string):
import re
try:
return bool(re.search(re_pattern, re_string))
except:
return False
| Python |
# Empty file. And this comment is to keep patch/diff happy
| Python |
# Empty file
| Python |
"""
Custom Query class for Oracle.
Derived from: django.db.models.sql.query.Query
"""
import datetime
from django.db.backends import util
# Cache. Maps default query class to new Oracle query class.
_classes = {}
def query_class(QueryClass, Database):
    """
    Returns a custom django.db.models.sql.query.Query subclass that is
    appropriate for Oracle.
    The 'Database' module (cx_Oracle) is passed in here so that all the setup
    required to import it only needs to be done by the calling module.
    """
    global _classes
    # Memoized: build the subclass only once per base QueryClass.
    try:
        return _classes[QueryClass]
    except KeyError:
        pass
    class OracleQuery(QueryClass):
        def __reduce__(self):
            """
            Enable pickling for this class (normal pickling handling doesn't
            work as Python can only pickle module-level classes by default).
            """
            if hasattr(QueryClass, '__getstate__'):
                assert hasattr(QueryClass, '__setstate__')
                data = self.__getstate__()
            else:
                data = self.__dict__
            return (unpickle_query_class, (QueryClass,), data)
        def resolve_columns(self, row, fields=()):
            # If this query has limit/offset information, then we expect the
            # first column to be an extra "_RN" column that we need to throw
            # away.
            if self.high_mark is not None or self.low_mark:
                rn_offset = 1
            else:
                rn_offset = 0
            index_start = rn_offset + len(self.extra_select.keys())
            # Extra-select columns carry no model field; convert with None.
            values = [self.convert_values(v, None)
                      for v in row[rn_offset:index_start]]
            # map(None, ...) is the Python 2 zip-longest idiom: it pads the
            # shorter of (remaining columns, fields) with None.
            for value, field in map(None, row[index_start:], fields):
                values.append(self.convert_values(value, field))
            return tuple(values)
        #def resolve_columns(self, row, fields=()):
        #    index_start = len(self.extra_select.keys())
        #    values = [self.convert_values(v, type(v)) for v in row[:index_start]]
        #    for value, field in map(None, row[index_start:], fields):
        #        values.append(self.convert_values(value, field))
        #    return values
        def convert_values(self, value, field):
            """Coerce a raw driver value to the Python type Django expects
            for `field` (which may be None for extra-select columns)."""
            from django.db.models.fields import DateField, DateTimeField, \
                TimeField, BooleanField, NullBooleanField, DecimalField, FloatField, Field
            # Oracle stores empty strings as null. We need to undo this in
            # order to adhere to the Django convention of using the empty
            # string instead of null, but only if the field accepts the
            # empty string.
            if value is None:
                pass
            # NOTE(review): this branch is unreachable -- every None value is
            # already consumed by the branch above, so NULLs are never mapped
            # back to u'' here. Confirm whether that is intentional (it may
            # relate to the 04-26-2009 repair below).
            elif value is None and isinstance(field, Field) and field.empty_strings_allowed:
                value = u''
            # The next three branches are deliberate identity conversions:
            # values the driver already delivered as float/int/unicode are
            # passed through untouched.
            elif isinstance(value, float):
                value = float(value)
            # Added 04-26-2009 to repair "Invalid literal for int() base 10" error
            elif isinstance(value,int):
                value = int(value)
            elif isinstance(value,unicode):
                value = unicode(value)
            elif field is not None and field.get_internal_type() == 'AutoField':
                # Sequence values may arrive as decimal strings like "3.0".
                value = int(float(value))
            # Convert 1 or 0 to True or False
            elif value in (1, 0) and field is not None and field.get_internal_type() in ('BooleanField', 'NullBooleanField'):
                value = bool(value)
            # Force floats to the correct type
            elif field is not None and field.get_internal_type() == 'FloatField':
                value = float(value)
            # Convert floats to decimals
            elif field is not None and field.get_internal_type() == 'DecimalField':
                value = util.typecast_decimal(field.format_number(value))
            elif field is not None and field.get_internal_type() == 'SmallIntegerField':
                value = util.typecast_decimal(field.format_number(value))
            return value
        def as_sql(self, with_limits=True, with_col_aliases=False):
            """
            Creates the SQL for this query. Returns the SQL string and list
            of parameters. This is overriden from the original Query class
            to handle the additional SQL Oracle requires to emulate LIMIT
            and OFFSET.
            If 'with_limits' is False, any limit/offset information is not
            included in the query.
            """
            # The `do_offset` flag indicates whether we need to construct
            # the SQL needed to use limit/offset with Oracle.
            do_offset = with_limits and (self.high_mark is not None
                                         or self.low_mark)
            if not do_offset:
                sql, params = super(OracleQuery, self).as_sql(with_limits=False,
                        with_col_aliases=with_col_aliases)
            else:
                # `get_columns` needs to be called before `get_ordering` to
                # populate `_select_alias`.
                sql, params = super(OracleQuery, self).as_sql(with_limits=False,
                        with_col_aliases=True)
                # Wrap the base query in an outer SELECT * with boundaries on
                # the "_RN" column. This is the canonical way to emulate LIMIT
                # and OFFSET on Oracle.
                high_where = ''
                if self.high_mark is not None:
                    high_where = 'WHERE ROWNUM <= %d' % (self.high_mark,)
                # low_mark is the OFFSET: rows with _RN <= low_mark are skipped.
                sql = 'SELECT * FROM (SELECT ROWNUM AS "_RN", "_SUB".* FROM (%s) "_SUB" %s) WHERE "_RN" > %d' % (sql, high_where, self.low_mark)
            return sql, params
        #def set_limits(self, low=None, high=None):
        #    super(OracleQuery, self).set_limits(low, high)
        #    # We need to select the row number for the LIMIT/OFFSET sql.
        #    # A placeholder is added to extra_select now, because as_sql is
        #    # too late to be modifying extra_select. However, the actual sql
        #    # depends on the ordering, so that is generated in as_sql.
        #    self.extra_select['_RN'] = ('1', '')
        #def clear_limits(self):
        #    super(OracleQuery, self).clear_limits()
        #    if '_RN' in self.extra_select:
        #        del self.extra_select['_RN']
    # Cache and return the generated class (see memoization above).
    _classes[QueryClass] = OracleQuery
    return OracleQuery
def unpickle_query_class(QueryClass):
    """
    Utility function, called by Python's unpickling machinery, that handles
    unpickling of Oracle Query subclasses.
    """
    # XXX: Would be nice to not have any dependency on cx_Oracle here. Since
    # modules can't be pickled, we need a way to know to load the right module.
    from com.ziclix.python.sql import zxJDBC
    oracle_query_class = query_class(QueryClass, zxJDBC)
    # Allocate without running __init__; pickle restores the state afterwards.
    return oracle_query_class.__new__(oracle_query_class)
unpickle_query_class.__safe_for_unpickling__ = True
| Python |
# Highly based on Django's builtin Oracle backend.
#
# In fact, we only change the data_types_reverse dictionary. Unfortunately we
# can't just subclass their DatabaseIntrospection class because their module
# imports cx_Oracle. (We could do nasty monkey-patching trickery, but...)
from django.db.backends import BaseDatabaseIntrospection
from com.ziclix.python.sql import zxJDBC
import re
# NOTE(review): this backtick-quoted CONSTRAINT pattern looks copied from a
# MySQL backend and is not referenced anywhere in this module -- confirm
# whether it is dead code.
foreign_key_re = re.compile(r"\sCONSTRAINT `[^`]*` FOREIGN KEY \(`([^`]*)`\) REFERENCES `([^`]*)` \(`([^`]*)`\)")
class DatabaseIntrospection(BaseDatabaseIntrospection):
    """Oracle schema introspection over zxJDBC."""
    # Maps zxJDBC type objects to the Django field types they should be
    # introspected as.
    data_types_reverse = {
        zxJDBC.CLOB: 'TextField',
        zxJDBC.VARCHAR: 'CharField',
        zxJDBC.FLOAT: 'FloatField',
        zxJDBC.DATETIME: 'DateTimeField',
        zxJDBC.STRING: 'CharField',
        zxJDBC.TIMESTAMP: 'DateTimeField',
        zxJDBC.NUMERIC: 'SmallIntegerField',
        zxJDBC.INTEGER: 'IntegerField',
        zxJDBC.DECIMAL: 'Decimal',
        zxJDBC.NUMBER: 'Decimal',
    }

    def get_table_list(self, cursor):
        """Return the (uppercased) names of all tables in the current schema."""
        cursor.execute("SELECT TABLE_NAME FROM USER_TABLES")
        return [record[0].upper() for record in cursor.fetchall()]

    def get_table_description(self, cursor, table_name):
        """Return a DB-API cursor.description for table_name."""
        # Fetching at most one row is enough to populate the description.
        cursor.execute("SELECT * FROM %s WHERE ROWNUM < 2" % self.connection.ops.quote_name(table_name))
        return cursor.description

    def table_name_converter(self, name):
        # Unquoted identifiers are case-insensitive in Oracle, so compare
        # everything in upper case.
        return name.upper()

    def _name_to_index(self, cursor, table_name):
        """
        Returns a dictionary of {field_name: field_index} for the given table.
        Indexes are 0-based.
        """
        description = self.get_table_description(cursor, table_name)
        return dict([(column[0], index)
                     for index, column in enumerate(description)])

    def get_relations(self, cursor, table_name):
        """
        Returns a dictionary of {field_index: (field_index_other_table, other_table)}
        representing all relationships to the given table. Indexes are 0-based.
        """
        cursor.execute("""
    SELECT ta.column_id - 1, tb.table_name, tb.column_id - 1
    FROM user_constraints, USER_CONS_COLUMNS ca, USER_CONS_COLUMNS cb,
         user_tab_cols ta, user_tab_cols tb
    WHERE user_constraints.table_name = %s AND
          ta.table_name = %s AND
          ta.column_name = ca.column_name AND
          ca.table_name = %s AND
          user_constraints.constraint_name = ca.constraint_name AND
          user_constraints.r_constraint_name = cb.constraint_name AND
          cb.table_name = tb.table_name AND
          cb.column_name = tb.column_name AND
          ca.position = cb.position""", [table_name, table_name, table_name])
        return dict([(record[0], (record[2], record[1]))
                     for record in cursor.fetchall()])

    def get_indexes(self, cursor, table_name):
        """
        Returns a dictionary of fieldname -> infodict for the given table,
        where each infodict is in the format:
        {'primary_key': boolean representing whether it's the primary key,
         'unique': boolean representing whether it's a unique index}
        """
        # This query retrieves each index on the given table, including the
        # first associated field name
        # "We were in the nick of time; you were in great peril!"
        sql = """
    WITH primarycols AS (
     SELECT user_cons_columns.table_name, user_cons_columns.column_name, 1 AS PRIMARYCOL
     FROM user_cons_columns, user_constraints
     WHERE user_cons_columns.constraint_name = user_constraints.constraint_name AND
           user_constraints.constraint_type = 'P' AND
           user_cons_columns.table_name = %s),
     uniquecols AS (
     SELECT user_ind_columns.table_name, user_ind_columns.column_name, 1 AS UNIQUECOL
     FROM user_indexes, user_ind_columns
     WHERE uniqueness = 'UNIQUE' AND
           user_indexes.index_name = user_ind_columns.index_name AND
           user_ind_columns.table_name = %s)
    SELECT allcols.column_name, primarycols.primarycol, uniquecols.UNIQUECOL
    FROM (SELECT column_name FROM primarycols UNION SELECT column_name FROM
    uniquecols) allcols,
          primarycols, uniquecols
    WHERE allcols.column_name = primarycols.column_name (+) AND
          allcols.column_name = uniquecols.column_name (+)
        """
        cursor.execute(sql, [table_name, table_name])
        found = {}
        for record in cursor.fetchall():
            # Multi-column indexes are intentionally not represented here;
            # only the flags for each individual column are recorded.
            found[record[0]] = {'primary_key': record[1], 'unique': record[2]}
        return found
| Python |
"""
Author: Josh Juneau
Wrapper for Django-Jython Oracle implementation for zxJDBC calls
"""
from decimal import Decimal # Added for issue 39
try:
from com.ziclix.python.sql import zxJDBC as Database
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading zxJDBC module: %s" % e)
from django.utils.encoding import smart_str, force_unicode
class zxJDBCCursorWrapperOracle(object):
    """
    A simple wrapper to do the "%s" -> "?" replacement before running zxJDBC's
    execute or executemany, and to rewrite Django's LIMIT/OFFSET clauses into
    Oracle-compatible ROWNUM restrictions.
    """
    def __init__(self, cursor):
        self.cursor = cursor

    def execute(self, sql, params=()):
        """Execute sql after placeholder translation and LIMIT/OFFSET patching."""
        if params is None:
            params = []
        if len(params) > 0:
            sql = sql % (('?',) * len(params))
        # zxJDBC rejects trailing statement terminators.
        if sql.endswith(';') or sql.endswith('/'):
            sql = sql[:-1]
        # Patch LIMIT clause for Oracle to implement ROWNUM
        if sql.count(' LIMIT'):
            limit_idx = sql.find(' LIMIT')
            # The row count is the first token after the LIMIT keyword.
            # (Previously the whole " LIMIT n" tail was interpolated into the
            # ROWNUM predicate, producing invalid SQL.)
            limit_rows = sql[limit_idx:].split()[1]
            # Drop the LIMIT clause and anything following it (e.g. OFFSET).
            sql = sql[:limit_idx]
            # If WHERE clause then make ROWNUM evaluation first.
            # NOTE: str.replace returns a new string -- the result must be
            # assigned back (it was previously discarded, making the whole
            # LIMIT emulation a no-op).
            if sql.count(' WHERE'):
                sql = sql.replace(' WHERE', ' WHERE ROWNUM <= %s AND' % limit_rows, 1)
            elif sql.count(' ORDER BY'):
                sql = sql.replace(' ORDER BY', ' WHERE ROWNUM <= %s ORDER BY' % limit_rows, 1)
            elif sql.count(' ASC'):
                sql = sql.replace(' ASC', ' WHERE ROWNUM <= %s ASC' % limit_rows, 1)
            elif sql.count(' DESC'):
                sql = sql.replace(' DESC', ' WHERE ROWNUM <= %s DESC' % limit_rows, 1)
        # Strip any OFFSET references. Not the best implementation, certainly
        # needs some work. However, this does the trick for now.
        if sql.find('OFFSET') > 0:
            offset_int = int(sql[sql.find('OFFSET') + 7:])
            sql = sql.replace(' OFFSET %d' % offset_int, '')
        self.cursor.execute(sql, params)

    def executemany(self, sql, param_list):
        """Like execute, for a sequence of parameter tuples."""
        if len(param_list) > 0:
            sql = sql % (('?',) * len(param_list[0]))
        self.cursor.executemany(sql, param_list)

    def fetchone(self):
        row = self.cursor.fetchone()
        if row is None:
            return row
        return self._rowfactory(row)

    def fetchmany(self, size=None):
        if size is None:
            size = self.arraysize
        return tuple([self._rowfactory(r)
                      for r in self.cursor.fetchmany(size)])

    def fetchall(self):
        return tuple([self._rowfactory(r)
                      for r in self.cursor.fetchall()])

    def _rowfactory(self, row):
        """
        Cast numeric values as the appropriate Python type based upon the
        cursor description, and convert strings to unicode.
        """
        casted = []
        for value, desc in zip(row, self.cursor.description):
            # Altered on 04-26-2009 JJ
            # Changed 'is' to '==' for NUMBER comparison
            if value is not None and desc[1] == Database.NUMBER:
                precision, scale = desc[4:6]
                if scale == -127:
                    if precision == 0:
                        # NUMBER column: decimal-precision floating point
                        # This will normally be an integer from a sequence,
                        # but it could be a decimal value.
                        if isinstance(value, float):
                            pass
                        else:
                            if '.' in value:
                                value = Decimal(value)
                            else:
                                value = int(value)
                    else:
                        # FLOAT column: binary-precision floating point.
                        # This comes from FloatField columns.
                        value = float(value)
                elif precision > 0:
                    # NUMBER(p,s) column: decimal-precision fixed point.
                    # This comes from IntField and DecimalField columns.
                    if scale == 0:
                        value = int(value)
                    else:
                        value = Decimal(value)
                else:
                    value = int(value)
            else:
                value = to_unicode(value)
            casted.append(value)
        return tuple(casted)

    def __getattr__(self, attr):
        # Delegate everything else (arraysize, description, close, ...) to
        # the wrapped zxJDBC cursor.
        return getattr(self.cursor, attr)
def to_unicode(s):
    """
    Convert strings to Unicode objects (and return all other data types
    unchanged).
    """
    if not isinstance(s, basestring):
        return s
    return force_unicode(s)
| Python |
"""
Oracle database backend for Django-Jython
"""
try:
from com.ziclix.python.sql import zxJDBC as Database
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading zxJDBC module: %s" % e)
import os
from django.db.backends import *
from doj.backends.zxjdbc.oracle import query
from django.db.backends.oracle.client import DatabaseClient
from django.db.backends.oracle.creation import DatabaseCreation
from doj.backends.zxjdbc.oracle.introspection import DatabaseIntrospection
from django.utils.encoding import smart_str
from doj.backends.zxjdbc.common import zxJDBCOperationsMixin, zxJDBCFeaturesMixin
from doj.backends.zxjdbc.oracle.zxJDBCCursorWrapperOracle import zxJDBCCursorWrapperOracle
from UserDict import DictMixin
import django
# Oracle takes client-side character set encoding from the environment.
os.environ['NLS_LANG'] = '.UTF8'
# This prevents unicode from getting mangled by getting encoded into the
# potentially non-unicode database character set.
os.environ['ORA_NCHAR_LITERAL_REPLACE'] = 'TRUE'
# Re-export the driver's DB-API exception classes under the module-level
# names Django's database layer expects to find on a backend.
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
class DatabaseFeatures(zxJDBCFeaturesMixin, BaseDatabaseFeatures):
    """Feature flags describing this Oracle/zxJDBC backend to Django."""
    # fetchmany() returns a tuple here, so "no more rows" is ().
    empty_fetchmany_value = ()
    # The driver hands back real datetime objects; no string cast needed.
    needs_datetime_string_cast = False
    # Use the OracleQuery subclass from query.query_class() (ROWNUM-based
    # LIMIT/OFFSET emulation).
    uses_custom_query_class = True
    # Oracle stores '' as NULL; Django must translate between the two.
    interprets_empty_strings_as_nulls = True
class DatabaseOperations(zxJDBCOperationsMixin, BaseDatabaseOperations):
    """Oracle-flavoured SQL generation helpers for the zxJDBC backend."""
    def autoinc_sql(self, table, column):
        """Return (sequence_sql, trigger_sql) emulating an auto-increment PK."""
        # # To simulate auto-incrementing primary keys in Oracle, we have to
        # # create a sequence and a trigger.
        sq_name = get_sequence_name(table)
        tr_name = get_trigger_name(table)
        tbl_name = self.quote_name(table)
        col_name = self.quote_name(column)
        # Create the sequence only if it does not exist yet, so this DDL is
        # safe to run repeatedly.
        sequence_sql = """
DECLARE
    i INTEGER;
BEGIN
    SELECT COUNT(*) INTO i FROM USER_CATALOG
        WHERE TABLE_NAME = '%(sq_name)s' AND TABLE_TYPE = 'SEQUENCE';
    IF i = 0 THEN
        EXECUTE IMMEDIATE 'CREATE SEQUENCE "%(sq_name)s"';
    END IF;
END;
/""" % locals()
        # The trigger fills the PK column from the sequence on INSERT when
        # no explicit value was given.
        trigger_sql = """
CREATE OR REPLACE TRIGGER "%(tr_name)s"
BEFORE INSERT ON %(tbl_name)s
FOR EACH ROW
WHEN (new.%(col_name)s IS NULL)
    BEGIN
        SELECT %(sq_name)s.nextval
        INTO :new.%(col_name)s FROM dual;
    END;
/""" % locals()
        return sequence_sql, trigger_sql
    def date_extract_sql(self, lookup_type, field_name):
        """SQL extracting one date component (year/month/day) from a column."""
        # # http://download-east.oracle.com/docs/cd/B10501_01/server.920/a96540/functions42a.htm#1017163
        return "EXTRACT(%s FROM %s)" % (lookup_type, field_name)
    def date_trunc_sql(self, lookup_type, field_name):
        """SQL truncating a date column to the given granularity."""
        # Oracle uses TRUNC() for both dates and numbers.
        # http://download-east.oracle.com/docs/cd/B10501_01/server.920/a96540/functions155a.htm#SQLRF06151
        if lookup_type == 'day':
            sql = 'TRUNC(%s)' % field_name
        else:
            sql = "TRUNC(%s, '%s')" % (field_name, lookup_type)
        return sql
    def datetime_cast_sql(self):
        # Cast string timestamps to TIMESTAMP with fractional seconds.
        return "TO_TIMESTAMP(%s, 'YYYY-MM-DD HH24:MI:SS.FF')"
    def deferrable_sql(self):
        # Constraint-checking is deferred to transaction commit.
        return " DEFERRABLE INITIALLY DEFERRED"
    def drop_sequence_sql(self, table):
        """SQL dropping the auto-increment sequence belonging to table."""
        return "DROP SEQUENCE %s;" % self.quote_name(get_sequence_name(table))
    def field_cast_sql(self, db_type):
        # LOB columns cannot be compared directly; extract their text first.
        if db_type and db_type.endswith('LOB'):
            return "DBMS_LOB.SUBSTR(%s)"
        else:
            return "%s"
    def last_insert_id(self, cursor, table_name, pk_name):
        """Return the last value drawn from table_name's PK sequence."""
        # "<table>_sq" matches get_sequence_name(): unquoted Oracle
        # identifiers are case-insensitive, so the lower-case suffix is fine.
        sq_name = util.truncate_name(table_name, self.max_name_length() - 3)
        cursor.execute('SELECT %s_sq.currval FROM dual' % sq_name)
        return cursor.fetchone()[0]
    def lookup_cast(self, lookup_type):
        # Case-insensitive lookups compare against UPPER()-folded values.
        if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
            return "UPPER(%s)"
        return "%s"
    def max_name_length(self):
        # Oracle identifiers are limited to 30 characters.
        return 30
    def prep_for_iexact_query(self, x):
        # No preprocessing needed; lookup_cast() handles the case folding.
        return x
    def query_class(self, DefaultQueryClass):
        """Return the Oracle-specific Query subclass (LIMIT/OFFSET emulation)."""
        return query.query_class(DefaultQueryClass, Database)
    def quote_name(self, name):
        # SQL92 requires delimited (quoted) names to be case-sensitive. When
        # not quoted, Oracle has case-insensitive behavior for identifiers, but
        # always defaults to uppercase.
        # We simplify things by making Oracle identifiers always uppercase.
        if not name.startswith('"') and not name.endswith('"'):
            name = '"%s"' % util.truncate_name(name.upper(), self.max_name_length())
        return name.upper()
    def random_function_sql(self):
        return "DBMS_RANDOM.RANDOM"
    def regex_lookup_9(self, lookup_type):
        # Oracle 9i has no REGEXP_LIKE.
        raise NotImplementedError("Regexes are not supported in Oracle before version 10g.")
    def regex_lookup_10(self, lookup_type):
        # 'c' = case-sensitive match, 'i' = case-insensitive (iregex).
        if lookup_type == 'regex':
            match_option = "'c'"
        else:
            match_option = "'i'"
        return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option
    def regex_lookup(self, lookup_type):
        # If regex_lookup is called before it's been initialized, then create
        # a cursor to initialize it and recur.
        # NOTE(review): presumably connection setup rebinds regex_lookup to
        # the version-specific variant above -- confirm, otherwise this
        # recursion would not terminate.
        from django.db import connection
        connection.cursor()
        return connection.ops.regex_lookup(lookup_type)
    def sql_flush(self, style, tables, sequences):
        """Return SQL statements that empty the given tables and reset
        their sequences."""
        # Return a list of 'TRUNCATE x;', 'TRUNCATE y;',
        # 'TRUNCATE z;'... style SQL statements
        if tables:
            # Oracle does support TRUNCATE, but it seems to get us into
            # FK referential trouble, whereas DELETE FROM table works.
            sql = ['%s %s %s;' % \
                    (style.SQL_KEYWORD('DELETE'),
                     style.SQL_KEYWORD('FROM'),
                     style.SQL_FIELD(self.quote_name(table))
                     ) for table in tables]
            # Since we've just deleted all the rows, running our sequence
            # ALTER code will reset the sequence to 0.
            for sequence_info in sequences:
                sequence_name = get_sequence_name(sequence_info['table'])
                table_name = self.quote_name(sequence_info['table'])
                column_name = self.quote_name(sequence_info['column'] or 'id')
                # (Local name shadows the imported `query` module inside this
                # method only.)
                query = _get_sequence_reset_sql() % {'sequence': sequence_name,
                                                     'table': table_name,
                                                     'column': column_name}
                sql.append(query)
            return sql
        else:
            return []
    def sequence_reset_sql(self, style, model_list):
        """Return SQL realigning each model's PK / M2M sequences with the
        data currently in the tables."""
        from django.db import models
        output = []
        query = _get_sequence_reset_sql()
        for model in model_list:
            for f in model._meta.local_fields:
                if isinstance(f, models.AutoField):
                    table_name = self.quote_name(model._meta.db_table)
                    sequence_name = get_sequence_name(model._meta.db_table)
                    column_name = self.quote_name(f.column)
                    output.append(query % {'sequence': sequence_name,
                                           'table': table_name,
                                           'column': column_name})
                    break # Only one AutoField is allowed per model, so don't bother continuing.
            for f in model._meta.many_to_many:
                # M2M join tables always use an implicit "id" surrogate key.
                table_name = self.quote_name(f.m2m_db_table())
                sequence_name = get_sequence_name(f.m2m_db_table())
                column_name = self.quote_name('id')
                output.append(query % {'sequence': sequence_name,
                                       'table': table_name,
                                       'column': column_name})
        return output
    def start_transaction_sql(self):
        # Oracle starts transactions implicitly; no statement required.
        return ''
    def tablespace_sql(self, tablespace, inline=False):
        # inline=True produces the clause used inside an index definition.
        return "%sTABLESPACE %s" % ((inline and "USING INDEX " or ""), self.quote_name(tablespace))
    def value_to_db_time(self, value):
        """Coerce a time value to the datetime the driver expects (Oracle
        has no standalone TIME type; 1900-01-01 is the dummy date)."""
        # NOTE(review): 'datetime' and 'time' are not imported by name in
        # this module; presumably they arrive via
        # 'from django.db.backends import *' -- confirm, otherwise this
        # raises NameError at runtime.
        if value is None:
            return None
        if isinstance(value, basestring):
            return datetime.datetime(*(time.strptime(value, '%H:%M:%S')[:6]))
        return datetime.datetime(1900, 1, 1, value.hour, value.minute,
                                 value.second, value.microsecond)
    def year_lookup_bounds_for_date_field(self, value):
        # Inclusive date-string bounds covering the whole given year.
        first = '%s-01-01'
        second = '%s-12-31'
        return [first % value, second % value]
class SettingsModuleAsDict(DictMixin):
    """Expose a settings module through the mapping protocol, translating
    item access into attribute access on the wrapped module."""
    def __init__(self, module):
        self.module = module
    def __getitem__(self, name):
        return getattr(self.module, name)
    def __setitem__(self, name, value):
        setattr(self.module, name, value)
    def __delitem__(self, name):
        delattr(self.module, name)
    def keys(self):
        # DictMixin derives iteration and the rest from keys().
        return dir(self.module)
class DatabaseWrapper(BaseDatabaseWrapper):
    # Django lookup -> Oracle operator template. LIKEC compares in the
    # national character set; the UPPER() variants implement the
    # case-insensitive lookups.
    operators = {
        'exact': '= %s',
        'iexact': '= UPPER(%s)',
        'contains': "LIKEC %s ESCAPE '\\'",
        'icontains': "LIKEC UPPER(%s) ESCAPE '\\'",
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': "LIKEC %s ESCAPE '\\'",
        'endswith': "LIKEC %s ESCAPE '\\'",
        'istartswith': "LIKEC UPPER(%s) ESCAPE '\\'",
        'iendswith': "LIKEC UPPER(%s) ESCAPE '\\'",
    }
    # Oracle server version; None until detected elsewhere (see
    # DatabaseOperations.regex_lookup) -- TODO confirm who sets this.
    oracle_version = None
    def __init__(self, *args, **kwargs):
        """Wire up the backend helper objects Django expects on a wrapper."""
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        self.features = DatabaseFeatures(self)
        self.ops = DatabaseOperations(self)
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = BaseDatabaseValidation(self)
    def _valid_connection(self):
        # A connection is considered valid as soon as it exists; there is
        # no ping/liveness check here.
        return self.connection is not None
    def _cursor(self, *args):
        # Extra positional arguments (passed by some Django versions) are
        # deliberately ignored.
        settings_dict = self.settings_dict
        return self._cursor_from_settings_dict(settings_dict)
    def _cursor_from_settings_dict(self, settings_dict):
        """Return a wrapped cursor, opening the JDBC connection on first use."""
        # NOTE(review): the bare CursorWrapper name used below resolves to a
        # *module-level* name, not the class attribute defined at the end of
        # this class -- confirm it is supplied by the star import above,
        # otherwise these calls raise NameError.
        cursor = None
        if self.connection is None:
            # Configure and connect to database using zxJDBC
            if settings_dict['NAME'] == '':
                from django.core.exceptions import ImproperlyConfigured
                raise ImproperlyConfigured("You need to specify NAME in your Django settings file.")
            host = settings_dict['HOST'] or 'localhost'
            port = (settings_dict['PORT']
                    and (':%s' % settings_dict['PORT'])
                    or '')
            # Thin-driver URL: jdbc:oracle:thin:@host[:port]:SID
            conn_string = "jdbc:oracle:thin:@%s%s:%s" % (host, port,
                                                         settings_dict['NAME'])
            self.connection = Database.connect(
                    conn_string,
                    settings_dict['USER'],
                    settings_dict['PASSWORD'],
                    "oracle.jdbc.OracleDriver",
                    **settings_dict['OPTIONS'])
            # make transactions transparent to all cursors
            cursor = CursorWrapper(self.connection.cursor())
            # Set oracle date to ansi date format. This only needs to execute
            # once when we create a new connection.
            cursor.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD' "
                           "NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'")
            try:
                # Statement caching is a cx_Oracle feature; the zxJDBC
                # connection may not support it, so failures are ignored.
                self.connection.stmtcachesize = 20
            except:
                pass
        if not cursor:
            cursor = CursorWrapper(self.connection.cursor())
        return cursor
    # Cursor wrapper class used when handing out cursors for this backend.
    CursorWrapper = zxJDBCCursorWrapperOracle
def _get_sequence_reset_sql():
# TODO: colorize this SQL code with style.SQL_KEYWORD(), etc.
return """
DECLARE
startvalue integer;
cval integer;
BEGIN
LOCK TABLE %(table)s IN SHARE MODE;
SELECT NVL(MAX(%(column)s), 0) INTO startvalue FROM %(table)s;
SELECT %(sequence)s.nextval INTO cval FROM dual;
cval := startvalue - cval;
IF cval != 0 THEN
EXECUTE IMMEDIATE 'ALTER SEQUENCE %(sequence)s MINVALUE 0 INCREMENT BY '||cval;
SELECT %(sequence)s.nextval INTO cval FROM dual;
EXECUTE IMMEDIATE 'ALTER SEQUENCE %(sequence)s INCREMENT BY 1';
END IF;
COMMIT;
END;
/"""
def get_sequence_name(table):
    # Sequence names are "<TABLE>_SQ", truncated so the suffix still fits
    # within Oracle's 30-character identifier limit.
    # NOTE(review): DatabaseOperations() is instantiated with no arguments
    # here, while DatabaseWrapper.__init__ passes the connection
    # (DatabaseOperations(self)) -- confirm both call forms are valid for
    # the BaseDatabaseOperations of this Django version.
    name_length = DatabaseOperations().max_name_length() - 3
    return '%s_SQ' % util.truncate_name(table, name_length).upper()
def get_trigger_name(table):
    # Trigger names are "<TABLE>_TR", truncated so the suffix still fits
    # within Oracle's 30-character identifier limit.
    # NOTE(review): as with get_sequence_name, DatabaseOperations() is
    # called without the connection argument used elsewhere -- confirm.
    name_length = DatabaseOperations().max_name_length() - 3
    return '%s_TR' % util.truncate_name(table, name_length).upper()
| Python |
# Empty file. And this comment is to keep patch/diff happy
| Python |
# Package version: (major, minor, micro, release level, serial).
VERSION = (1, 1, 2, 'alpha', 0)
| Python |
"""Support for writing JUnit XML test results for the regrtest"""
import os
import re
import sys
import time
import traceback
import unittest
from StringIO import StringIO
from xml.sax import saxutils
# Invalid XML characters (control chars)
EVIL_CHARACTERS_RE = re.compile(r"[\000-\010\013\014\016-\037]")
class JUnitXMLTestRunner:
    """A unittest runner that writes results to a JUnit XML file in
    xml_dir
    """
    def __init__(self, xml_dir):
        self.xml_dir = xml_dir
    def run(self, test):
        """Run the given test (case or suite), emit the XML report, and
        return the result object."""
        outcome = JUnitXMLTestResult(self.xml_dir)
        test(outcome)
        outcome.write_xml()
        return outcome
class JUnitXMLTestResult(unittest.TestResult):
    """JUnit XML test result writer.
    The name of the file written to is determined from the full module
    name of the first test ran
    """
    def __init__(self, xml_dir):
        unittest.TestResult.__init__(self)
        self.xml_dir = xml_dir
        # The module name of the first test ran
        self.module_name = None
        # All TestCases
        self.tests = []
        # Start time
        self.start = None
        # Capture everything written to stdout/stderr for the duration of
        # the run, while still echoing it to the real streams; write_xml()
        # restores the originals.
        self.old_stdout = sys.stdout
        self.old_stderr = sys.stderr
        sys.stdout = self.stdout = Tee(sys.stdout)
        sys.stderr = self.stderr = Tee(sys.stderr)
    def startTest(self, test):
        unittest.TestResult.startTest(self, test)
        self.ensure_module_name(test)
        # Per-test bookkeeping; consumed by stopTest().
        self.error, self.failure = None, None
        self.start = time.time()
    def stopTest(self, test):
        # Record the duration and outcome of the test that just finished.
        took = time.time() - self.start
        unittest.TestResult.stopTest(self, test)
        args = [test, took]
        if self.error:
            args.extend(['error', self.error])
        elif self.failure:
            args.extend(['failure', self.failure])
        self.tests.append(TestInfo.from_testcase(*args))
    def addError(self, test, err):
        # err is a (type, value, traceback) triple, as from sys.exc_info().
        unittest.TestResult.addError(self, test, err)
        self.error = err
    def addFailure(self, test, err):
        unittest.TestResult.addFailure(self, test, err)
        self.failure = err
    def ensure_module_name(self, test):
        """Set self.module_name from test if not already set"""
        if not self.module_name:
            # Drop the trailing method name to leave the dotted module+class
            # path of the first test.
            self.module_name = '.'.join(test.id().split('.')[:-1])
    def write_xml(self):
        """Restore stdout/stderr and write TEST-<module>.xml into xml_dir."""
        if not self.module_name:
            # No tests ran, nothing to write
            return
        # NOTE(review): self.start was last reset by startTest, so this
        # elapsed time covers only the final test, not the whole run --
        # confirm whether that is intended.
        took = time.time() - self.start
        stdout = self.stdout.getvalue()
        stderr = self.stderr.getvalue()
        sys.stdout = self.old_stdout
        sys.stderr = self.old_stderr
        ensure_dir(self.xml_dir)
        filename = os.path.join(self.xml_dir, 'TEST-%s.xml' % self.module_name)
        print "JUnitXMLRunner: Writing test results to %s " % \
                os.path.abspath(filename)
        stream = open(filename, 'w')
        write_testsuite_xml(stream, len(self.tests), len(self.errors),
                            len(self.failures), 0, self.module_name, took)
        for info in self.tests:
            info.write_xml(stream)
        write_stdouterr_xml(stream, stdout, stderr)
        stream.write('</testsuite>')
        stream.close()
class TestInfo(object):
    """The JUnit XML <testcase/> model."""
    def __init__(self, class_name, name, took, type=None, exc_info=None):
        # Identity of the test case.
        self.class_name = class_name
        self.name = name
        # Duration in seconds.
        self.took = took
        # One of 'error', 'failure', 'skipped', or None for a success.
        self.type = type
        if not exc_info:
            self.exc_name = self.message = self.traceback = ''
        else:
            self.exc_name = exc_name(exc_info)
            self.message = exc_message(exc_info)
            self.traceback = safe_str(''.join(
                traceback.format_exception(*exc_info)))
    @classmethod
    def from_testcase(cls, testcase, took, type=None, exc_info=None):
        """Build a TestInfo by splitting the TestCase id into class + method."""
        class_name, name = testcase.id().rsplit('.', 1)
        return cls(class_name, name, took, type, exc_info)
    def write_xml(self, stream):
        """Serialize this test as a <testcase/> element on stream."""
        opening = ' <testcase classname="%s" name="%s" time="%.3f"' % (
            self.class_name, self.name, self.took)
        stream.write(opening)
        if not self.type:
            # Successful test: self-closing tag, nothing more to emit.
            stream.write('/>\n')
            return
        stream.write('>\n <%s type="%s" message=%s><![CDATA[%s]]></%s>\n' %
                (self.type, self.exc_name, saxutils.quoteattr(self.message),
                 escape_cdata(self.traceback), self.type))
        stream.write(' </testcase>\n')
class Tee(StringIO):
    """A StringIO that mirrors every write and flush to a second stream."""
    def __init__(self, stream):
        StringIO.__init__(self)
        self._mirror = stream
    def write(self, data):
        """Buffer data locally and forward it to the mirrored stream."""
        StringIO.write(self, data)
        self._mirror.write(data)
    def flush(self):
        """Flush both the local buffer and the mirrored stream."""
        StringIO.flush(self)
        self._mirror.flush()
def write_testsuite_xml(stream, tests, errors, failures, skipped, name, took):
    """Write the XML declaration and the opening <testsuite ...> tag with
    the aggregate counters."""
    stream.write('<?xml version="1.0" encoding="utf-8"?>\n')
    header = ('<testsuite tests="%d" errors="%d" failures="%d" '
              % (tests, errors, failures))
    header += ('skipped="%d" name="%s" time="%.3f">\n'
               % (skipped, name, took))
    stream.write(header)
def write_stdouterr_xml(stream, stdout, stderr):
    """Write the captured stdout/stderr (if any) as <system-out> and
    <system-err> CDATA sections."""
    for tag, captured in (('system-out', stdout), ('system-err', stderr)):
        if captured:
            stream.write(' <%s><![CDATA[%s]]></%s>\n' %
                    (tag, escape_cdata(safe_str(captured)), tag))
def write_direct_test(junit_xml_dir, name, took, type=None, exc_info=None,
                      stdout=None, stderr=None):
    """Write XML for a regrtest 'direct' test; a test which was ran on
    import (which we label as __main__.__import__)
    """
    module_name = '%s.__main__' % name
    return write_manual_test(junit_xml_dir, module_name, '__import__',
                             took, type, exc_info, stdout, stderr)
def write_doctest(junit_xml_dir, name, took, type=None, exc_info=None,
                  stdout=None, stderr=None):
    """Write XML for a regrtest doctest, labeled as __main__.__doc__"""
    module_name = '%s.__main__' % name
    return write_manual_test(junit_xml_dir, module_name, '__doc__',
                             took, type, exc_info, stdout, stderr)
def write_manual_test(junit_xml_dir, module_name, test_name, took, type=None,
                      exc_info=None, stdout=None, stderr=None):
    """Manually write XML for one test, outside of unittest"""
    # At most one of the counters is 1, matching the outcome type (all are
    # 0 for a success).
    errors = 1 if type == 'error' else 0
    failures = 1 if type == 'failure' else 0
    skipped = 1 if type == 'skipped' else 0
    ensure_dir(junit_xml_dir)
    xml_path = os.path.join(junit_xml_dir, 'TEST-%s.xml' % module_name)
    stream = open(xml_path, 'w')
    write_testsuite_xml(stream, 1, errors, failures, skipped, module_name,
                        took)
    TestInfo(module_name, test_name, took, type, exc_info).write_xml(stream)
    write_stdouterr_xml(stream, stdout, stderr)
    stream.write('</testsuite>')
    stream.close()
def ensure_dir(dir):
    """Ensure dir (including any missing parent directories) exists."""
    try:
        # makedirs creates intermediate directories too; the old
        # exists-then-mkdir approach failed on nested paths and raced with
        # concurrent creators.
        os.makedirs(dir)
    except OSError:
        # Re-raise only if the path still isn't a directory (e.g. a real
        # permission problem rather than "already exists").
        if not os.path.isdir(dir):
            raise
def exc_name(exc_info):
    """Return the fully qualified class name of the exception in exc_info."""
    exc_class = exc_info[1].__class__
    module = getattr(exc_class, '__module__', '')
    if module:
        return '%s.%s' % (module, exc_class.__name__)
    return exc_class.__name__
def exc_message(exc_info):
    """Safely return a short message passed through safe_str describing
    exc_info, being careful of unicode values.
    """
    exc = exc_info[1]
    if exc is None:
        # Old-style string exceptions: exc_info[0] carries the message.
        return safe_str(exc_info[0])
    # Python 2 only: BaseException.message may hold the original unicode
    # message; prefer it over str(exc), which would try to encode it.
    if isinstance(exc, BaseException) and isinstance(exc.message, unicode):
        return safe_str(exc.message)
    try:
        return safe_str(str(exc))
    except UnicodeEncodeError:
        try:
            val = unicode(exc)
            return safe_str(val)
        except UnicodeDecodeError:
            # Not representable either way; give up rather than crash the
            # report writer.
            return '?'
def escape_cdata(cdata):
    """Escape a string for an XML CDATA section by splitting any embedded
    ']]>' terminator across two adjacent CDATA sections."""
    return ']]>]]><![CDATA['.join(cdata.split(']]>'))
def safe_str(base):
    """Return a str valid for UTF-8 XML from a basestring"""
    if isinstance(base, unicode):
        return remove_evil(base.encode('utf-8', 'replace'))
    # Byte strings are round-tripped through UTF-8 so invalid sequences are
    # replaced instead of producing malformed XML.
    return remove_evil(base.decode('utf-8', 'replace').encode('utf-8',
            'replace'))
def remove_evil(string):
    """Remove control characters from a string"""
    # Substitutes '?' for the control characters XML 1.0 forbids (see the
    # module-level EVIL_CHARACTERS_RE).
    return EVIL_CHARACTERS_RE.sub('?', string)
| Python |
"""
Django test runner to to spit out JUnit-compatible XML reports. Uses a slightly
adapted copy of Jython's JUnitXMLTestRunner
"""
import unittest
from doj.test.xmlrunner.junitxmlrunner import JUnitXMLTestRunner
from django.conf import settings
from django.test.utils import setup_test_environment, teardown_test_environment
from django.test.simple import build_test, build_suite
from django.db.models import get_app, get_apps
def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=None):
    """
    Run the unit tests for all the test labels in the provided list.
    Labels must be of the form:
    - app.TestClass.test_method
        Run a single specific test method
    - app.TestClass
        Run all the test methods in a given class
    - app
        Search for doctests and unittests in the named application.
    When looking for tests, the test runner will look in the models and
    tests modules for the application.
    A list of 'extra' tests may also be provided; these tests
    will be added to the test suite.
    Returns the number of tests that failed.
    """
    # None sentinel instead of a mutable [] default, which would be shared
    # across calls.
    if extra_tests is None:
        extra_tests = []
    setup_test_environment()
    settings.DEBUG = False
    suite = unittest.TestSuite()
    if test_labels:
        for label in test_labels:
            # Dotted labels address a class or method; bare labels an app.
            if '.' in label:
                suite.addTest(build_test(label))
            else:
                app = get_app(label)
                suite.addTest(build_suite(app))
    else:
        for app in get_apps():
            suite.addTest(build_suite(app))
    for test in extra_tests:
        suite.addTest(test)
    # Swap in a throwaway test database for the duration of the run.
    old_name = settings.DATABASE_NAME
    from django.db import connection
    connection.creation.create_test_db(verbosity, autoclobber=not interactive)
    result = JUnitXMLTestRunner('.').run(suite)
    connection.creation.destroy_test_db(old_name, verbosity)
    teardown_test_environment()
    return len(result.failures) + len(result.errors)
| Python |
#!/usr/bin/env python
import time
t = time.time()
u = time.gmtime(t)
s = time.strftime('%a, %e %b %Y %T GMT', u)
print 'Content-Type: text/javascript'
print 'Cache-Control: no-cache'
print 'Date: ' + s
print 'Expires: ' + s
print ''
print 'var timeskew = new Date().getTime() - ' + str(t*1000) + ';'
| Python |
import os
from google.appengine.ext.webapp import template
import cgi
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
class Greeting(db.Model):
  """Datastore model for a single guestbook entry."""
  # Google account of the poster; left unset for anonymous posts.
  author = db.UserProperty()
  # Free-form message text; multiline permits embedded newlines.
  content = db.StringProperty(multiline=True)
  # Creation timestamp, filled in automatically on first put().
  date = db.DateTimeProperty(auto_now_add=True)
class MainPage(webapp.RequestHandler):
  """Renders the ten most recent greetings plus a login/logout link."""

  def get(self):
    # Newest entries first, capped at ten.
    recent = Greeting.all().order('-date').fetch(10)
    if users.get_current_user():
      link_url = users.create_logout_url(self.request.uri)
      link_text = 'Logout'
    else:
      link_url = users.create_login_url(self.request.uri)
      link_text = 'Login'
    context = {
        'greetings': recent,
        'url': link_url,
        'url_linktext': link_text,
    }
    template_path = os.path.join(os.path.dirname(__file__), 'index.html')
    self.response.out.write(template.render(template_path, context))
class Guestbook(webapp.RequestHandler):
  """Accepts guestbook form posts and redirects back to the main page."""

  def post(self):
    entry = Greeting()
    # Only attribute an author when someone is signed in.
    user = users.get_current_user()
    if user:
      entry.author = user
    entry.content = self.request.get('content')
    entry.put()
    self.redirect('/')
# URL routing for the guestbook app; debug=True renders tracebacks in the
# browser on error.
application = webapp.WSGIApplication(
    [('/', MainPage),
     ('/sign', Guestbook)],
    debug=True)

def main():
  """CGI entry point: hand the WSGI application to the App Engine runtime."""
  run_wsgi_app(application)

if __name__ == "__main__":
  main()
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Convenience wrapper for starting an appengine tool."""
import os
import sys
if not hasattr(sys, 'version_info'):
sys.stderr.write('Very old versions of Python are not supported. Please '
'use version 2.5 or greater.\n')
sys.exit(1)
version_tuple = tuple(sys.version_info[:2])
if version_tuple < (2, 4):
sys.stderr.write('Error: Python %d.%d is not supported. Please use '
'version 2.5 or greater.\n' % version_tuple)
sys.exit(1)
if version_tuple == (2, 4):
sys.stderr.write('Warning: Python 2.4 is not supported; this program may '
'break. Please use version 2.5 or greater.\n')
DIR_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
SCRIPT_DIR = os.path.join(DIR_PATH, 'google', 'appengine', 'tools')
EXTRA_PATHS = [
DIR_PATH,
os.path.join(DIR_PATH, 'lib', 'antlr3'),
os.path.join(DIR_PATH, 'lib', 'django'),
os.path.join(DIR_PATH, 'lib', 'webob'),
os.path.join(DIR_PATH, 'lib', 'yaml', 'lib'),
]
SCRIPT_EXCEPTIONS = {
"dev_appserver.py" : "dev_appserver_main.py"
}
def run_file(file_path, globals_, script_dir=SCRIPT_DIR):
"""Execute the file at the specified path with the passed-in globals."""
sys.path = EXTRA_PATHS + sys.path
script_name = os.path.basename(file_path)
script_name = SCRIPT_EXCEPTIONS.get(script_name, script_name)
script_path = os.path.join(script_dir, script_name)
execfile(script_path, globals_)
if __name__ == '__main__':
run_file(__file__, globals())
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Convenience wrapper for starting bulkload_client.py"""
import os
import sys
sys.stderr.write("This version of bulkload_client.py has been deprecated; "
"please use the version at the root of your Google App "
"Engine SDK install.")
if not hasattr(sys, 'version_info'):
sys.stderr.write('Very old versions of Python are not supported. Please '
'use version 2.5 or greater.\n')
sys.exit(1)
version_tuple = tuple(sys.version_info[:2])
if version_tuple < (2, 4):
sys.stderr.write('Error: Python %d.%d is not supported. Please use '
'version 2.5 or greater.\n' % version_tuple)
sys.exit(1)
if version_tuple == (2, 4):
sys.stderr.write('Warning: Python 2.4 is not supported; this program may '
'break. Please use version 2.5 or greater.\n')
BULKLOAD_CLIENT_PATH = 'google/appengine/tools/bulkload_client.py'
DIR_PATH = os.path.abspath(os.path.dirname(
os.path.dirname(os.path.realpath(__file__))))
EXTRA_PATHS = [
DIR_PATH,
os.path.join(DIR_PATH, 'lib', 'django'),
os.path.join(DIR_PATH, 'lib', 'webob'),
os.path.join(DIR_PATH, 'lib', 'yaml', 'lib'),
]
if __name__ == '__main__':
sys.path = EXTRA_PATHS + sys.path
script_path = os.path.join(DIR_PATH, BULKLOAD_CLIENT_PATH)
execfile(script_path, globals())
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Convenience wrapper for starting bulkload_client.py"""
import os
import sys
sys.stderr.write("This version of bulkload_client.py has been deprecated; "
"please use the version at the root of your Google App "
"Engine SDK install.")
if not hasattr(sys, 'version_info'):
sys.stderr.write('Very old versions of Python are not supported. Please '
'use version 2.5 or greater.\n')
sys.exit(1)
version_tuple = tuple(sys.version_info[:2])
if version_tuple < (2, 4):
sys.stderr.write('Error: Python %d.%d is not supported. Please use '
'version 2.5 or greater.\n' % version_tuple)
sys.exit(1)
if version_tuple == (2, 4):
sys.stderr.write('Warning: Python 2.4 is not supported; this program may '
'break. Please use version 2.5 or greater.\n')
BULKLOAD_CLIENT_PATH = 'google/appengine/tools/bulkload_client.py'
DIR_PATH = os.path.abspath(os.path.dirname(
os.path.dirname(os.path.realpath(__file__))))
EXTRA_PATHS = [
DIR_PATH,
os.path.join(DIR_PATH, 'lib', 'django'),
os.path.join(DIR_PATH, 'lib', 'webob'),
os.path.join(DIR_PATH, 'lib', 'yaml', 'lib'),
]
if __name__ == '__main__':
sys.path = EXTRA_PATHS + sys.path
script_path = os.path.join(DIR_PATH, BULKLOAD_CLIENT_PATH)
execfile(script_path, globals())
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Convenience wrapper for starting an appengine tool."""
import os
import sys
if not hasattr(sys, 'version_info'):
sys.stderr.write('Very old versions of Python are not supported. Please '
'use version 2.5 or greater.\n')
sys.exit(1)
version_tuple = tuple(sys.version_info[:2])
if version_tuple < (2, 4):
sys.stderr.write('Error: Python %d.%d is not supported. Please use '
'version 2.5 or greater.\n' % version_tuple)
sys.exit(1)
if version_tuple == (2, 4):
sys.stderr.write('Warning: Python 2.4 is not supported; this program may '
'break. Please use version 2.5 or greater.\n')
DIR_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
SCRIPT_DIR = os.path.join(DIR_PATH, 'google', 'appengine', 'tools')
EXTRA_PATHS = [
DIR_PATH,
os.path.join(DIR_PATH, 'lib', 'antlr3'),
os.path.join(DIR_PATH, 'lib', 'django'),
os.path.join(DIR_PATH, 'lib', 'webob'),
os.path.join(DIR_PATH, 'lib', 'yaml', 'lib'),
]
SCRIPT_EXCEPTIONS = {
"dev_appserver.py" : "dev_appserver_main.py"
}
def run_file(file_path, globals_, script_dir=SCRIPT_DIR):
"""Execute the file at the specified path with the passed-in globals."""
sys.path = EXTRA_PATHS + sys.path
script_name = os.path.basename(file_path)
script_name = SCRIPT_EXCEPTIONS.get(script_name, script_name)
script_path = os.path.join(script_dir, script_name)
execfile(script_path, globals_)
if __name__ == '__main__':
run_file(__file__, globals())
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Convenience wrapper for starting an appengine tool."""
import os
import sys
if not hasattr(sys, 'version_info'):
sys.stderr.write('Very old versions of Python are not supported. Please '
'use version 2.5 or greater.\n')
sys.exit(1)
version_tuple = tuple(sys.version_info[:2])
if version_tuple < (2, 4):
sys.stderr.write('Error: Python %d.%d is not supported. Please use '
'version 2.5 or greater.\n' % version_tuple)
sys.exit(1)
if version_tuple == (2, 4):
sys.stderr.write('Warning: Python 2.4 is not supported; this program may '
'break. Please use version 2.5 or greater.\n')
DIR_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
SCRIPT_DIR = os.path.join(DIR_PATH, 'google', 'appengine', 'tools')
EXTRA_PATHS = [
DIR_PATH,
os.path.join(DIR_PATH, 'lib', 'antlr3'),
os.path.join(DIR_PATH, 'lib', 'django'),
os.path.join(DIR_PATH, 'lib', 'webob'),
os.path.join(DIR_PATH, 'lib', 'yaml', 'lib'),
]
SCRIPT_EXCEPTIONS = {
"dev_appserver.py" : "dev_appserver_main.py"
}
def run_file(file_path, globals_, script_dir=SCRIPT_DIR):
"""Execute the file at the specified path with the passed-in globals."""
sys.path = EXTRA_PATHS + sys.path
script_name = os.path.basename(file_path)
script_name = SCRIPT_EXCEPTIONS.get(script_name, script_name)
script_path = os.path.join(script_dir, script_name)
execfile(script_path, globals_)
if __name__ == '__main__':
run_file(__file__, globals())
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Convenience wrapper for starting an appengine tool."""
import os
import sys
if not hasattr(sys, 'version_info'):
sys.stderr.write('Very old versions of Python are not supported. Please '
'use version 2.5 or greater.\n')
sys.exit(1)
version_tuple = tuple(sys.version_info[:2])
if version_tuple < (2, 4):
sys.stderr.write('Error: Python %d.%d is not supported. Please use '
'version 2.5 or greater.\n' % version_tuple)
sys.exit(1)
if version_tuple == (2, 4):
sys.stderr.write('Warning: Python 2.4 is not supported; this program may '
'break. Please use version 2.5 or greater.\n')
DIR_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
SCRIPT_DIR = os.path.join(DIR_PATH, 'google', 'appengine', 'tools')
EXTRA_PATHS = [
DIR_PATH,
os.path.join(DIR_PATH, 'lib', 'antlr3'),
os.path.join(DIR_PATH, 'lib', 'django'),
os.path.join(DIR_PATH, 'lib', 'webob'),
os.path.join(DIR_PATH, 'lib', 'yaml', 'lib'),
]
SCRIPT_EXCEPTIONS = {
"dev_appserver.py" : "dev_appserver_main.py"
}
def run_file(file_path, globals_, script_dir=SCRIPT_DIR):
"""Execute the file at the specified path with the passed-in globals."""
sys.path = EXTRA_PATHS + sys.path
script_name = os.path.basename(file_path)
script_name = SCRIPT_EXCEPTIONS.get(script_name, script_name)
script_path = os.path.join(script_dir, script_name)
execfile(script_path, globals_)
if __name__ == '__main__':
run_file(__file__, globals())
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Convenience wrapper for starting an appengine tool."""
import os
import sys
if not hasattr(sys, 'version_info'):
sys.stderr.write('Very old versions of Python are not supported. Please '
'use version 2.5 or greater.\n')
sys.exit(1)
version_tuple = tuple(sys.version_info[:2])
if version_tuple < (2, 4):
sys.stderr.write('Error: Python %d.%d is not supported. Please use '
'version 2.5 or greater.\n' % version_tuple)
sys.exit(1)
if version_tuple == (2, 4):
sys.stderr.write('Warning: Python 2.4 is not supported; this program may '
'break. Please use version 2.5 or greater.\n')
DIR_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
SCRIPT_DIR = os.path.join(DIR_PATH, 'google', 'appengine', 'tools')
EXTRA_PATHS = [
DIR_PATH,
os.path.join(DIR_PATH, 'lib', 'antlr3'),
os.path.join(DIR_PATH, 'lib', 'django'),
os.path.join(DIR_PATH, 'lib', 'webob'),
os.path.join(DIR_PATH, 'lib', 'yaml', 'lib'),
]
SCRIPT_EXCEPTIONS = {
"dev_appserver.py" : "dev_appserver_main.py"
}
def run_file(file_path, globals_, script_dir=SCRIPT_DIR):
"""Execute the file at the specified path with the passed-in globals."""
sys.path = EXTRA_PATHS + sys.path
script_name = os.path.basename(file_path)
script_name = SCRIPT_EXCEPTIONS.get(script_name, script_name)
script_path = os.path.join(script_dir, script_name)
execfile(script_path, globals_)
if __name__ == '__main__':
run_file(__file__, globals())
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Convenience wrapper for starting an appengine tool."""
import os
import sys
if not hasattr(sys, 'version_info'):
sys.stderr.write('Very old versions of Python are not supported. Please '
'use version 2.5 or greater.\n')
sys.exit(1)
version_tuple = tuple(sys.version_info[:2])
if version_tuple < (2, 4):
sys.stderr.write('Error: Python %d.%d is not supported. Please use '
'version 2.5 or greater.\n' % version_tuple)
sys.exit(1)
if version_tuple == (2, 4):
sys.stderr.write('Warning: Python 2.4 is not supported; this program may '
'break. Please use version 2.5 or greater.\n')
DIR_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
SCRIPT_DIR = os.path.join(DIR_PATH, 'google', 'appengine', 'tools')
EXTRA_PATHS = [
DIR_PATH,
os.path.join(DIR_PATH, 'lib', 'antlr3'),
os.path.join(DIR_PATH, 'lib', 'django'),
os.path.join(DIR_PATH, 'lib', 'webob'),
os.path.join(DIR_PATH, 'lib', 'yaml', 'lib'),
]
SCRIPT_EXCEPTIONS = {
"dev_appserver.py" : "dev_appserver_main.py"
}
def run_file(file_path, globals_, script_dir=SCRIPT_DIR):
"""Execute the file at the specified path with the passed-in globals."""
sys.path = EXTRA_PATHS + sys.path
script_name = os.path.basename(file_path)
script_name = SCRIPT_EXCEPTIONS.get(script_name, script_name)
script_path = os.path.join(script_dir, script_name)
execfile(script_path, globals_)
if __name__ == '__main__':
run_file(__file__, globals())
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import wsgiref.handlers
from google.appengine.ext import webapp
class MainHandler(webapp.RequestHandler):
  """Handles GET requests to the application root."""

  def get(self):
    # Plain-text placeholder response.
    self.response.out.write('Hello world!')
def main():
  """CGI entry point: build the WSGI application and run it under CGI."""
  application = webapp.WSGIApplication([('/', MainHandler)],
                                       debug=True)
  wsgiref.handlers.CGIHandler().run(application)

if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import wsgiref.handlers
from google.appengine.ext import webapp
class MainHandler(webapp.RequestHandler):
  """Handles GET requests to the application root."""

  def get(self):
    # Plain-text placeholder response.
    self.response.out.write('Hello world!')
def main():
  """CGI entry point: build the WSGI application and run it under CGI."""
  application = webapp.WSGIApplication([('/', MainHandler)],
                                       debug=True)
  wsgiref.handlers.CGIHandler().run(application)

if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Convenience wrapper for starting an appengine tool."""
import os
import sys
if not hasattr(sys, 'version_info'):
sys.stderr.write('Very old versions of Python are not supported. Please '
'use version 2.5 or greater.\n')
sys.exit(1)
version_tuple = tuple(sys.version_info[:2])
if version_tuple < (2, 4):
sys.stderr.write('Error: Python %d.%d is not supported. Please use '
'version 2.5 or greater.\n' % version_tuple)
sys.exit(1)
if version_tuple == (2, 4):
sys.stderr.write('Warning: Python 2.4 is not supported; this program may '
'break. Please use version 2.5 or greater.\n')
DIR_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
SCRIPT_DIR = os.path.join(DIR_PATH, 'google', 'appengine', 'tools')
EXTRA_PATHS = [
DIR_PATH,
os.path.join(DIR_PATH, 'lib', 'antlr3'),
os.path.join(DIR_PATH, 'lib', 'django'),
os.path.join(DIR_PATH, 'lib', 'webob'),
os.path.join(DIR_PATH, 'lib', 'yaml', 'lib'),
]
SCRIPT_EXCEPTIONS = {
"dev_appserver.py" : "dev_appserver_main.py"
}
def run_file(file_path, globals_, script_dir=SCRIPT_DIR):
"""Execute the file at the specified path with the passed-in globals."""
sys.path = EXTRA_PATHS + sys.path
script_name = os.path.basename(file_path)
script_name = SCRIPT_EXCEPTIONS.get(script_name, script_name)
script_path = os.path.join(script_dir, script_name)
execfile(script_path, globals_)
if __name__ == '__main__':
run_file(__file__, globals())
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Generic exceptions.
"""
class TimeoutException(Exception):
  """Raised when an operation fails to complete within its time limit."""

  def __init__(self, msg=""):
    super(TimeoutException, self).__init__(msg)
class NestedException(Exception):
  """Wraps an exc_info triple so it can be inspected or re-raised later."""

  def __init__(self, exc_info):
    # The exception value (index 1) becomes this exception's message.
    super(NestedException, self).__init__(exc_info[1])
    self.exc_info_ = exc_info

  def exc_info(self):
    """Return the wrapped (type, value, traceback) triple."""
    return self.exc_info_
class AbstractMethod(Exception):
  """Signals that a method is abstract and must be overridden.

  Example::

      class Foo:
          def Bar(self):
              raise gexcept.AbstractMethod
  """

  def __init__(self):
    super(AbstractMethod, self).__init__()
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Generic exceptions.
"""
class TimeoutException(Exception):
  """Raised when an operation fails to complete within its time limit."""

  def __init__(self, msg=""):
    super(TimeoutException, self).__init__(msg)
class NestedException(Exception):
  """Wraps an exc_info triple so it can be inspected or re-raised later."""

  def __init__(self, exc_info):
    # The exception value (index 1) becomes this exception's message.
    super(NestedException, self).__init__(exc_info[1])
    self.exc_info_ = exc_info

  def exc_info(self):
    """Return the wrapped (type, value, traceback) triple."""
    return self.exc_info_
class AbstractMethod(Exception):
  """Signals that a method is abstract and must be overridden.

  Example::

      class Foo:
          def Bar(self):
              raise gexcept.AbstractMethod
  """

  def __init__(self):
    super(AbstractMethod, self).__init__()
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Sends email on behalf of application.
Provides functions for application developers to provide email services
for their applications. Also provides a few utility methods.
"""
from email import MIMEBase
from email import MIMEMultipart
from email import MIMEText
import mimetypes
import types
from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import mail_service_pb
from google.appengine.api import users
from google.appengine.api.mail_errors import *
from google.appengine.runtime import apiproxy_errors
# Maps MailService error codes returned by the API proxy onto the exception
# types raised to application code (imported from mail_errors above).
ERROR_MAP = {
    mail_service_pb.MailServiceError.BAD_REQUEST:
      BadRequestError,
    mail_service_pb.MailServiceError.UNAUTHORIZED_SENDER:
      InvalidSenderError,
    mail_service_pb.MailServiceError.INVALID_ATTACHMENT_TYPE:
      InvalidAttachmentTypeError,
}

# File extensions permitted for attachments; everything else is rejected
# (see InvalidAttachmentTypeError above).
EXTENSION_WHITELIST = set([
    'bmp',
    'css',
    'csv',
    'gif',
    'html', 'htm',
    'jpeg', 'jpg', 'jpe',
    'pdf',
    'png',
    'rss',
    'text', 'txt', 'asc', 'diff', 'pot',
    'tiff', 'tif',
    'wbmp',
])
def invalid_email_reason(email_address, field):
  """Determine the reason why an email address is invalid.

  Args:
    email_address: Email to check; may be a string or a users.User.
    field: Name of the message field the address came from, used in the
      returned message text.

  Returns:
    String describing why the email is invalid, or None if it is valid.
  """
  if email_address is None:
    return 'None email address for %s.' % field
  # A User object stands in for its email address.
  if isinstance(email_address, users.User):
    email_address = email_address.email()
  if not isinstance(email_address, types.StringTypes):
    return 'Invalid email address type for %s.' % field
  if not email_address.strip():
    return 'Empty email address for %s.' % field
  return None

InvalidEmailReason = invalid_email_reason
def is_email_valid(email_address):
  """Determine if an email address is valid.

  Args:
    email_address: Email to check.

  Returns:
    True if email is valid, else False.
  """
  reason = invalid_email_reason(email_address, '')
  return reason is None

IsEmailValid = is_email_valid
def check_email_valid(email_address, field):
  """Check that an email address is valid, raising if it is not.

  Args:
    email_address: Email to check.
    field: Name of the message field the address came from (used in the
      error message).

  Raises:
    InvalidEmailError if email_address is invalid.
  """
  failure = invalid_email_reason(email_address, field)
  if failure is not None:
    raise InvalidEmailError(failure)

CheckEmailValid = check_email_valid
def _email_check_and_list(emails, field):
  """Validate a single email or every email in a sequence.

  Args:
    emails: Single email string or an iterable of emails.
    field: Name of the message field being validated, used in error text.

  Raises:
    InvalidEmailError if any email address is invalid.
  """
  if isinstance(emails, types.StringTypes):
    # Bug fix: this branch previously called check_email_valid(value),
    # which raised NameError ('value' is undefined) and also dropped the
    # required field argument.  Validate the single address itself.
    check_email_valid(emails, field)
  else:
    for address in iter(emails):
      check_email_valid(address, field)
def _email_sequence(emails):
  """Forces email to be sequenceable type.

  Iterable values are returned as is.  This function really just wraps the
  case where there is a single email string.

  Args:
    emails: Emails (or email) to coerce to sequence.

  Returns:
    Single tuple with email in it if only one email string provided,
    else returns emails as is.
  """
  return (emails,) if isinstance(emails, types.StringTypes) else emails
def _attachment_sequence(attachments):
  """Forces attachments to be sequenceable type.

  Iterable values are returned as is.  This function really just wraps the
  case where there is a single (filename, data) attachment tuple.

  Args:
    attachments: Attachments (or attachment) to coerce to sequence.

  Returns:
    Single tuple with the attachment tuple in it if only one attachment
    provided, else returns attachments as is.
  """
  # A lone attachment is a 2-item pair whose first element is the filename.
  is_single = (len(attachments) == 2
               and isinstance(attachments[0], types.StringTypes))
  return (attachments,) if is_single else attachments
def send_mail(sender,
              to,
              subject,
              body,
              make_sync_call=apiproxy_stub_map.MakeSyncCall,
              **kw):
  """Sends mail on behalf of application.

  Args:
    sender: Sender email address as appears in the 'from' email line.
    to: List of 'to' addresses or a single address.
    subject: Message subject string.
    body: Body of type text/plain.
    make_sync_call: Function used to make sync call to API proxy.
    kw: Keyword arguments compatible with EmailMessage keyword based
      constructor.

  Raises:
    InvalidEmailError when invalid email address provided.
  """
  # Fold the positional fields into the keyword dict and delegate to the
  # EmailMessage object, which performs validation and the API call.
  kw.update(sender=sender, to=to, subject=subject, body=body)
  EmailMessage(**kw).send(make_sync_call)

SendMail = send_mail
def send_mail_to_admins(sender,
                        subject,
                        body,
                        make_sync_call=apiproxy_stub_map.MakeSyncCall,
                        **kw):
  """Sends mail to the application's admins on behalf of application.

  Args:
    sender: Sender email address as appears in the 'from' email line.
    subject: Message subject string.
    body: Body of type text/plain.
    make_sync_call: Function used to make sync call to API proxy.
    kw: Keyword arguments compatible with EmailMessage keyword based
      constructor.

  Raises:
    InvalidEmailError when invalid email address provided.
  """
  # Fold the positional fields into the keyword dict and delegate to the
  # AdminEmailMessage object, which performs validation and the API call.
  kw.update(sender=sender, subject=subject, body=body)
  AdminEmailMessage(**kw).send(make_sync_call)

SendMailToAdmins = send_mail_to_admins
def mail_message_to_mime_message(protocol_message):
  """Generate a MIMEMultitype message from protocol buffer.

  Generates a complete MIME multi-part email object from a MailMessage
  protocol buffer.  The body fields are sent as individual alternatives
  if they are both present, otherwise, only one body part is sent.

  Multiple entry email fields such as 'To', 'Cc' and 'Bcc' are converted
  to a list of comma separated email addresses.

  Args:
    protocol_message: MailMessage PB to convert to MIMEMultitype.

  Returns:
    MIMEMultitype representing the provided MailMessage.
  """
  parts = []
  if protocol_message.has_textbody():
    parts.append(MIMEText.MIMEText(protocol_message.textbody()))
  if protocol_message.has_htmlbody():
    parts.append(MIMEText.MIMEText(protocol_message.htmlbody(),
                                   _subtype='html'))
  if len(parts) == 1:
    # Only one body variant: attach it directly rather than wrapping it in
    # a multipart/alternative container.
    payload = parts
  else:
    payload = [MIMEMultipart.MIMEMultipart('alternative', _subparts=parts)]
  result = MIMEMultipart.MIMEMultipart(_subparts=payload)
  for attachment in protocol_message.attachment_list():
    mime_type, encoding = mimetypes.guess_type(attachment.filename())
    # Attachments with unrecognized extensions trip this assert; filenames
    # are presumably restricted by earlier validation -- TODO confirm.
    assert mime_type is not None
    maintype, subtype = mime_type.split('/')
    mime_attachment = MIMEBase.MIMEBase(maintype, subtype)
    mime_attachment.add_header('Content-Disposition',
                               'attachment',
                               filename=attachment.filename())
    # NOTE(review): guess_type's second value is a content *encoding*
    # (e.g. 'gzip'), usually None -- passing it to set_charset looks
    # suspicious; confirm intent before relying on it.
    mime_attachment.set_charset(encoding)
    mime_attachment.set_payload(attachment.data())
    result.attach(mime_attachment)
  if protocol_message.to_size():
    result['To'] = ', '.join(protocol_message.to_list())
  if protocol_message.cc_size():
    result['Cc'] = ', '.join(protocol_message.cc_list())
  if protocol_message.bcc_size():
    result['Bcc'] = ', '.join(protocol_message.bcc_list())
  result['From'] = protocol_message.sender()
  result['ReplyTo'] = protocol_message.replyto()
  result['Subject'] = protocol_message.subject()
  return result

MailMessageToMIMEMessage = mail_message_to_mime_message
def _to_str(value):
  """Coerce a value to a UTF-8 encoded byte string when it is unicode.

  Args:
    value: str or unicode to convert to utf-8.

  Returns:
    UTF-8 encoded str for unicode input; any other value is returned
    unchanged.
  """
  if not isinstance(value, unicode):
    return value
  return value.encode('utf-8')
class _EmailMessageBase(object):
"""Base class for email API service objects.
Subclasses must define a class variable called _API_CALL with the name
of its underlying mail sending API call.
"""
PROPERTIES = set([
'sender',
'reply_to',
'subject',
'body',
'html',
'attachments',
])
def __init__(self, **kw):
"""Initialize Email message.
Creates new MailMessage protocol buffer and initializes it with any
keyword arguments.
Args:
kw: List of keyword properties as defined by PROPERTIES.
"""
self.initialize(**kw)
def initialize(self, **kw):
"""Keyword initialization.
Used to set all fields of the email message using keyword arguments.
Args:
kw: List of keyword properties as defined by PROPERTIES.
"""
for name, value in kw.iteritems():
setattr(self, name, value)
def Initialize(self, **kw):
self.initialize(**kw)
def check_initialized(self):
"""Check if EmailMessage is properly initialized.
Test used to determine if EmailMessage meets basic requirements
for being used with the mail API. This means that the following
fields must be set or have at least one value in the case of
multi value fields:
- Subject must be set.
- A recipient must be specified.
- Must contain a body.
This check does not include determining if the sender is actually
authorized to send email for the application.
Raises:
Appropriate exception for initialization failure.
InvalidAttachmentTypeError: Use of incorrect attachment type.
MissingRecipientsError: No recipients specified in to, cc or bcc.
MissingSenderError: No sender specified.
MissingSubjectError: Subject is not specified.
MissingBodyError: No body specified.
"""
if not hasattr(self, 'sender'):
raise MissingSenderError()
if not hasattr(self, 'subject'):
raise MissingSubjectError()
if not hasattr(self, 'body') and not hasattr(self, 'html'):
raise MissingBodyError()
if hasattr(self, 'attachments'):
for filename, data in _attachment_sequence(self.attachments):
split_filename = filename.split('.')
if len(split_filename) < 2:
raise InvalidAttachmentTypeError()
if split_filename[-1] not in EXTENSION_WHITELIST:
raise InvalidAttachmentTypeError()
mime_type, encoding = mimetypes.guess_type(filename)
if mime_type is None:
raise InvalidAttachmentTypeError()
def CheckInitialized(self):
self.check_initialized()
def is_initialized(self):
"""Determine if EmailMessage is properly initialized.
Returns:
True if message is properly initializes, otherwise False.
"""
try:
self.check_initialized()
return True
except Error:
return False
def IsInitialized(self):
return self.is_initialized()
def ToProto(self):
"""Convert mail message to protocol message.
Unicode strings are converted to UTF-8 for all fields.
This method is overriden by EmailMessage to support the sender fields.
Returns:
MailMessage protocol version of mail message.
"""
self.check_initialized()
message = mail_service_pb.MailMessage()
message.set_sender(_to_str(self.sender))
if hasattr(self, 'reply_to'):
message.set_replyto(_to_str(self.reply_to))
message.set_subject(_to_str(self.subject))
if hasattr(self, 'body'):
message.set_textbody(_to_str(self.body))
if hasattr(self, 'html'):
message.set_htmlbody(_to_str(self.html))
if hasattr(self, 'attachments'):
for file_name, data in _attachment_sequence(self.attachments):
attachment = message.add_attachment()
attachment.set_filename(_to_str(file_name))
attachment.set_data(_to_str(data))
return message
def to_mime_message(self):
"""Generate a MIMEMultitype message from EmailMessage.
Calls MailMessageToMessage after converting self to protocol
buffer. Protocol buffer is better at handing corner cases
than EmailMessage class.
Returns:
MIMEMultitype representing the provided MailMessage.
Raises:
Appropriate exception for initialization failure.
InvalidAttachmentTypeError: Use of incorrect attachment type.
MissingSenderError: No sender specified.
MissingSubjectError: Subject is not specified.
MissingBodyError: No body specified.
"""
return mail_message_to_mime_message(self.ToProto())
def ToMIMEMessage(self):
return self.to_mime_message()
def send(self, make_sync_call=apiproxy_stub_map.MakeSyncCall):
"""Send email message.
Send properly initialized email message via email API.
Args:
make_sync_call: Method which will make synchronous call to api proxy.
Raises:
Errors defined in this file above.
"""
message = self.ToProto()
response = api_base_pb.VoidProto()
try:
make_sync_call('mail', self._API_CALL, message, response)
except apiproxy_errors.ApplicationError, e:
if e.application_error in ERROR_MAP:
raise ERROR_MAP[e.application_error](e.error_detail)
raise e
def Send(self, *args, **kwds):
self.send(*args, **kwds)
def _check_attachment(self, attachment):
file_name, data = attachment
if not (isinstance(file_name, types.StringTypes) or
isinstance(data, types.StringTypes)):
raise TypeError()
def _check_attachments(self, attachments):
"""Checks values going to attachment field.
Mainly used to check type safety of the values. Each value of the list
must be a pair of the form (file_name, data), and both values a string
type.
Args:
attachments: Collection of attachment tuples.
Raises:
TypeError if values are not string type.
"""
if len(attachments) == 2 and isinstance(attachments[0], types.StringTypes):
self._check_attachment(attachments)
else:
for attachment in attachments:
self._check_attachment(attachment)
def __setattr__(self, attr, value):
"""Property setting access control.
Controls write access to email fields.
Args:
attr: Attribute to access.
value: New value for field.
"""
if attr in ['sender', 'reply_to']:
check_email_valid(value, attr)
if not value:
raise ValueError('May not set empty value for \'%s\'' % attr)
if attr not in self.PROPERTIES:
raise AttributeError('\'EmailMessage\' has no attribute \'%s\'' % attr)
if attr == 'attachments':
self._check_attachments(value)
super(_EmailMessageBase, self).__setattr__(attr, value)
class EmailMessage(_EmailMessageBase):
  """Main interface to email API service.

  This class is used to programmatically build an email message to send via
  the Mail API. The usage is to construct an instance, populate its fields
  and call Send().

  Example Usage:
    An EmailMessage can be built completely by the constructor.

      EmailMessage(sender='sender@nowhere.com',
                   to='recipient@nowhere.com',
                   subject='a subject',
                   body='This is an email to you').Send()

    It might be desirable for an application to build an email in different
    places throughout the code. For this, EmailMessage is mutable.

      message = EmailMessage()
      message.sender = 'sender@nowhere.com'
      message.to = ['recipient1@nowhere.com', 'recipient2@nowhere.com']
      message.subject = 'a subject'
      message.body = 'This is an email to you'
      message.check_initialized()
      message.send()
  """

  _API_CALL = 'Send'
  # NOTE(review): this binds the SAME set object as the base class and then
  # mutates it, so 'to'/'cc'/'bcc' leak into _EmailMessageBase.PROPERTIES and
  # every other subclass (e.g. AdminEmailMessage).  Possibly intentional,
  # since AdminEmailMessage documents that recipient fields are accepted but
  # ignored -- confirm before changing this to an independent copy.
  PROPERTIES = _EmailMessageBase.PROPERTIES
  PROPERTIES.update(('to', 'cc', 'bcc'))

  def check_initialized(self):
    """Provide additional checks to ensure recipients have been specified.

    Raises:
      MissingRecipientError when no recipients specified in to, cc or bcc.
    """
    if (not hasattr(self, 'to') and
        not hasattr(self, 'cc') and
        not hasattr(self, 'bcc')):
      raise MissingRecipientsError()
    super(EmailMessage, self).check_initialized()

  def CheckInitialized(self):
    self.check_initialized()

  def ToProto(self):
    """Does addition conversion of recipient fields to protocol buffer.

    Returns:
      MailMessage protocol version of mail message including sender fields.
    """
    message = super(EmailMessage, self).ToProto()
    # Copy each multi-valued recipient field into the PB via its adder.
    for attribute, adder in (('to', message.add_to),
                             ('cc', message.add_cc),
                             ('bcc', message.add_bcc)):
      if hasattr(self, attribute):
        for address in _email_sequence(getattr(self, attribute)):
          adder(_to_str(address))
    return message

  def __setattr__(self, attr, value):
    """Provides additional checks on recipient fields."""
    if attr in ['to', 'cc', 'bcc']:
      # A single string is validated directly; any other value is assumed
      # to be a sequence of addresses and validated element-wise.
      if isinstance(value, types.StringTypes):
        check_email_valid(value, attr)
      else:
        _email_check_and_list(value, attr)
    super(EmailMessage, self).__setattr__(attr, value)
class AdminEmailMessage(_EmailMessageBase):
  """Interface to sending email messages to all admins via the mail API.

  This class is used to programmatically build an admin email message to send
  via the Mail API. The usage is to construct an instance, populate its fields
  and call Send().

  Unlike the normal email message, addresses in the recipient fields are
  ignored and not used for sending.

  Example Usage:
    An AdminEmailMessage can be built completely by the constructor.

      AdminEmailMessage(sender='sender@nowhere.com',
                        subject='a subject',
                        body='This is an email to you').Send()

    It might be desirable for an application to build an admin email in
    different places throughout the code. For this, AdminEmailMessage is
    mutable.

      message = AdminEmailMessage()
      message.sender = 'sender@nowhere.com'
      message.subject = 'a subject'
      message.body = 'This is an email to you'
      message.check_initialized()
      message.send()
  """

  # RPC that delivers the message to every application administrator.
  _API_CALL = 'SendToAdmins'
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""CronInfo tools.
A library for working with CronInfo records, describing cron entries for an
application. Supports loading the records from yaml.
"""
import logging
import sys
import traceback
try:
import pytz
except ImportError:
pytz = None
from google.appengine.cron import groc
from google.appengine.api import validation
from google.appengine.api import yaml_builder
from google.appengine.api import yaml_listener
from google.appengine.api import yaml_object
# Validation patterns for the cron.yaml fields below:
# url must be an absolute path; timezone and description are simple
# length-bounded free text.
_URL_REGEX = r'^/.*$'
_TIMEZONE_REGEX = r'^.{0,100}$'
_DESCRIPTION_REGEX = r'^.{0,499}$'
class GrocValidator(validation.Validator):
"""Checks that a schedule is in valid groc format."""
def Validate(self, value):
"""Validates a schedule."""
if value is None:
raise validation.MissingAttribute('schedule must be specified')
if not isinstance(value, basestring):
raise TypeError('schedule must be a string, not \'%r\''%type(value))
schedule = groc.CreateParser(value)
try:
schedule.timespec()
except groc.GrocException, e:
raise validation.ValidationError('schedule \'%s\' failed to parse: %s'%(
value, e.args[0]))
return value
class TimezoneValidator(validation.Validator):
  """Checks that a timezone can be correctly parsed and is known."""

  def Validate(self, value):
    """Validates a timezone.

    Args:
      value: timezone name, or None when absent.

    Returns:
      The timezone name (or None when absent).  The name is accepted
      unchecked when pytz is unavailable or pytz hits an IOError loading
      its data files.

    Raises:
      TypeError: when the value is not a string.
      validation.ValidationError: when pytz does not know the timezone.
    """
    if value is None:
      return
    if not isinstance(value, basestring):
      raise TypeError("timezone must be a string, not '%r'" % type(value))
    if pytz is None:
      return value
    try:
      pytz.timezone(value)
    except pytz.UnknownTimeZoneError:
      raise validation.ValidationError("timezone '%s' is unknown" % value)
    except IOError:
      return value
    except:
      # Anything else out of pytz is unexpected: log the full traceback,
      # then let the error propagate.
      unused_type, exc_value, tb = sys.exc_info()
      logging.warning("pytz raised an unexpected error: %s.\n" % (exc_value) +
                      "Traceback:\n" + "\n".join(traceback.format_tb(tb)))
      raise
    return value
# Attribute key names used in cron.yaml and in the ATTRIBUTES maps below.
CRON = 'cron'
URL = 'url'
SCHEDULE = 'schedule'
TIMEZONE = 'timezone'
DESCRIPTION = 'description'
class MalformedCronfigurationFile(Exception):
  """Configuration file for Cron is malformed."""
  # NOTE(review): the name misspells 'Configuration'; kept as-is because
  # external callers may catch this exception by its current name.
  pass
class CronEntry(validation.Validated):
  """A cron entry describes a single cron job.

  Validated attributes:
    url: request path to invoke; must start with '/'.
    schedule: required groc-format schedule string.
    timezone: optional timezone name, checked against pytz when available.
    description: optional free-form description (under 500 characters).
  """
  ATTRIBUTES = {
      URL: _URL_REGEX,
      SCHEDULE: GrocValidator(),
      TIMEZONE: TimezoneValidator(),
      DESCRIPTION: validation.Optional(_DESCRIPTION_REGEX)
  }
class CronInfoExternal(validation.Validated):
  """CronInfoExternal describes all cron entries for an application."""
  # 'cron': optional repeated list of CronEntry jobs.
  ATTRIBUTES = {
      CRON: validation.Optional(validation.Repeated(CronEntry))
  }
def LoadSingleCron(cron_info):
  """Load a cron.yaml file or string and return a CronInfoExternal object.

  Args:
    cron_info: file-like object or string holding the cron configuration.

  Returns:
    The single parsed CronInfoExternal object.

  Raises:
    MalformedCronfigurationFile: when the input yields zero cron sections,
      or more than one.
  """
  object_builder = yaml_object.ObjectBuilder(CronInfoExternal)
  event_handler = yaml_builder.BuilderHandler(object_builder)
  yaml_listener.EventListener(event_handler).Parse(cron_info)
  parsed_sections = event_handler.GetResults()
  if not parsed_sections:
    raise MalformedCronfigurationFile('Empty cron configuration.')
  if len(parsed_sections) > 1:
    raise MalformedCronfigurationFile('Multiple cron sections '
                                      'in configuration.')
  return parsed_sections[0]
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Logging utilities for use by applications.
Classes defined here:
AppLogsHandler: StreamHandler subclass
"""
import logging
import sys
import types
# Sentinel substituted for embedded newlines so that each log record stays
# on a single line of the app-logs stream (see _AppLogsMessage below).
NEWLINE_REPLACEMENT = "\0"
class AppLogsHandler(logging.StreamHandler):
"""Logging handler that will direct output to a persistent store of
application logs.
This handler will output log statements to stderr. This handler is
automatically initialized and attached to the Python common logging library.
"""
def __init__(self, stream=None):
"""Constructor.
Args:
# stream is optional. it defaults to sys.stderr.
stream: destination for output
"""
logging.StreamHandler.__init__(self, stream)
def close(self):
"""Closes the stream.
This implementation based on the implementation of FileHandler.close()."""
self.flush()
self.stream.close()
logging.StreamHandler.close(self)
def emit(self, record):
"""Emit a record.
This implementation is based on the implementation of
StreamHandler.emit()."""
try:
message = self._AppLogsMessage(record)
self.stream.write(message.encode("UTF-8"))
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def _AppLogsMessage(self, record):
"""Converts the log record into a log line."""
message = self.format(record).replace("\n", NEWLINE_REPLACEMENT)
return "LOG %d %d %s\n" % (self._AppLogsLevel(record.levelno),
long(record.created * 1000 * 1000),
message)
def _AppLogsLevel(self, level):
"""Converts the logging level used in Python to the API logging level"""
if level >= logging.CRITICAL:
return 4
elif level >= logging.ERROR:
return 3
elif level >= logging.WARNING:
return 2
elif level >= logging.INFO:
return 1
else:
return 0
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stub version of the images API."""
import logging
import StringIO
try:
import PIL
from PIL import _imaging
from PIL import Image
except ImportError:
import _imaging
import Image
from google.appengine.api import apiproxy_stub
from google.appengine.api import images
from google.appengine.api.images import images_service_pb
from google.appengine.runtime import apiproxy_errors
class ImagesServiceStub(apiproxy_stub.APIProxyStub):
  """Stub version of images API to be used with the dev_appserver."""

  def __init__(self, service_name='images'):
    """Preloads PIL to load all modules in the unhardened environment.

    Args:
      service_name: Service name expected for all calls.
    """
    super(ImagesServiceStub, self).__init__(service_name)
    Image.init()

  def _Dynamic_Transform(self, request, response):
    """Trivial implementation of ImagesService::Transform.

    Based off documentation of the PIL library at
    http://www.pythonware.com/library/pil/handbook/index.htm

    Args:
      request: ImagesTransformRequest, contains image request info.
      response: ImagesTransformResponse, contains transformed image.

    Raises:
      apiproxy_errors.ApplicationError: when the request carries no image
        data, the data cannot be opened, or the format is unsupported.
    """
    image = request.image().content()
    if not image:
      raise apiproxy_errors.ApplicationError(
          images_service_pb.ImagesServiceError.NOT_IMAGE)

    image = StringIO.StringIO(image)
    try:
      original_image = Image.open(image)
    except IOError:
      raise apiproxy_errors.ApplicationError(
          images_service_pb.ImagesServiceError.BAD_IMAGE_DATA)

    img_format = original_image.format
    if img_format not in ("BMP", "GIF", "ICO", "JPEG", "PNG", "TIFF"):
      raise apiproxy_errors.ApplicationError(
          images_service_pb.ImagesServiceError.NOT_IMAGE)

    new_image = self._ProcessTransforms(original_image,
                                        request.transform_list())

    response_value = self._EncodeImage(new_image, request.output())
    response.mutable_image().set_content(response_value)

  def _EncodeImage(self, image, output_encoding):
    """Encode the given image and return it in string form.

    Args:
      image: PIL Image object, image to encode.
      output_encoding: ImagesTransformRequest.OutputSettings object.

    Returns:
      str with encoded image information in given encoding format.
    """
    image_string = StringIO.StringIO()

    image_encoding = "PNG"

    if (output_encoding.mime_type() == images_service_pb.OutputSettings.JPEG):
      image_encoding = "JPEG"

      # JPEG has no alpha channel; flatten to RGB before saving.
      image = image.convert("RGB")

    image.save(image_string, image_encoding)

    return image_string.getvalue()

  def _ValidateCropArg(self, arg):
    """Check an argument for the Crop transform.

    Args:
      arg: float, argument to Crop transform to check.

    Raises:
      apiproxy_errors.ApplicationError on problem with argument.
    """
    if not isinstance(arg, float):
      raise apiproxy_errors.ApplicationError(
          images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)

    if not (0 <= arg <= 1.0):
      raise apiproxy_errors.ApplicationError(
          images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)

  def _CalculateNewDimensions(self,
                              current_width,
                              current_height,
                              req_width,
                              req_height):
    """Get new resize dimensions keeping the current aspect ratio.

    This uses the more restricting of the two requested values to determine
    the new ratio.

    Args:
      current_width: int, current width of the image.
      current_height: int, current height of the image.
      req_width: int, requested new width of the image.
      req_height: int, requested new height of the image.

    Returns:
      tuple (width, height) which are both ints of the new ratio.
    """
    width_ratio = float(req_width) / current_width
    height_ratio = float(req_height) / current_height

    # A zero requested dimension means "unconstrained", so the other axis
    # drives the scale; otherwise the smaller ratio wins.
    if req_width == 0 or (width_ratio > height_ratio and req_height != 0):
      return int(height_ratio * current_width), req_height
    else:
      return req_width, int(width_ratio * current_height)

  def _Resize(self, image, transform):
    """Use PIL to resize the given image with the given transform.

    Args:
      image: PIL.Image.Image object to resize.
      transform: images_service_pb.Transform to use when resizing.

    Returns:
      PIL.Image.Image with transforms performed on it.

    Raises:
      BadRequestError if the resize data given is bad.
    """
    width = 0
    height = 0

    # Each requested dimension must lie in [0, 4000].
    if transform.has_width():
      width = transform.width()
      if width < 0 or 4000 < width:
        raise apiproxy_errors.ApplicationError(
            images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)

    if transform.has_height():
      height = transform.height()
      if height < 0 or 4000 < height:
        raise apiproxy_errors.ApplicationError(
            images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)

    current_width, current_height = image.size
    new_width, new_height = self._CalculateNewDimensions(current_width,
                                                         current_height,
                                                         width,
                                                         height)

    return image.resize((new_width, new_height), Image.ANTIALIAS)

  def _Rotate(self, image, transform):
    """Use PIL to rotate the given image with the given transform.

    Args:
      image: PIL.Image.Image object to rotate.
      transform: images_service_pb.Transform to use when rotating.

    Returns:
      PIL.Image.Image with transforms performed on it.

    Raises:
      BadRequestError if the rotate data given is bad.
    """
    degrees = transform.rotate()
    if degrees < 0 or degrees % 90 != 0:
      raise apiproxy_errors.ApplicationError(
          images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
    degrees %= 360

    # The API rotates clockwise while PIL rotates counterclockwise, so
    # invert the angle.
    degrees = 360 - degrees
    return image.rotate(degrees)

  def _Crop(self, image, transform):
    """Use PIL to crop the given image with the given transform.

    Args:
      image: PIL.Image.Image object to crop.
      transform: images_service_pb.Transform to use when cropping.

    Returns:
      PIL.Image.Image with transforms performed on it.

    Raises:
      BadRequestError if the crop data given is bad.
    """
    left_x = 0.0
    top_y = 0.0
    right_x = 1.0
    bottom_y = 1.0

    if transform.has_crop_left_x():
      left_x = transform.crop_left_x()
      self._ValidateCropArg(left_x)

    if transform.has_crop_top_y():
      top_y = transform.crop_top_y()
      self._ValidateCropArg(top_y)

    if transform.has_crop_right_x():
      right_x = transform.crop_right_x()
      self._ValidateCropArg(right_x)

    if transform.has_crop_bottom_y():
      bottom_y = transform.crop_bottom_y()
      self._ValidateCropArg(bottom_y)

    width, height = image.size

    # Build the crop box from the validated local values gathered above.
    # (The previous code re-read the raw transform fields here, silently
    # bypassing the validated defaults.)
    box = (int(left_x * width),
           int(top_y * height),
           int(right_x * width),
           int(bottom_y * height))

    return image.crop(box)

  def _ProcessTransforms(self, image, transforms):
    """Execute PIL operations based on transform values.

    Args:
      image: PIL.Image.Image instance, image to manipulate.
      transforms: list of ImagesTransformRequest.Transform objects.

    Returns:
      PIL.Image.Image with transforms performed on it.

    Raises:
      BadRequestError if we are passed more than one of the same type of
      transform.
    """
    new_image = image
    if len(transforms) > images.MAX_TRANSFORMS_PER_REQUEST:
      raise apiproxy_errors.ApplicationError(
          images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
    for transform in transforms:
      if transform.has_width() or transform.has_height():
        new_image = self._Resize(new_image, transform)

      elif transform.has_rotate():
        new_image = self._Rotate(new_image, transform)

      elif transform.has_horizontal_flip():
        new_image = new_image.transpose(Image.FLIP_LEFT_RIGHT)

      elif transform.has_vertical_flip():
        new_image = new_image.transpose(Image.FLIP_TOP_BOTTOM)

      elif (transform.has_crop_left_x() or
            transform.has_crop_top_y() or
            transform.has_crop_right_x() or
            transform.has_crop_bottom_y()):
        new_image = self._Crop(new_image, transform)

      elif transform.has_autolevels():
        # Autolevels is not implemented in the stub; it only takes effect
        # once the application is deployed.
        logging.info("I'm Feeling Lucky autolevels will be visible once this "
                     "application is deployed.")
      else:
        # Fixed doubled word in the original message
        # ("Found no transformations found to perform.").
        logging.warn("Found no transformations to perform.")

    return new_image
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A NotImplemented Images API stub for when the PIL library is not found."""
class ImagesNotImplementedServiceStub(object):
  """Stub version of images API which raises a NotImplementedError."""

  def MakeSyncCall(self, service, call, request, response):
    """Main entry point.

    Args:
      service: str, must be 'images'.
      call: str, name of the RPC to make, must be part of ImagesService.
      request: pb object, corresponding args to the 'call' argument.
      response: pb object, return value for the 'call' argument.

    Raises:
      NotImplementedError: always, since the PIL library required to
        service images requests is not installed.
    """
    message = ("Unable to find the Python PIL library. Please "
               "view the SDK documentation for details about "
               "installing PIL on your system.")
    raise NotImplementedError(message)
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.net.proto import ProtocolBuffer
import array
import dummy_thread as thread
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""
class ImagesServiceError(ProtocolBuffer.ProtocolMessage):
  """Generated protocol message scoping the images service error codes.

  The message carries no fields of its own; only the ErrorCode enum
  values below are meaningful.
  """

  # ErrorCode enum values.
  UNSPECIFIED_ERROR = 1
  BAD_TRANSFORM_DATA = 2
  NOT_IMAGE = 3
  BAD_IMAGE_DATA = 4
  IMAGE_TOO_LARGE = 5

  # Reverse mapping from enum value to symbolic name.
  _ErrorCode_NAMES = {
    1: "UNSPECIFIED_ERROR",
    2: "BAD_TRANSFORM_DATA",
    3: "NOT_IMAGE",
    4: "BAD_IMAGE_DATA",
    5: "IMAGE_TOO_LARGE",
  }

  # Returns the symbolic name for an ErrorCode value, or "" if unknown.
  def ErrorCode_Name(cls, x): return cls._ErrorCode_NAMES.get(x, "")
  ErrorCode_Name = classmethod(ErrorCode_Name)

  def __init__(self, contents=None):
    pass
    if contents is not None: self.MergeFromString(contents)

  def MergeFrom(self, x):
    # No fields to merge; only guard against merging a message into itself.
    assert x is not self

  def Equals(self, x):
    # All instances compare equal since the message has no fields.
    if x is self: return 1
    return 1

  def IsInitialized(self, debug_strs=None):
    initialized = 1
    return initialized

  def ByteSize(self):
    n = 0
    return n + 0

  def Clear(self):
    pass

  def OutputUnchecked(self, out):
    pass

  def TryMerge(self, d):
    # Skip over any (unknown) fields present in the wire data.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    return res

  _TEXT = (
   "ErrorCode",
  )

  _TYPES = (
   ProtocolBuffer.Encoder.NUMERIC,
  )

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
class ImagesServiceTransform(ProtocolBuffer.ProtocolMessage):
  """Generated protocol message scoping the transform type codes.

  The message carries no fields of its own; only the Type enum values
  below are meaningful.
  """

  # Type enum values.
  RESIZE = 1
  ROTATE = 2
  HORIZONTAL_FLIP = 3
  VERTICAL_FLIP = 4
  CROP = 5
  IM_FEELING_LUCKY = 6

  # Reverse mapping from enum value to symbolic name.
  _Type_NAMES = {
    1: "RESIZE",
    2: "ROTATE",
    3: "HORIZONTAL_FLIP",
    4: "VERTICAL_FLIP",
    5: "CROP",
    6: "IM_FEELING_LUCKY",
  }

  # Returns the symbolic name for a Type value, or "" if unknown.
  def Type_Name(cls, x): return cls._Type_NAMES.get(x, "")
  Type_Name = classmethod(Type_Name)

  def __init__(self, contents=None):
    pass
    if contents is not None: self.MergeFromString(contents)

  def MergeFrom(self, x):
    # No fields to merge; only guard against merging a message into itself.
    assert x is not self

  def Equals(self, x):
    # All instances compare equal since the message has no fields.
    if x is self: return 1
    return 1

  def IsInitialized(self, debug_strs=None):
    initialized = 1
    return initialized

  def ByteSize(self):
    n = 0
    return n + 0

  def Clear(self):
    pass

  def OutputUnchecked(self, out):
    pass

  def TryMerge(self, d):
    # Skip over any (unknown) fields present in the wire data.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    return res

  _TEXT = (
   "ErrorCode",
  )

  _TYPES = (
   ProtocolBuffer.Encoder.NUMERIC,
  )

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
class Transform(ProtocolBuffer.ProtocolMessage):
has_width_ = 0
width_ = 0
has_height_ = 0
height_ = 0
has_rotate_ = 0
rotate_ = 0
has_horizontal_flip_ = 0
horizontal_flip_ = 0
has_vertical_flip_ = 0
vertical_flip_ = 0
has_crop_left_x_ = 0
crop_left_x_ = 0.0
has_crop_top_y_ = 0
crop_top_y_ = 0.0
has_crop_right_x_ = 0
crop_right_x_ = 1.0
has_crop_bottom_y_ = 0
crop_bottom_y_ = 1.0
has_autolevels_ = 0
autolevels_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def width(self): return self.width_
def set_width(self, x):
self.has_width_ = 1
self.width_ = x
def clear_width(self):
if self.has_width_:
self.has_width_ = 0
self.width_ = 0
def has_width(self): return self.has_width_
def height(self): return self.height_
def set_height(self, x):
self.has_height_ = 1
self.height_ = x
def clear_height(self):
if self.has_height_:
self.has_height_ = 0
self.height_ = 0
def has_height(self): return self.has_height_
def rotate(self): return self.rotate_
def set_rotate(self, x):
self.has_rotate_ = 1
self.rotate_ = x
def clear_rotate(self):
if self.has_rotate_:
self.has_rotate_ = 0
self.rotate_ = 0
def has_rotate(self): return self.has_rotate_
def horizontal_flip(self): return self.horizontal_flip_
def set_horizontal_flip(self, x):
self.has_horizontal_flip_ = 1
self.horizontal_flip_ = x
def clear_horizontal_flip(self):
if self.has_horizontal_flip_:
self.has_horizontal_flip_ = 0
self.horizontal_flip_ = 0
def has_horizontal_flip(self): return self.has_horizontal_flip_
def vertical_flip(self): return self.vertical_flip_
def set_vertical_flip(self, x):
self.has_vertical_flip_ = 1
self.vertical_flip_ = x
def clear_vertical_flip(self):
if self.has_vertical_flip_:
self.has_vertical_flip_ = 0
self.vertical_flip_ = 0
def has_vertical_flip(self): return self.has_vertical_flip_
def crop_left_x(self): return self.crop_left_x_
def set_crop_left_x(self, x):
self.has_crop_left_x_ = 1
self.crop_left_x_ = x
def clear_crop_left_x(self):
if self.has_crop_left_x_:
self.has_crop_left_x_ = 0
self.crop_left_x_ = 0.0
def has_crop_left_x(self): return self.has_crop_left_x_
def crop_top_y(self): return self.crop_top_y_
def set_crop_top_y(self, x):
self.has_crop_top_y_ = 1
self.crop_top_y_ = x
def clear_crop_top_y(self):
if self.has_crop_top_y_:
self.has_crop_top_y_ = 0
self.crop_top_y_ = 0.0
def has_crop_top_y(self): return self.has_crop_top_y_
def crop_right_x(self): return self.crop_right_x_
def set_crop_right_x(self, x):
self.has_crop_right_x_ = 1
self.crop_right_x_ = x
def clear_crop_right_x(self):
if self.has_crop_right_x_:
self.has_crop_right_x_ = 0
self.crop_right_x_ = 1.0
def has_crop_right_x(self): return self.has_crop_right_x_
def crop_bottom_y(self): return self.crop_bottom_y_
def set_crop_bottom_y(self, x):
self.has_crop_bottom_y_ = 1
self.crop_bottom_y_ = x
def clear_crop_bottom_y(self):
if self.has_crop_bottom_y_:
self.has_crop_bottom_y_ = 0
self.crop_bottom_y_ = 1.0
def has_crop_bottom_y(self): return self.has_crop_bottom_y_
def autolevels(self): return self.autolevels_
def set_autolevels(self, x):
self.has_autolevels_ = 1
self.autolevels_ = x
def clear_autolevels(self):
if self.has_autolevels_:
self.has_autolevels_ = 0
self.autolevels_ = 0
def has_autolevels(self): return self.has_autolevels_
def MergeFrom(self, x):
assert x is not self
if (x.has_width()): self.set_width(x.width())
if (x.has_height()): self.set_height(x.height())
if (x.has_rotate()): self.set_rotate(x.rotate())
if (x.has_horizontal_flip()): self.set_horizontal_flip(x.horizontal_flip())
if (x.has_vertical_flip()): self.set_vertical_flip(x.vertical_flip())
if (x.has_crop_left_x()): self.set_crop_left_x(x.crop_left_x())
if (x.has_crop_top_y()): self.set_crop_top_y(x.crop_top_y())
if (x.has_crop_right_x()): self.set_crop_right_x(x.crop_right_x())
if (x.has_crop_bottom_y()): self.set_crop_bottom_y(x.crop_bottom_y())
if (x.has_autolevels()): self.set_autolevels(x.autolevels())
def Equals(self, x):
if x is self: return 1
if self.has_width_ != x.has_width_: return 0
if self.has_width_ and self.width_ != x.width_: return 0
if self.has_height_ != x.has_height_: return 0
if self.has_height_ and self.height_ != x.height_: return 0
if self.has_rotate_ != x.has_rotate_: return 0
if self.has_rotate_ and self.rotate_ != x.rotate_: return 0
if self.has_horizontal_flip_ != x.has_horizontal_flip_: return 0
if self.has_horizontal_flip_ and self.horizontal_flip_ != x.horizontal_flip_: return 0
if self.has_vertical_flip_ != x.has_vertical_flip_: return 0
if self.has_vertical_flip_ and self.vertical_flip_ != x.vertical_flip_: return 0
if self.has_crop_left_x_ != x.has_crop_left_x_: return 0
if self.has_crop_left_x_ and self.crop_left_x_ != x.crop_left_x_: return 0
if self.has_crop_top_y_ != x.has_crop_top_y_: return 0
if self.has_crop_top_y_ and self.crop_top_y_ != x.crop_top_y_: return 0
if self.has_crop_right_x_ != x.has_crop_right_x_: return 0
if self.has_crop_right_x_ and self.crop_right_x_ != x.crop_right_x_: return 0
if self.has_crop_bottom_y_ != x.has_crop_bottom_y_: return 0
if self.has_crop_bottom_y_ and self.crop_bottom_y_ != x.crop_bottom_y_: return 0
if self.has_autolevels_ != x.has_autolevels_: return 0
if self.has_autolevels_ and self.autolevels_ != x.autolevels_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
  def ByteSize(self):
    """Return the serialized size in bytes of the currently-set fields.

    Per-field costs: varint fields are one tag byte plus the varint length;
    bool fields are two bytes (tag + one-byte value); float fields are five
    bytes (tag + fixed32 payload).
    """
    n = 0
    if (self.has_width_): n += 1 + self.lengthVarInt64(self.width_)
    if (self.has_height_): n += 1 + self.lengthVarInt64(self.height_)
    if (self.has_rotate_): n += 1 + self.lengthVarInt64(self.rotate_)
    if (self.has_horizontal_flip_): n += 2
    if (self.has_vertical_flip_): n += 2
    if (self.has_crop_left_x_): n += 5
    if (self.has_crop_top_y_): n += 5
    if (self.has_crop_right_x_): n += 5
    if (self.has_crop_bottom_y_): n += 5
    if (self.has_autolevels_): n += 2
    return n + 0
def Clear(self):
self.clear_width()
self.clear_height()
self.clear_rotate()
self.clear_horizontal_flip()
self.clear_vertical_flip()
self.clear_crop_left_x()
self.clear_crop_top_y()
self.clear_crop_right_x()
self.clear_crop_bottom_y()
self.clear_autolevels()
  def OutputUnchecked(self, out):
    """Serialize all set fields to the encoder `out`, without validation.

    Each tag value encodes (field_number << 3) | wire_type, e.g. 8 is
    field 1 as a varint and 53 is field 6 as a fixed32 float.
    """
    if (self.has_width_):
      out.putVarInt32(8)
      out.putVarInt32(self.width_)
    if (self.has_height_):
      out.putVarInt32(16)
      out.putVarInt32(self.height_)
    if (self.has_rotate_):
      out.putVarInt32(24)
      out.putVarInt32(self.rotate_)
    if (self.has_horizontal_flip_):
      out.putVarInt32(32)
      out.putBoolean(self.horizontal_flip_)
    if (self.has_vertical_flip_):
      out.putVarInt32(40)
      out.putBoolean(self.vertical_flip_)
    if (self.has_crop_left_x_):
      out.putVarInt32(53)
      out.putFloat(self.crop_left_x_)
    if (self.has_crop_top_y_):
      out.putVarInt32(61)
      out.putFloat(self.crop_top_y_)
    if (self.has_crop_right_x_):
      out.putVarInt32(69)
      out.putFloat(self.crop_right_x_)
    if (self.has_crop_bottom_y_):
      out.putVarInt32(77)
      out.putFloat(self.crop_bottom_y_)
    if (self.has_autolevels_):
      out.putVarInt32(80)
      out.putBoolean(self.autolevels_)
  def TryMerge(self, d):
    """Merge fields decoded from the decoder `d` into this message.

    Tags not recognized here are skipped over; a tag of 0 indicates
    malformed input and raises ProtocolBuffer.ProtocolBufferDecodeError.
    """
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 8:
        self.set_width(d.getVarInt32())
        continue
      if tt == 16:
        self.set_height(d.getVarInt32())
        continue
      if tt == 24:
        self.set_rotate(d.getVarInt32())
        continue
      if tt == 32:
        self.set_horizontal_flip(d.getBoolean())
        continue
      if tt == 40:
        self.set_vertical_flip(d.getBoolean())
        continue
      if tt == 53:
        self.set_crop_left_x(d.getFloat())
        continue
      if tt == 61:
        self.set_crop_top_y(d.getFloat())
        continue
      if tt == 69:
        self.set_crop_right_x(d.getFloat())
        continue
      if tt == 77:
        self.set_crop_bottom_y(d.getFloat())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)
  def __str__(self, prefix="", printElemNumber=0):
    """Render the set fields as debug text, one "name: value" line each."""
    res=""
    if self.has_width_: res+=prefix+("width: %s\n" % self.DebugFormatInt32(self.width_))
    if self.has_height_: res+=prefix+("height: %s\n" % self.DebugFormatInt32(self.height_))
    if self.has_rotate_: res+=prefix+("rotate: %s\n" % self.DebugFormatInt32(self.rotate_))
    if self.has_horizontal_flip_: res+=prefix+("horizontal_flip: %s\n" % self.DebugFormatBool(self.horizontal_flip_))
    if self.has_vertical_flip_: res+=prefix+("vertical_flip: %s\n" % self.DebugFormatBool(self.vertical_flip_))
    if self.has_crop_left_x_: res+=prefix+("crop_left_x: %s\n" % self.DebugFormatFloat(self.crop_left_x_))
    if self.has_crop_top_y_: res+=prefix+("crop_top_y: %s\n" % self.DebugFormatFloat(self.crop_top_y_))
    if self.has_crop_right_x_: res+=prefix+("crop_right_x: %s\n" % self.DebugFormatFloat(self.crop_right_x_))
    if self.has_crop_bottom_y_: res+=prefix+("crop_bottom_y: %s\n" % self.DebugFormatFloat(self.crop_bottom_y_))
    if self.has_autolevels_: res+=prefix+("autolevels: %s\n" % self.DebugFormatBool(self.autolevels_))
    return res
kwidth = 1
kheight = 2
krotate = 3
khorizontal_flip = 4
kvertical_flip = 5
kcrop_left_x = 6
kcrop_top_y = 7
kcrop_right_x = 8
kcrop_bottom_y = 9
kautolevels = 10
_TEXT = (
"ErrorCode",
"width",
"height",
"rotate",
"horizontal_flip",
"vertical_flip",
"crop_left_x",
"crop_top_y",
"crop_right_x",
"crop_bottom_y",
"autolevels",
)
_TYPES = (
ProtocolBuffer.Encoder.NUMERIC,
ProtocolBuffer.Encoder.NUMERIC,
ProtocolBuffer.Encoder.NUMERIC,
ProtocolBuffer.Encoder.NUMERIC,
ProtocolBuffer.Encoder.NUMERIC,
ProtocolBuffer.Encoder.NUMERIC,
ProtocolBuffer.Encoder.FLOAT,
ProtocolBuffer.Encoder.FLOAT,
ProtocolBuffer.Encoder.FLOAT,
ProtocolBuffer.Encoder.FLOAT,
ProtocolBuffer.Encoder.NUMERIC,
)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
class ImageData(ProtocolBuffer.ProtocolMessage):
  """Protocol message holding raw encoded image bytes (required field 1)."""
  has_content_ = 0
  content_ = ""
  def __init__(self, contents=None):
    if contents is not None:
      self.MergeFromString(contents)
  def content(self):
    return self.content_
  def set_content(self, x):
    self.has_content_ = 1
    self.content_ = x
  def clear_content(self):
    if self.has_content_:
      self.has_content_ = 0
      self.content_ = ""
  def has_content(self):
    return self.has_content_
  def MergeFrom(self, x):
    """Copy the content field from x when x has it set."""
    assert x is not self
    if x.has_content():
      self.set_content(x.content())
  def Equals(self, x):
    """Return 1 when x has the same presence flag and content, else 0."""
    if x is self:
      return 1
    if self.has_content_ != x.has_content_:
      return 0
    if self.has_content_ and self.content_ != x.content_:
      return 0
    return 1
  def IsInitialized(self, debug_strs=None):
    """Content is a required field; report 0 (and a note) when unset."""
    if self.has_content_:
      return 1
    if debug_strs is not None:
      debug_strs.append('Required field: content not set.')
    return 0
  def ByteSize(self):
    # One tag byte plus the length-prefixed string payload.
    return 1 + self.lengthString(len(self.content_))
  def Clear(self):
    self.clear_content()
  def OutputUnchecked(self, out):
    out.putVarInt32(10)
    out.putPrefixedString(self.content_)
  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_content(d.getPrefixedString())
        continue
      if tt == 0:
        raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)
  def __str__(self, prefix="", printElemNumber=0):
    pieces = []
    if self.has_content_:
      pieces.append(prefix + ("content: %s\n" % self.DebugFormatString(self.content_)))
    return "".join(pieces)
  # Wire-format field number for content.
  kcontent = 1
  _TEXT = (
   "ErrorCode",
   "content",
  )
  _TYPES = (
   ProtocolBuffer.Encoder.NUMERIC,
   ProtocolBuffer.Encoder.STRING,
  )
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
class OutputSettings(ProtocolBuffer.ProtocolMessage):
  """Protocol message choosing the output encoding of the transformed image.

  Field 1 (mime_type) is an optional enum: PNG (0, the default) or JPEG (1).
  """
  PNG = 0
  JPEG = 1
  _MIME_TYPE_NAMES = {
    0: "PNG",
    1: "JPEG",
  }
  def MIME_TYPE_Name(cls, x):
    """Return the symbolic name for enum value x, or "" when unknown."""
    return cls._MIME_TYPE_NAMES.get(x, "")
  MIME_TYPE_Name = classmethod(MIME_TYPE_Name)
  has_mime_type_ = 0
  mime_type_ = 0
  def __init__(self, contents=None):
    if contents is not None:
      self.MergeFromString(contents)
  def mime_type(self):
    return self.mime_type_
  def set_mime_type(self, x):
    self.has_mime_type_ = 1
    self.mime_type_ = x
  def clear_mime_type(self):
    if self.has_mime_type_:
      self.has_mime_type_ = 0
      self.mime_type_ = 0
  def has_mime_type(self):
    return self.has_mime_type_
  def MergeFrom(self, x):
    """Copy mime_type from x when x has it set."""
    assert x is not self
    if x.has_mime_type():
      self.set_mime_type(x.mime_type())
  def Equals(self, x):
    """Return 1 when x matches both presence flag and value, else 0."""
    if x is self:
      return 1
    if self.has_mime_type_ != x.has_mime_type_:
      return 0
    if self.has_mime_type_ and self.mime_type_ != x.mime_type_:
      return 0
    return 1
  def IsInitialized(self, debug_strs=None):
    # mime_type is optional, so the message is always initialized.
    return 1
  def ByteSize(self):
    size = 0
    if self.has_mime_type_:
      # One tag byte plus the varint-encoded enum value.
      size += 1 + self.lengthVarInt64(self.mime_type_)
    return size
  def Clear(self):
    self.clear_mime_type()
  def OutputUnchecked(self, out):
    if self.has_mime_type_:
      out.putVarInt32(8)
      out.putVarInt32(self.mime_type_)
  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 8:
        self.set_mime_type(d.getVarInt32())
        continue
      if tt == 0:
        raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)
  def __str__(self, prefix="", printElemNumber=0):
    pieces = []
    if self.has_mime_type_:
      pieces.append(prefix + ("mime_type: %s\n" % self.DebugFormatInt32(self.mime_type_)))
    return "".join(pieces)
  # Wire-format field number for mime_type.
  kmime_type = 1
  _TEXT = (
   "ErrorCode",
   "mime_type",
  )
  _TYPES = (
   ProtocolBuffer.Encoder.NUMERIC,
   ProtocolBuffer.Encoder.NUMERIC,
  )
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
class ImagesTransformRequest(ProtocolBuffer.ProtocolMessage):
  """Request for ImagesService::Transform.

  Wire fields: image (field 1, required ImageData), transform (field 2,
  repeated Transform) and output (field 3, required OutputSettings).
  """
  has_image_ = 0
  has_output_ = 0
  def __init__(self, contents=None):
    self.image_ = ImageData()
    self.transform_ = []
    self.output_ = OutputSettings()
    if contents is not None: self.MergeFromString(contents)
  # Accessors for the required `image` submessage.
  def image(self): return self.image_
  def mutable_image(self): self.has_image_ = 1; return self.image_
  def clear_image(self):self.has_image_ = 0; self.image_.Clear()
  def has_image(self): return self.has_image_
  # Accessors for the repeated `transform` field.
  def transform_size(self): return len(self.transform_)
  def transform_list(self): return self.transform_
  def transform(self, i):
    return self.transform_[i]
  def mutable_transform(self, i):
    return self.transform_[i]
  def add_transform(self):
    x = Transform()
    self.transform_.append(x)
    return x
  def clear_transform(self):
    self.transform_ = []
  # Accessors for the required `output` submessage.
  def output(self): return self.output_
  def mutable_output(self): self.has_output_ = 1; return self.output_
  def clear_output(self):self.has_output_ = 0; self.output_.Clear()
  def has_output(self): return self.has_output_
  def MergeFrom(self, x):
    """Merge all set fields of x into this message; transforms are appended."""
    assert x is not self
    if (x.has_image()): self.mutable_image().MergeFrom(x.image())
    for i in xrange(x.transform_size()): self.add_transform().CopyFrom(x.transform(i))
    if (x.has_output()): self.mutable_output().MergeFrom(x.output())
  def Equals(self, x):
    """Deep field-by-field comparison; returns 1 on equality, else 0."""
    if x is self: return 1
    if self.has_image_ != x.has_image_: return 0
    if self.has_image_ and self.image_ != x.image_: return 0
    if len(self.transform_) != len(x.transform_): return 0
    for e1, e2 in zip(self.transform_, x.transform_):
      if e1 != e2: return 0
    if self.has_output_ != x.has_output_: return 0
    if self.has_output_ and self.output_ != x.output_: return 0
    return 1
  def IsInitialized(self, debug_strs=None):
    """Verify required fields (image, output) and all transforms are initialized."""
    initialized = 1
    if (not self.has_image_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: image not set.')
    elif not self.image_.IsInitialized(debug_strs): initialized = 0
    for p in self.transform_:
      if not p.IsInitialized(debug_strs): initialized=0
    if (not self.has_output_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: output not set.')
    elif not self.output_.IsInitialized(debug_strs): initialized = 0
    return initialized
  def ByteSize(self):
    """Serialized size: each submessage costs one tag byte plus its length-prefixed payload."""
    n = 0
    n += self.lengthString(self.image_.ByteSize())
    n += 1 * len(self.transform_)
    for i in xrange(len(self.transform_)): n += self.lengthString(self.transform_[i].ByteSize())
    n += self.lengthString(self.output_.ByteSize())
    return n + 2
  def Clear(self):
    self.clear_image()
    self.clear_transform()
    self.clear_output()
  def OutputUnchecked(self, out):
    """Serialize to encoder `out`; tags encode (field_number << 3) | wire_type 2."""
    out.putVarInt32(10)
    out.putVarInt32(self.image_.ByteSize())
    self.image_.OutputUnchecked(out)
    for i in xrange(len(self.transform_)):
      out.putVarInt32(18)
      out.putVarInt32(self.transform_[i].ByteSize())
      self.transform_[i].OutputUnchecked(out)
    out.putVarInt32(26)
    out.putVarInt32(self.output_.ByteSize())
    self.output_.OutputUnchecked(out)
  def TryMerge(self, d):
    """Merge fields decoded from `d`; unknown tags are skipped, tag 0 raises."""
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_image().TryMerge(tmp)
        continue
      if tt == 18:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.add_transform().TryMerge(tmp)
        continue
      if tt == 26:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_output().TryMerge(tmp)
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)
  def __str__(self, prefix="", printElemNumber=0):
    """Render set fields as indented, human-readable debug text."""
    res=""
    if self.has_image_:
      res+=prefix+"image <\n"
      res+=self.image_.__str__(prefix + " ", printElemNumber)
      res+=prefix+">\n"
    cnt=0
    for e in self.transform_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("transform%s <\n" % elm)
      res+=e.__str__(prefix + " ", printElemNumber)
      res+=prefix+">\n"
      cnt+=1
    if self.has_output_:
      res+=prefix+"output <\n"
      res+=self.output_.__str__(prefix + " ", printElemNumber)
      res+=prefix+">\n"
    return res
  # Wire-format field numbers.
  kimage = 1
  ktransform = 2
  koutput = 3
  # Reflection tables: field names and wire types indexed by field number.
  _TEXT = (
   "ErrorCode",
   "image",
   "transform",
   "output",
  )
  _TYPES = (
   ProtocolBuffer.Encoder.NUMERIC,
   ProtocolBuffer.Encoder.STRING,
   ProtocolBuffer.Encoder.STRING,
   ProtocolBuffer.Encoder.STRING,
  )
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
class ImagesTransformResponse(ProtocolBuffer.ProtocolMessage):
  """Response message carrying the transformed image (required field 1)."""
  has_image_ = 0
  def __init__(self, contents=None):
    self.image_ = ImageData()
    if contents is not None:
      self.MergeFromString(contents)
  def image(self):
    return self.image_
  def mutable_image(self):
    self.has_image_ = 1
    return self.image_
  def clear_image(self):
    self.has_image_ = 0
    self.image_.Clear()
  def has_image(self):
    return self.has_image_
  def MergeFrom(self, x):
    """Merge the image submessage from x when x has it set."""
    assert x is not self
    if x.has_image():
      self.mutable_image().MergeFrom(x.image())
  def Equals(self, x):
    """Return 1 when presence flag and image submessage both match, else 0."""
    if x is self:
      return 1
    if self.has_image_ != x.has_image_:
      return 0
    if self.has_image_ and self.image_ != x.image_:
      return 0
    return 1
  def IsInitialized(self, debug_strs=None):
    """Image is required; the submessage must itself be initialized too."""
    if not self.has_image_:
      if debug_strs is not None:
        debug_strs.append('Required field: image not set.')
      return 0
    if not self.image_.IsInitialized(debug_strs):
      return 0
    return 1
  def ByteSize(self):
    # One tag byte plus the length-prefixed submessage payload.
    return 1 + self.lengthString(self.image_.ByteSize())
  def Clear(self):
    self.clear_image()
  def OutputUnchecked(self, out):
    out.putVarInt32(10)
    out.putVarInt32(self.image_.ByteSize())
    self.image_.OutputUnchecked(out)
  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_image().TryMerge(tmp)
        continue
      if tt == 0:
        raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)
  def __str__(self, prefix="", printElemNumber=0):
    pieces = []
    if self.has_image_:
      pieces.append(prefix + "image <\n")
      pieces.append(self.image_.__str__(prefix + " ", printElemNumber))
      pieces.append(prefix + ">\n")
    return "".join(pieces)
  # Wire-format field number for image.
  kimage = 1
  _TEXT = (
   "ErrorCode",
   "image",
  )
  _TYPES = (
   ProtocolBuffer.Encoder.NUMERIC,
   ProtocolBuffer.Encoder.STRING,
  )
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
# Public protocol-message classes exported when this module is star-imported.
__all__ = ['ImagesServiceError','ImagesServiceTransform','Transform','ImageData','OutputSettings','ImagesTransformRequest','ImagesTransformResponse']
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stub version of the images API."""
import logging
import StringIO
try:
import PIL
from PIL import _imaging
from PIL import Image
except ImportError:
import _imaging
import Image
from google.appengine.api import apiproxy_stub
from google.appengine.api import images
from google.appengine.api.images import images_service_pb
from google.appengine.runtime import apiproxy_errors
class ImagesServiceStub(apiproxy_stub.APIProxyStub):
  """Stub version of images API to be used with the dev_appserver."""

  def __init__(self, service_name='images'):
    """Preloads PIL to load all modules in the unhardened environment.

    Args:
      service_name: Service name expected for all calls.
    """
    super(ImagesServiceStub, self).__init__(service_name)
    Image.init()

  def _Dynamic_Transform(self, request, response):
    """Trivial implementation of ImagesService::Transform.

    Based off documentation of the PIL library at
    http://www.pythonware.com/library/pil/handbook/index.htm

    Args:
      request: ImagesTransformRequest, contains image request info.
      response: ImagesTransformResponse, contains transformed image.

    Raises:
      apiproxy_errors.ApplicationError if the input is empty, unreadable,
      or not in one of the supported formats.
    """
    image = request.image().content()
    if not image:
      raise apiproxy_errors.ApplicationError(
          images_service_pb.ImagesServiceError.NOT_IMAGE)

    image = StringIO.StringIO(image)
    try:
      original_image = Image.open(image)
    except IOError:
      raise apiproxy_errors.ApplicationError(
          images_service_pb.ImagesServiceError.BAD_IMAGE_DATA)

    img_format = original_image.format
    if img_format not in ("BMP", "GIF", "ICO", "JPEG", "PNG", "TIFF"):
      raise apiproxy_errors.ApplicationError(
          images_service_pb.ImagesServiceError.NOT_IMAGE)

    new_image = self._ProcessTransforms(original_image,
                                        request.transform_list())

    response_value = self._EncodeImage(new_image, request.output())
    response.mutable_image().set_content(response_value)

  def _EncodeImage(self, image, output_encoding):
    """Encode the given image and return it in string form.

    Args:
      image: PIL Image object, image to encode.
      output_encoding: ImagesTransformRequest.OutputSettings object.

    Returns:
      str with encoded image information in given encoding format.
    """
    image_string = StringIO.StringIO()

    image_encoding = "PNG"
    if (output_encoding.mime_type() == images_service_pb.OutputSettings.JPEG):
      image_encoding = "JPEG"
      # JPEG has no alpha channel or palette support, so normalize to RGB
      # before saving.
      image = image.convert("RGB")

    image.save(image_string, image_encoding)
    return image_string.getvalue()

  def _ValidateCropArg(self, arg):
    """Check an argument for the Crop transform.

    Args:
      arg: float, argument to Crop transform to check.

    Raises:
      apiproxy_errors.ApplicationError on problem with argument.
    """
    if not isinstance(arg, float):
      raise apiproxy_errors.ApplicationError(
          images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
    if not (0 <= arg <= 1.0):
      raise apiproxy_errors.ApplicationError(
          images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)

  def _CalculateNewDimensions(self,
                              current_width,
                              current_height,
                              req_width,
                              req_height):
    """Get new resize dimensions keeping the current aspect ratio.

    This uses the more restricting of the two requested values to determine
    the new ratio.

    Args:
      current_width: int, current width of the image.
      current_height: int, current height of the image.
      req_width: int, requested new width of the image.
      req_height: int, requested new height of the image.

    Returns:
      tuple (width, height) which are both ints of the new ratio.
    """
    width_ratio = float(req_width) / current_width
    height_ratio = float(req_height) / current_height

    # A requested value of 0 means that dimension is unconstrained; otherwise
    # the more restrictive (smaller) ratio wins.
    if req_width == 0 or (width_ratio > height_ratio and req_height != 0):
      return int(height_ratio * current_width), req_height
    else:
      return req_width, int(width_ratio * current_height)

  def _Resize(self, image, transform):
    """Use PIL to resize the given image with the given transform.

    Args:
      image: PIL.Image.Image object to resize.
      transform: images_service_pb.Transform to use when resizing.

    Returns:
      PIL.Image.Image with transforms performed on it.

    Raises:
      apiproxy_errors.ApplicationError if the resize data given is bad.
    """
    width = 0
    height = 0

    if transform.has_width():
      width = transform.width()
      if width < 0 or 4000 < width:
        raise apiproxy_errors.ApplicationError(
            images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)

    if transform.has_height():
      height = transform.height()
      if height < 0 or 4000 < height:
        raise apiproxy_errors.ApplicationError(
            images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)

    current_width, current_height = image.size
    new_width, new_height = self._CalculateNewDimensions(current_width,
                                                         current_height,
                                                         width,
                                                         height)

    return image.resize((new_width, new_height), Image.ANTIALIAS)

  def _Rotate(self, image, transform):
    """Use PIL to rotate the given image with the given transform.

    Args:
      image: PIL.Image.Image object to rotate.
      transform: images_service_pb.Transform to use when rotating.

    Returns:
      PIL.Image.Image with transforms performed on it.

    Raises:
      apiproxy_errors.ApplicationError if the rotate data given is bad.
    """
    degrees = transform.rotate()
    if degrees < 0 or degrees % 90 != 0:
      raise apiproxy_errors.ApplicationError(
          images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)
    degrees %= 360

    # PIL rotates counterclockwise while the API promises clockwise, so
    # invert the angle before handing it to PIL.
    degrees = 360 - degrees
    return image.rotate(degrees)

  def _Crop(self, image, transform):
    """Use PIL to crop the given image with the given transform.

    Args:
      image: PIL.Image.Image object to crop.
      transform: images_service_pb.Transform to use when cropping.

    Returns:
      PIL.Image.Image with transforms performed on it.

    Raises:
      apiproxy_errors.ApplicationError if the crop data given is bad.
    """
    left_x = 0.0
    top_y = 0.0
    right_x = 1.0
    bottom_y = 1.0

    if transform.has_crop_left_x():
      left_x = transform.crop_left_x()
      self._ValidateCropArg(left_x)

    if transform.has_crop_top_y():
      top_y = transform.crop_top_y()
      self._ValidateCropArg(top_y)

    if transform.has_crop_right_x():
      right_x = transform.crop_right_x()
      self._ValidateCropArg(right_x)

    if transform.has_crop_bottom_y():
      bottom_y = transform.crop_bottom_y()
      self._ValidateCropArg(bottom_y)

    width, height = image.size

    # Use the validated local values (which default to the full image) rather
    # than re-reading the transform: fields that were never set would read
    # back as the protocol-buffer default of 0.0 and collapse the bounding
    # box to an empty region.
    box = (int(left_x * width),
           int(top_y * height),
           int(right_x * width),
           int(bottom_y * height))

    return image.crop(box)

  def _ProcessTransforms(self, image, transforms):
    """Execute PIL operations based on transform values.

    Args:
      image: PIL.Image.Image instance, image to manipulate.
      transforms: list of ImagesTransformRequest.Transform objects.

    Returns:
      PIL.Image.Image with transforms performed on it.

    Raises:
      apiproxy_errors.ApplicationError if more than the allowed number of
      transforms is requested.
    """
    new_image = image
    if len(transforms) > images.MAX_TRANSFORMS_PER_REQUEST:
      raise apiproxy_errors.ApplicationError(
          images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)

    for transform in transforms:
      # Each Transform message carries one operation; the first set field
      # in this chain wins.
      if transform.has_width() or transform.has_height():
        new_image = self._Resize(new_image, transform)

      elif transform.has_rotate():
        new_image = self._Rotate(new_image, transform)

      elif transform.has_horizontal_flip():
        new_image = new_image.transpose(Image.FLIP_LEFT_RIGHT)

      elif transform.has_vertical_flip():
        new_image = new_image.transpose(Image.FLIP_TOP_BOTTOM)

      elif (transform.has_crop_left_x() or
            transform.has_crop_top_y() or
            transform.has_crop_right_x() or
            transform.has_crop_bottom_y()):
        new_image = self._Crop(new_image, transform)

      elif transform.has_autolevels():
        # Autolevels is not implemented in the stub; it only takes effect
        # on the deployed production service.
        logging.info("I'm Feeling Lucky autolevels will be visible once this "
                     "application is deployed.")
      else:
        logging.warn("Found no transformations to perform.")

    return new_image
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Image manipulation API.
Classes defined in this module:
Image: class used to encapsulate image information and transformations for
that image.
The current manipulations that are available are resize, rotate,
horizontal_flip, vertical_flip, crop and im_feeling_lucky.
It should be noted that each transform can only be called once per image
per execute_transforms() call.
"""
import struct
from google.appengine.api import apiproxy_stub_map
from google.appengine.api.images import images_service_pb
from google.appengine.runtime import apiproxy_errors
# Output encodings supported by the Images API; values mirror the
# OutputSettings protocol-buffer enum.
JPEG = images_service_pb.OutputSettings.JPEG
PNG = images_service_pb.OutputSettings.PNG
OUTPUT_ENCODING_TYPES = frozenset([JPEG, PNG])
# Upper bound on the number of transforms queued per execute_transforms() call.
MAX_TRANSFORMS_PER_REQUEST = 10
# Exception hierarchy for this module: all errors derive from Error so
# callers can catch the whole family with a single except clause.
class Error(Exception):
  """Base error class for this module."""
class TransformationError(Error):
  """Error while attempting to transform the image."""
class BadRequestError(Error):
  """The parameters given had something wrong with them."""
class NotImageError(Error):
  """The image data given is not recognizable as an image."""
class BadImageError(Error):
  """The image data given is corrupt."""
class LargeImageError(Error):
  """The image data given is too large to process."""
class Image(object):
"""Image object to manipulate."""
def __init__(self, image_data):
"""Constructor.
Args:
image_data: str, image data in string form.
Raises:
NotImageError if the given data is empty.
"""
if not image_data:
raise NotImageError("Empty image data.")
self._image_data = image_data
self._transforms = []
self._width = None
self._height = None
def _check_transform_limits(self):
"""Ensure some simple limits on the number of transforms allowed.
Raises:
BadRequestError if MAX_TRANSFORMS_PER_REQUEST transforms have already been
requested for this image
"""
if len(self._transforms) >= MAX_TRANSFORMS_PER_REQUEST:
raise BadRequestError("%d transforms have already been requested on this "
"image." % MAX_TRANSFORMS_PER_REQUEST)
def _update_dimensions(self):
"""Updates the width and height fields of the image.
Raises:
NotImageError if the image data is not an image.
BadImageError if the image data is corrupt.
"""
size = len(self._image_data)
if size >= 6 and self._image_data.startswith("GIF"):
self._update_gif_dimensions()
elif size >= 8 and self._image_data.startswith("\x89PNG\x0D\x0A\x1A\x0A"):
self._update_png_dimensions()
elif size >= 2 and self._image_data.startswith("\xff\xD8"):
self._update_jpeg_dimensions()
elif (size >= 8 and (self._image_data.startswith("II\x2a\x00") or
self._image_data.startswith("MM\x00\x2a"))):
self._update_tiff_dimensions()
elif size >= 2 and self._image_data.startswith("BM"):
self._update_bmp_dimensions()
elif size >= 4 and self._image_data.startswith("\x00\x00\x01\x00"):
self._update_ico_dimensions()
else:
raise NotImageError("Unrecognized image format")
def _update_gif_dimensions(self):
"""Updates the width and height fields of the gif image.
Raises:
BadImageError if the image string is not a valid gif image.
"""
size = len(self._image_data)
if size >= 10:
self._width, self._height = struct.unpack("<HH", self._image_data[6:10])
else:
raise BadImageError("Corrupt GIF format")
def _update_png_dimensions(self):
"""Updates the width and height fields of the png image.
Raises:
BadImageError if the image string is not a valid png image.
"""
size = len(self._image_data)
if size >= 24 and self._image_data[12:16] == "IHDR":
self._width, self._height = struct.unpack(">II", self._image_data[16:24])
else:
raise BadImageError("Corrupt PNG format")
  def _update_jpeg_dimensions(self):
    """Updates the width and height fields of the jpeg image.

    Walks the JPEG marker segments looking for a start-of-frame (SOF)
    marker, which carries the frame height and width.

    Raises:
      BadImageError if the image string is not a valid jpeg image.
    """
    size = len(self._image_data)
    offset = 2
    while offset < size:
      # Advance to the next 0xFF marker prefix, then past any 0xFF fill bytes
      # so `offset` lands on the marker type byte.
      while offset < size and ord(self._image_data[offset]) != 0xFF:
        offset += 1
      while offset < size and ord(self._image_data[offset]) == 0xFF:
        offset += 1
      # SOF markers are 0xC0-0xCF excluding 0xC4 (DHT).
      # NOTE(review): 0xC8 (JPG) and 0xCC (DAC) also pass this test — confirm
      # that is acceptable for the inputs this code sees.
      if (offset < size and ord(self._image_data[offset]) & 0xF0 == 0xC0 and
          ord(self._image_data[offset]) != 0xC4):
        # Skip the marker byte, the 2-byte segment length and the precision
        # byte to reach the big-endian height/width pair.
        offset += 4
        if offset + 4 <= size:
          self._height, self._width = struct.unpack(
              ">HH",
              self._image_data[offset:offset + 4])
          break
        else:
          raise BadImageError("Corrupt JPEG format")
      elif offset + 3 <= size:
        # Not an SOF: skip this segment using its 16-bit length field.
        offset += 1
        offset += struct.unpack(">H", self._image_data[offset:offset + 2])[0]
      else:
        raise BadImageError("Corrupt JPEG format")
    if self._height is None or self._width is None:
      raise BadImageError("Corrupt JPEG format")
  def _update_tiff_dimensions(self):
    """Updates the width and height fields of the tiff image.

    Scans the first image file directory (IFD) for the ImageWidth (0x100)
    and ImageLength (0x101) tags.

    Raises:
      BadImageError if the image string is not a valid tiff image.
    """
    size = len(self._image_data)
    # Byte-order mark: "II" means little-endian, otherwise big-endian ("MM").
    if self._image_data.startswith("II"):
      endianness = "<"
    else:
      endianness = ">"
    ifd_offset = struct.unpack(endianness + "I", self._image_data[4:8])[0]
    if ifd_offset + 14 <= size:
      ifd_size = struct.unpack(
          endianness + "H",
          self._image_data[ifd_offset:ifd_offset + 2])[0]
      ifd_offset += 2
      for unused_i in range(0, ifd_size):
        # Each IFD entry is 12 bytes: tag (2), type (2), count (4), value (4).
        if ifd_offset + 12 <= size:
          tag = struct.unpack(
              endianness + "H",
              self._image_data[ifd_offset:ifd_offset + 2])[0]
          if tag == 0x100 or tag == 0x101:
            value_type = struct.unpack(
                endianness + "H",
                self._image_data[ifd_offset + 2:ifd_offset + 4])[0]
            # TIFF value types: 3 = SHORT (2 bytes), 4 = LONG (4 bytes),
            # anything else is read as a single byte.
            if value_type == 3:
              format = endianness + "H"
              end_offset = ifd_offset + 10
            elif value_type == 4:
              format = endianness + "I"
              end_offset = ifd_offset + 12
            else:
              format = endianness + "B"
              end_offset = ifd_offset + 9
            if tag == 0x100:
              self._width = struct.unpack(
                  format,
                  self._image_data[ifd_offset + 8:end_offset])[0]
              # Stop early once both dimensions have been found.
              if self._height is not None:
                break
            else:
              self._height = struct.unpack(
                  format,
                  self._image_data[ifd_offset + 8:end_offset])[0]
              if self._width is not None:
                break
          ifd_offset += 12
        else:
          raise BadImageError("Corrupt TIFF format")
    if self._width is None or self._height is None:
      raise BadImageError("Corrupt TIFF format")
def _update_bmp_dimensions(self):
"""Updates the width and height fields of the bmp image.
Raises:
BadImageError if the image string is not a valid bmp image.
"""
size = len(self._image_data)
if size >= 18:
header_length = struct.unpack("<I", self._image_data[14:18])[0]
if ((header_length == 40 or header_length == 108 or
header_length == 124 or header_length == 64) and size >= 26):
self._width, self._height = struct.unpack("<II",
self._image_data[18:26])
elif header_length == 12 and size >= 22:
self._width, self._height = struct.unpack("<HH",
self._image_data[18:22])
else:
raise BadImageError("Corrupt BMP format")
else:
raise BadImageError("Corrupt BMP format")
def _update_ico_dimensions(self):
"""Updates the width and height fields of the ico image.
Raises:
BadImageError if the image string is not a valid ico image.
"""
size = len(self._image_data)
if size >= 8:
self._width, self._height = struct.unpack("<BB", self._image_data[6:8])
if not self._width:
self._width = 256
if not self._height:
self._height = 256
else:
raise BadImageError("Corrupt ICO format")
def resize(self, width=0, height=0):
"""Resize the image maintaining the aspect ratio.
If both width and height are specified, the more restricting of the two
values will be used when resizing the photo. The maximum dimension allowed
for both width and height is 4000 pixels.
Args:
width: int, width (in pixels) to change the image width to.
height: int, height (in pixels) to change the image height to.
Raises:
TypeError when width or height is not either 'int' or 'long' types.
BadRequestError when there is something wrong with the given height or
width or if MAX_TRANSFORMS_PER_REQUEST transforms have already been
requested on this image.
"""
if (not isinstance(width, (int, long)) or
not isinstance(height, (int, long))):
raise TypeError("Width and height must be integers.")
if width < 0 or height < 0:
raise BadRequestError("Width and height must be >= 0.")
if not width and not height:
raise BadRequestError("At least one of width or height must be > 0.")
if width > 4000 or height > 4000:
raise BadRequestError("Both width and height must be < 4000.")
self._check_transform_limits()
transform = images_service_pb.Transform()
transform.set_width(width)
transform.set_height(height)
self._transforms.append(transform)
def rotate(self, degrees):
"""Rotate an image a given number of degrees clockwise.
Args:
degrees: int, must be a multiple of 90.
Raises:
TypeError when degrees is not either 'int' or 'long' types.
BadRequestError when there is something wrong with the given degrees or
if MAX_TRANSFORMS_PER_REQUEST transforms have already been requested.
"""
if not isinstance(degrees, (int, long)):
raise TypeError("Degrees must be integers.")
if degrees % 90 != 0:
raise BadRequestError("degrees argument must be multiple of 90.")
degrees = degrees % 360
self._check_transform_limits()
transform = images_service_pb.Transform()
transform.set_rotate(degrees)
self._transforms.append(transform)
def horizontal_flip(self):
"""Flip the image horizontally.
Raises:
BadRequestError if MAX_TRANSFORMS_PER_REQUEST transforms have already been
requested on the image.
"""
self._check_transform_limits()
transform = images_service_pb.Transform()
transform.set_horizontal_flip(True)
self._transforms.append(transform)
def vertical_flip(self):
"""Flip the image vertically.
Raises:
BadRequestError if MAX_TRANSFORMS_PER_REQUEST transforms have already been
requested on the image.
"""
self._check_transform_limits()
transform = images_service_pb.Transform()
transform.set_vertical_flip(True)
self._transforms.append(transform)
def _validate_crop_arg(self, val, val_name):
"""Validate the given value of a Crop() method argument.
Args:
val: float, value of the argument.
val_name: str, name of the argument.
Raises:
TypeError if the args are not of type 'float'.
BadRequestError when there is something wrong with the given bounding box.
"""
if type(val) != float:
raise TypeError("arg '%s' must be of type 'float'." % val_name)
if not (0 <= val <= 1.0):
raise BadRequestError("arg '%s' must be between 0.0 and 1.0 "
"(inclusive)" % val_name)
def crop(self, left_x, top_y, right_x, bottom_y):
"""Crop the image.
The four arguments are the scaling numbers to describe the bounding box
which will crop the image. The upper left point of the bounding box will
be at (left_x*image_width, top_y*image_height) the lower right point will
be at (right_x*image_width, bottom_y*image_height).
Args:
left_x: float value between 0.0 and 1.0 (inclusive).
top_y: float value between 0.0 and 1.0 (inclusive).
right_x: float value between 0.0 and 1.0 (inclusive).
bottom_y: float value between 0.0 and 1.0 (inclusive).
Raises:
TypeError if the args are not of type 'float'.
BadRequestError when there is something wrong with the given bounding box
or if MAX_TRANSFORMS_PER_REQUEST transforms have already been requested
for this image.
"""
self._validate_crop_arg(left_x, "left_x")
self._validate_crop_arg(top_y, "top_y")
self._validate_crop_arg(right_x, "right_x")
self._validate_crop_arg(bottom_y, "bottom_y")
if left_x >= right_x:
raise BadRequestError("left_x must be less than right_x")
if top_y >= bottom_y:
raise BadRequestError("top_y must be less than bottom_y")
self._check_transform_limits()
transform = images_service_pb.Transform()
transform.set_crop_left_x(left_x)
transform.set_crop_top_y(top_y)
transform.set_crop_right_x(right_x)
transform.set_crop_bottom_y(bottom_y)
self._transforms.append(transform)
def im_feeling_lucky(self):
"""Automatically adjust image contrast and color levels.
This is similar to the "I'm Feeling Lucky" button in Picasa.
Raises:
BadRequestError if MAX_TRANSFORMS_PER_REQUEST transforms have already
been requested for this image.
"""
self._check_transform_limits()
transform = images_service_pb.Transform()
transform.set_autolevels(True)
self._transforms.append(transform)
def execute_transforms(self, output_encoding=PNG):
"""Perform transformations on given image.
Args:
output_encoding: A value from OUTPUT_ENCODING_TYPES.
Returns:
str, image data after the transformations have been performed on it.
Raises:
BadRequestError when there is something wrong with the request
specifications.
NotImageError when the image data given is not an image.
BadImageError when the image data given is corrupt.
LargeImageError when the image data given is too large to process.
TransformtionError when something errors during image manipulation.
Error when something unknown, but bad, happens.
"""
if output_encoding not in OUTPUT_ENCODING_TYPES:
raise BadRequestError("Output encoding type not in recognized set "
"%s" % OUTPUT_ENCODING_TYPES)
if not self._transforms:
raise BadRequestError("Must specify at least one transformation.")
request = images_service_pb.ImagesTransformRequest()
response = images_service_pb.ImagesTransformResponse()
request.mutable_image().set_content(self._image_data)
for transform in self._transforms:
request.add_transform().CopyFrom(transform)
request.mutable_output().set_mime_type(output_encoding)
try:
apiproxy_stub_map.MakeSyncCall("images",
"Transform",
request,
response)
except apiproxy_errors.ApplicationError, e:
if (e.application_error ==
images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA):
raise BadRequestError()
elif (e.application_error ==
images_service_pb.ImagesServiceError.NOT_IMAGE):
raise NotImageError()
elif (e.application_error ==
images_service_pb.ImagesServiceError.BAD_IMAGE_DATA):
raise BadImageError()
elif (e.application_error ==
images_service_pb.ImagesServiceError.IMAGE_TOO_LARGE):
raise LargeImageError()
elif (e.application_error ==
images_service_pb.ImagesServiceError.UNSPECIFIED_ERROR):
raise TransformationError()
else:
raise Error()
self._image_data = response.image().content()
self._transforms = []
self._width = None
self._height = None
return self._image_data
@property
def width(self):
"""Gets the width of the image."""
if self._width is None:
self._update_dimensions()
return self._width
@property
def height(self):
"""Gets the height of the image."""
if self._height is None:
self._update_dimensions()
return self._height
def resize(image_data, width=0, height=0, output_encoding=PNG):
  """Resize the given image data, preserving its aspect ratio.

  If both width and height are specified, the more restrictive of the two is
  honored when resizing. Neither dimension may exceed 4000 pixels.

  Args:
    image_data: str, source image data.
    width: int, width (in pixels) to change the image width to.
    height: int, height (in pixels) to change the image height to.
    output_encoding: a value from OUTPUT_ENCODING_TYPES.

  Raises:
    TypeError when width or height not either 'int' or 'long' types.
    BadRequestError when there is something wrong with the given height or
      width.
    Error when something went wrong with the call. See Image.ExecuteTransforms
      for more details.
  """
  img = Image(image_data)
  img.resize(width, height)
  return img.execute_transforms(output_encoding=output_encoding)
def rotate(image_data, degrees, output_encoding=PNG):
  """Rotate the given image data a number of degrees clockwise.

  Args:
    image_data: str, source image data.
    degrees: value from ROTATE_DEGREE_VALUES.
    output_encoding: a value from OUTPUT_ENCODING_TYPES.

  Raises:
    TypeError when degrees is not either 'int' or 'long' types.
    BadRequestError when there is something wrong with the given degrees.
    Error when something went wrong with the call. See Image.ExecuteTransforms
      for more details.
  """
  img = Image(image_data)
  img.rotate(degrees)
  return img.execute_transforms(output_encoding=output_encoding)
def horizontal_flip(image_data, output_encoding=PNG):
  """Mirror the given image data left-to-right.

  Args:
    image_data: str, source image data.
    output_encoding: a value from OUTPUT_ENCODING_TYPES.

  Raises:
    Error when something went wrong with the call. See Image.ExecuteTransforms
      for more details.
  """
  img = Image(image_data)
  img.horizontal_flip()
  return img.execute_transforms(output_encoding=output_encoding)
def vertical_flip(image_data, output_encoding=PNG):
  """Mirror the given image data top-to-bottom.

  Args:
    image_data: str, source image data.
    output_encoding: a value from OUTPUT_ENCODING_TYPES.

  Raises:
    Error when something went wrong with the call. See Image.ExecuteTransforms
      for more details.
  """
  img = Image(image_data)
  img.vertical_flip()
  return img.execute_transforms(output_encoding=output_encoding)
def crop(image_data, left_x, top_y, right_x, bottom_y, output_encoding=PNG):
  """Crop the given image data to a fractional bounding box.

  Each coordinate is a fraction of the image's width or height: the bounding
  box runs from (left_x*image_width, top_y*image_height) at the upper left
  down to (right_x*image_width, bottom_y*image_height) at the lower right.

  Args:
    image_data: str, source image data.
    left_x: float value between 0.0 and 1.0 (inclusive).
    top_y: float value between 0.0 and 1.0 (inclusive).
    right_x: float value between 0.0 and 1.0 (inclusive).
    bottom_y: float value between 0.0 and 1.0 (inclusive).
    output_encoding: a value from OUTPUT_ENCODING_TYPES.

  Raises:
    TypeError if the args are not of type 'float'.
    BadRequestError when there is something wrong with the given bounding box.
    Error when something went wrong with the call. See Image.ExecuteTransforms
      for more details.
  """
  img = Image(image_data)
  img.crop(left_x, top_y, right_x, bottom_y)
  return img.execute_transforms(output_encoding=output_encoding)
def im_feeling_lucky(image_data, output_encoding=PNG):
  """Automatically adjust the contrast and color levels of the given image.

  This is similar to the "I'm Feeling Lucky" button in Picasa.

  Args:
    image_data: str, source image data.
    output_encoding: a value from OUTPUT_ENCODING_TYPES.

  Raises:
    Error when something went wrong with the call. See Image.ExecuteTransforms
      for more details.
  """
  img = Image(image_data)
  img.im_feeling_lucky()
  return img.execute_transforms(output_encoding=output_encoding)
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.