code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    """Placeholder unit test run by "manage.py test"."""

    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        # FIX: failUnlessEqual is a deprecated alias of assertEqual;
        # assertEqual is the supported spelling and behaves identically.
        self.assertEqual(1 + 1, 2)
# Doctest hook: "manage.py test" also collects the doctests registered
# in this module-level mapping, in addition to the TestCase above.
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
| Python |
from django.conf.urls.defaults import *

# URLconf for the "share" app: no routes registered yet. Views added
# here will be resolved relative to 'pinkewang.share.views'.
urlpatterns = patterns('pinkewang.share.views',
)
| Python |
# Create your views here.
| Python |
#!/usr/bin/env python
# Standard Django management entry point: locate the project's settings
# module and hand control to django.core.management.
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)

if __name__ == "__main__":
    execute_manager(settings)
| Python |
#!/usr/bin/env python
#coding:utf-8
# Shared configuration: database handle, memcache client and template
# renderer for the blog.
import web
import memcache
from web.contrib.template import render_mako

# Database configuration
db = web.database(dbn = 'mysql', db = 'davidblog', user='root', pw = 'root')
# memcache configuration
mc = memcache.Client(['127.0.0.1:11211'], debug=0)
# render_mako configuration (UTF-8 in and out)
render = render_mako(
    directories = ['/home/icefox/flyingeagle/davidblog/templates'],
    input_encoding = 'utf-8',
    output_encoding = 'utf-8',
)
| Python |
#coding=utf-8
# Comment form definition and shared field validators.
from web import form

# Visitor name: 3-15 characters of any kind.
username_validate = form.regexp(r".{3,15}$", u"请输入3-15位的用户名")
# Very loose e-mail shape check: anything containing '@'.
email_validate = form.regexp(r".*@.*", u"请输入合法的Email地址")
# URLs must be absolute http:// links.
url_validate = form.regexp(r"http://.*", u"请输入合法的URL地址")

# Form rendered on the entry page; 'url' is optional, the rest required.
commentForm = form.Form(
    form.Textbox('name', form.notnull, username_validate),
    form.Textbox('email', form.notnull, email_validate),
    form.Textbox('url'),
    form.Textarea('comment', form.notnull),
    form.Button('submit', type="submit", description=u"留言"),
)
| Python |
#!/usr/bin/env python
#coding:utf-8
# Shared configuration: database handle, memcache client and template
# renderer for the blog.
import web
import memcache
from web.contrib.template import render_mako

# Database configuration
db = web.database(dbn = 'mysql', db = 'davidblog', user='root', pw = 'root')
# memcache configuration
mc = memcache.Client(['127.0.0.1:11211'], debug=0)
# render_mako configuration (UTF-8 in and out)
render = render_mako(
    directories = ['/home/icefox/flyingeagle/davidblog/templates'],
    input_encoding = 'utf-8',
    output_encoding = 'utf-8',
)
| Python |
#!/usr/bin/env python2.6
#coding:utf-8
# WSGI application entry point for the blog.
import web
from settings import render_mako
import views

# URL routing: regex pattern -> handler class in views.py
urls = (
    '/', 'views.index',
    '/entry/(.*)/', 'views.entry',
    '/category/(.*)/', 'views.category',
    '/tag/(.*)/', 'views.tag',
    '/add_comment/', 'views.addComment',
)

app = web.application(urls, globals(), autoreload = True)
# Disk-backed sessions; 'captcha' seeds the initial session state.
session = web.session.Session(app, web.session.DiskStore('sessions'), initializer={'captcha': 0})
# Mako renderer (UTF-8 in and out).
render = render_mako(
    directories = ['/home/icefox/flyingeagle/davidblog/templates'],
    input_encoding = 'utf-8',
    output_encoding = 'utf-8',
)

if __name__ == '__main__':
    #web.wsgi.runwsgi = lambda func, addr=None: web.wsgi.runfcgi(func, addr)
    app.run()
| Python |
from settings import mc
import pickle


class MCache(object):
    """Thin wrapper over the shared memcache client that transparently
    pickles values on the way in and unpickles them on the way out."""

    def set(self, name, value):
        """Serialize *value* and store it under *name*."""
        return mc.set(name, pickle.dumps(value))

    def get(self, name):
        """Return the unpickled value stored under *name*, or None."""
        raw = mc.get(name)
        return pickle.loads(raw) if raw is not None else None

    def delete(self, name):
        """Remove *name* from the cache."""
        return mc.delete(name)


# Module-level singleton used by the views.
mcache = MCache()
| Python |
#!/usr/bin/env python2.6
#coding:utf-8
# WSGI application entry point for the blog.
import web
from settings import render_mako
import views

# URL routing: regex pattern -> handler class in views.py
urls = (
    '/', 'views.index',
    '/entry/(.*)/', 'views.entry',
    '/category/(.*)/', 'views.category',
    '/tag/(.*)/', 'views.tag',
    '/add_comment/', 'views.addComment',
)

app = web.application(urls, globals(), autoreload = True)
# Disk-backed sessions; 'captcha' seeds the initial session state.
session = web.session.Session(app, web.session.DiskStore('sessions'), initializer={'captcha': 0})
# Mako renderer (UTF-8 in and out).
render = render_mako(
    directories = ['/home/icefox/flyingeagle/davidblog/templates'],
    input_encoding = 'utf-8',
    output_encoding = 'utf-8',
)

if __name__ == '__main__':
    #web.wsgi.runwsgi = lambda func, addr=None: web.wsgi.runfcgi(func, addr)
    app.run()
| Python |
#coding:utf-8
import web
from davidblog import render
from forms import commentForm
from datetime import datetime
from settings import db, render
from cache import mcache
def getCategories():
    """Return all categories ordered by name, memcached after first load."""
    cached = mcache.get('categories')
    if cached is not None:
        return cached
    cached = list(db.query('SELECT * FROM categories ORDER BY name ASC'))
    mcache.set('categories', cached)
    return cached
def getTags():
    """Return all tags ordered by name, memcached after first load."""
    cached = mcache.get('tags')
    if cached is not None:
        return cached
    cached = list(db.query('SELECT * FROM tags ORDER BY name ASC'))
    mcache.set('tags', cached)
    return cached
def getLinks():
    """Return all blogroll links ordered by name, memcached after first load."""
    cached = mcache.get('links')
    if cached is not None:
        return cached
    cached = list(db.query('SELECT * FROM links ORDER BY name ASC'))
    mcache.set('links', cached)
    return cached
class index(object):
    """Front page: paginated list of the most recent entries (10 per page)."""

    def GET(self):
        # Current page number (1-based) from the query string.
        page = web.input(page=1)
        page = int(page.page)
        entry_count = db.query("SELECT COUNT(id) AS num FROM entries")
        num = entry_count[0]['num']
        # BUG FIX: the old code computed float(num / 10), which truncates
        # FIRST (Python 2 integer division), so e.g. 15 entries yielded 1
        # page instead of 2. Use ceiling division, minimum one page.
        pages = max(1, (num + 9) // 10)
        if page > pages:
            page = pages
        entries = list(db.query("SELECT en.id AS entryId, en.title AS title, en.content AS content, en.slug AS entry_slug, en.createdTime AS createdTime, en.commentNum AS commentNum, ca.id AS categoryId, ca.slug AS category_slug, ca.name AS category_name FROM entries en LEFT JOIN categories ca ON en.categoryId = ca.id ORDER BY createdTime DESC LIMIT $start, 10", vars = {'start':(page - 1) * 10}))
        # Attach the tag list to each entry for the template.
        for entry in entries:
            entry.tags = db.query("SELECT * FROM entry_tag et LEFT JOIN tags t ON t.id = et.tagId WHERE et.entryId = $id", vars = {'id':entry.entryId})
        return render.index(entries = entries, page = page, pages = pages, categories = getCategories(), tags = getTags(), links = getLinks())
class entry(object):
    """Single-entry page, addressed by slug."""

    def GET(self, slug):
        entry = list(db.query('SELECT en.id AS entryId, en.title AS title, en.content AS content, en.slug AS entry_slug, en.createdTime AS createdTime, en.commentNum AS commentNum, ca.id AS categoryId, ca.slug AS category_slug, ca.name AS category_name FROM entries en LEFT JOIN categories ca ON en.categoryId = ca.id WHERE en.slug = $slug', vars={'slug':slug}))
        # BUG FIX: an unknown slug used to raise IndexError on entry[0]
        # below (HTTP 500); answer with a proper 404 instead.
        if not entry:
            raise web.notfound()
        # Attach tags and comments for the template.
        for one in entry:
            one.tags = db.query('SELECT * FROM entry_tag et LEFT JOIN tags t ON t.id = et.tagId WHERE et.entryId = $id', vars = {'id': one.entryId})
            one.comments = db.query('SELECT * FROM comments WHERE entryId = $id ORDER BY createdTime DESC', vars = {'id': one.entryId})
        f = commentForm()
        return render.entry(entry = entry[0], categories = getCategories(), tags = getTags(), links = getLinks(), f = f)
class addComment(object):
    """Handle a comment form POST: store it and bump the entry's counter."""

    def POST(self):
        datas = web.input()
        createdTime = datetime.now().strftime("%Y-%m-%d %H:%M")
        # Empty URL renders as a dead '#' link instead of a broken href.
        if datas.url == "":
            datas.url = "#"
        # NOTE(review): commentForm declares the field as 'name', but this
        # reads datas.username - confirm which name the template posts.
        db.insert('comments', entryId = datas.id, username = datas.username, email = datas.email, url = datas.url, createdTime = createdTime, comment = datas.comment)
        # BUG FIX: the counter update used a where clause built by string
        # interpolation ('id = %s' % datas.id) - SQL injection. Parameterize
        # it, and let the database increment atomically instead of the old
        # read-then-write pair (which could lose concurrent updates).
        db.query('UPDATE entries SET commentNum = commentNum + 1 WHERE id = $id', vars = {'id': datas.id})
        return render.comment(datas = datas, createdTime = createdTime)
class category(object):
    """Paginated list of entries in one category, addressed by slug."""

    def GET(self, slug):
        # Current page number (1-based) from the query string.
        page = web.input(page=1)
        page = int(page.page)
        entry_count = db.query("SELECT COUNT(en.id) AS num FROM entries en LEFT JOIN categories ca ON ca.id = en.categoryId WHERE ca.slug = $slug", vars = {'slug':slug})
        num = entry_count[0]['num']
        # BUG FIX: float(num / 10) truncated before converting (Python 2
        # integer division), understating the page count. Use ceiling
        # division, minimum one page.
        pages = max(1, (num + 9) // 10)
        if page > pages:
            page = pages
        entries = list(db.query('SELECT en.id AS entryId, en.title AS title, en.content AS content, en.slug AS entry_slug, en.createdTime AS createdTime, ca.id AS categoryId, ca.slug AS category_slug, ca.name AS category_name FROM entries en LEFT JOIN categories ca ON ca.id = en.categoryId WHERE ca.slug = $slug ORDER BY en.createdTime DESC LIMIT $start, 10', vars = {'slug':slug, 'start':(page - 1) * 10}))
        return render.category(entries = entries, categories = getCategories(), tags = getTags(), links = getLinks(), page = page, pages = pages)
class tag(object):
    """Paginated list of entries carrying a given tag (tag name as slug)."""

    def GET(self, slug):
        tag = db.query('SELECT et.entryId AS id FROM entry_tag et LEFT JOIN tags t ON et.tagId = t.id WHERE t.name = $slug', vars = {'slug':slug})
        # Coerce to int so the ids are safe to splice into SQL below.
        entry_ids = [int(i.id) for i in tag]
        # Current page number (1-based) from the query string.
        page = web.input(page=1)
        page = int(page.page)
        entry_count = len(entry_ids)
        # BUG FIX: float(count / 10) truncated before converting (Python 2
        # integer division), understating the page count. Ceiling division.
        pages = max(1, (entry_count + 9) // 10)
        if page > pages:
            page = pages
        if entry_ids:
            # BUG FIX: the ids were passed through $-substitution, which
            # quotes the comma-joined list as ONE string and breaks the IN
            # clause. The values are ints, so direct interpolation is safe.
            id_list = ','.join(str(i) for i in entry_ids)
            entries = list(db.query('SELECT en.id AS entryId, en.title AS title, en.content AS content, en.slug AS entry_slug, en.createdTime AS createdTime FROM entries en WHERE en.id in (%s)' % id_list))
        else:
            # BUG FIX: an empty id list used to produce "IN ()" (SQL error).
            entries = []
        return render.tag(entries = entries, categories = getCategories(), tags = getTags(), links = getLinks(), page = page, pages = pages)
| Python |
#!/usr/bin/env python
# Packaging script for google-docs-fs.
import sys
from distutils.core import setup

# NOTE(review): 'required' is never used, and distutils' setup() does not
# understand the setuptools-only 'install_requires' keyword (it is ignored
# with a warning) - consider switching to setuptools.
required = []

setup(
    name='google-docs-fs',
    version='1.0rc2',
    description='Treat Google Docs as a local file system',
    author='Scott Walton',
    author_email='d38dm8nw81k1ng@gmail.com',
    license='GPLv2',
    url='http://code.google.com/p/google-docs-fs/',
    py_modules=['googledocsfs.gFile','googledocsfs.gNet'],
    scripts=['gmount','gumount','gmount.py'],
    install_requires=['python-fuse>=0.2','python-gdata>=2.0.0']
)
| Python |
#!/usr/bin/env python
# Launcher for google-docs-fs: put this script's directory on sys.path
# so the googledocsfs package resolves, then delegate to gFile.main().
import sys
import os

sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

from googledocsfs import gFile

gFile.main()
| Python |
#!/usr/bin/env python
#
# gNet.py
#
# Copyright 2009 Scott C. Walton <d38dm8nw81k1ng@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (version 2), as
# published by the Free Software Foundation
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import os
import gdata.docs.service
import gdata.docs
from gdata import MediaSource
class GNet(object):
    """
    Performs all the main interfacing with Google Docs server as well
    as storing the user's session data
    """

    def __init__(self, em, pw):
        """
        Purpose: Login to Google Docs and store the session cookie
        em: A String containing the user's email address
        pw: A String containing the user's password
        Returns: A GNet object for accessing the GData Docs
        """
        self.gd_client = gdata.docs.service.DocsService()
        self.gd_client.email = em
        self.gd_client.password = pw
        # Client identifier reported to Google.
        self.gd_client.source = 'google-docs-fs'
        # Force HTTPS for every request.
        self.gd_client.ssl = True
        # Raises from gdata on bad credentials.
        self.gd_client.ProgrammaticLogin()
        # Encoding used for all path/title text sent to the API.
        self.codec = 'utf-8'
    def get_docs(self, filetypes = None, folder = None):
        """
        Purpose: Retrieve a list of all documents
        filetypes: A List containing the filetypes to query
        folder: A String containing the folder to search in
        Returns: A List of documents specified by filetypes: Type gdata.docs.DocumentListFeed
        """
        query = gdata.docs.service.DocumentQuery(categories = filetypes)
        # Folders are omitted from the feed unless explicitly requested.
        query['showfolders'] = 'true'
        if folder is not None:
            query.AddNamedFolder(self.gd_client.email, folder.encode(self.codec))
        return self.gd_client.Query(query.ToUri())
def get_filename(self, path, showfolders = 'false'):
"""
Purpose: Retrieves the file referred to by path from Google
path: A String containing the path elements of the file
showfolders: Either 'true' or 'false' - whether get_filename
should also retrieve folders (default: 'false')
Returns: The gdata List Entry object containing the file or None if none exists
"""
name = os.path.basename(path)
title = os.path.splitext(name)[0]
pe = path.split('/')
query = gdata.docs.service.DocumentQuery()
query['title'] = title.encode(self.codec)
query['title-exact'] = 'true'
query['showfolders'] = showfolders
feed = self.gd_client.Query(query.ToUri())
filetype_filter = []
# Filter out any files that don't match the case
for f in feed.entry:
if f.title.text.decode(self.codec) == title:
filetype_filter.append(f)
# Return the first file encountered in the folder
# Fix this to be more precise in the final version
# Need to implement file extensions, then I should be able to
# check those to get the filetype and a more accurate file
# May also need to go through the entire file hierarchy to
# ensure the integrity of the path. May be slower but will be
# essential to ensure the user doesnt unwittingly erase a
# random file stored elsewhere
if len(filetype_filter) == 1: ## Assume it is the correct one
return filetype_filter[0]
for entry in filetype_filter:
## Assume that if there's only 1 then it's the correct one.
if os.path.dirname(path) == '/' or len(entry.category) is 1:
return entry
## This doesn't seem to work any more
for c in entry.category:
if pe[-2].encode(self.codec) in c.label:
return entry
def erase(self, path, folder = False):
"""
Purpose: Erase a file
path: String containing path to the file to erase
"""
if folder is True:
file = self.get_filename(path, showfolders = 'true')
else:
file = self.get_filename(path)
self.gd_client.Delete(file.GetEditLink().href)
def upload_file(self, path):
"""
Purpose: Uploads a file to Google Docs
path: String containing path of the file to be uploaded
"""
mime = gdata.docs.service.SUPPORTED_FILETYPES[path[-3:].upper()]
filename = os.path.basename(path)
title = filename[:-4]
dir = os.path.dirname(path)
media = MediaSource(file_path = path.encode(self.codec), content_type = mime)
if mime in ['CSV', 'ODS', 'XLS']:
entry = self.gd_client.UploadSpreadsheet(media, title)
if mime in ['PPT', 'PPS']:
entry = self.gd_client.UploadPresentation(media, title)
else:
entry = self.gd_client.UploadDocument(media, title)
if dir != '/':
type = entry.GetDocumentType()
entry_to = self.get_filename(os.path.basename(dir), showfolders = 'true')
if type == 'document':
self.gd_client.MoveDocumentIntoFolder(entry, entry_to)
elif type == 'spreadsheet':
self.gd_client.MoveSpreadsheetIntoFolder(entry, entry_to)
elif type == 'presentation':
self.gd_client.MovePresentationIntoFolder(entry, entry_to)
def create_dir(self, path):
"""
Purpose: Create a directory referred to by path
path: A list containing the path to the directory to be created
"""
#Check if folder is in root
if len(path) > 1:
parent_dir = path[-2]
else:
parent_dir = None
if parent_dir is None:
self.gd_client.CreateFolder(path[-1])
else:
parent_entry = self.get_filename(parent_dir, showfolders = 'true')
self.gd_client.CreateFolder(path[-1], parent_entry)
def get_file(self, path, tmp_path, flags):
"""
Purpose: Get the file referred to by path off Google Docs.
path: A string containing the path to the file to download
flags: A string giving the flags to open the file with
Returns: The file requested, or -1 if the file doesn't exist
"""
filename = os.path.basename(path)
doc = self.get_filename(path)
## If doc is a new file
if doc is None:
import stat
os.mknod(tmp_path.encode(self.codec), 0700 | stat.S_IFREG)
return open(tmp_path.encode(self.codec), flags)
filetype = doc.GetDocumentType()
if filetype == 'spreadsheet':
import gdata.spreadsheet.service
spreadsheets_client = gdata.spreadsheet.service.SpreadsheetsService()
spreadsheets_client.ClientLogin(self.gd_client.email, self.gd_client.password)
# substitute the spreadsheets token into our gd_client
docs_auth_token = self.gd_client.GetClientLoginToken()
self.gd_client.SetClientLoginToken(spreadsheets_client.GetClientLoginToken())
self.gd_client.Export(doc.resourceId.text, tmp_path.encode(self.codec))
self.gd_client.SetClientLoginToken(docs_auth_token)
else:
print doc.resourceId.text
self.gd_client.Export(doc.resourceId.text, tmp_path.encode(self.codec))
return open(tmp_path.encode(self.codec), flags)
def update_file_contents(self, path, tmp_path):
"""
Purpose: Update the contents of the file specified by path
path: String containing path to file to update
"""
mime = gdata.docs.service.SUPPORTED_FILETYPES[path[-3:].upper()]
ms = gdata.MediaSource(file_path = tmp_path.encode(self.codec), content_type = mime)
entry = self.get_filename(path)
self.gd_client.Put(data = entry, uri = entry.GetEditMediaLink().href, media_source = ms)
def make_folder(self, path):
"""
Purpose: Create a folder specified by path
path: String containing path to folder to create
"""
if os.path.dirname(path) == '/':
self.gd_client.CreateFolder(os.path.basename(path).encode(self.codec))
else:
parent_dir = self.get_filename(os.path.dirname(path), showfolders = 'true')
self.gd_client.CreateFolder(os.path.basename(path).encode(self.codec), parent_dir)
    def move_file(self, pathfrom, pathto):
        """
        Purpose: Move a file from one folder to another
        pathfrom: String containing path to file to move
        pathto: String containing path to move to
        Returns: 0 to indicate success
        """
        folderfrom = os.path.dirname(pathfrom)
        folderto = os.path.dirname(pathto)
        namefrom = os.path.basename(pathfrom)
        if folderfrom != '/':
            # Find the entry inside its current folder and detach it first.
            ffe = self.get_filename(folderfrom, showfolders = 'true')
            feed = self.gd_client.GetDocumentListFeed(ffe.content.src)
            # NOTE(review): if no feed entry matches the title, entry_from
            # is unbound here and MoveOutOfFolder raises NameError - confirm
            # this cannot happen in practice.
            for entry in feed.entry:
                if unicode(entry.title.text, self.codec) == namefrom[:-4]:
                    entry_from = entry
            self.gd_client.MoveOutOfFolder(entry_from)
        entry_from = self.get_filename(pathfrom, showfolders = 'true')
        entry_to = self.get_filename(folderto, showfolders = 'true')
        # Dispatch on the document type; each type has its own move call.
        type = entry_from.GetDocumentType()
        if type == 'folder':
            self.gd_client.MoveFolderIntoFolder(entry_from, entry_to)
        elif type == 'document':
            self.gd_client.MoveDocumentIntoFolder(entry_from, entry_to)
        elif type == 'spreadsheet':
            self.gd_client.MoveSpreadsheetIntoFolder(entry_from, entry_to)
        elif type == 'presentation':
            self.gd_client.MovePresentationIntoFolder(entry_from, entry_to)
        if os.path.basename(pathfrom) != os.path.basename(pathto):
            # Moving does not rename; do that as a separate step.
            entry_from = self.rename_file(entry_from, os.path.basename(pathto))
        return 0
    def rename_file(self, entry, name_to):
        """
        Purpose: Renames an entry
        entry: GDataListEntry to change the name of
        name_to: String name to change to
        Returns: GDataListEntry of renamed file
        """
        entry.title.text = name_to
        # Pushing the modified entry back persists the new title remotely.
        return self.gd_client.Put(entry, entry.GetEditLink().href)
def main():
    """
    Purpose: Used for Testing Only.
    Returns: 0 to indicate successful execution
    """
    # Nothing to do when run directly; the module is a library.
    return 0


if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# gFile.py
#
# Copyright 2008-2009 Scott C. Walton <d38dm8nw81k1ng@gmail.com>
# truncate() function written by miGlanz@gmail.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (version 2), as
# published by the Free Software Foundation
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import stat
import os
import sys
import threading
import platform
import errno
import time
import fuse
import gNet
import getpass
from subprocess import *
# Declare which python-fuse API generation this filesystem targets.
fuse.fuse_python_api = (0,2)
class GStat(fuse.Stat):
"""
The stat class to use for getattr
"""
def __init__(self):
"""
Purpose: Sets the attributes to folder attributes
Returns: Nothing
"""
self.st_mode = stat.S_IFDIR | 0744
self.st_ino = 0
self.st_dev = 0
self.st_nlink = 2
self.st_uid = os.getuid()
self.st_gid = os.getgid()
self.st_size = 4096
self.st_atime = time.time()
self.st_mtime = self.st_atime
self.st_ctime = self.st_atime
def set_file_attr(self, size):
"""
Purpose: Set attributes of a file
size: int the file's size in bytes
"""
self.st_mode = stat.S_IFREG | 0744
self.st_nlink = 1
self.st_size = size
def set_access_times(self, mtime, ctime, atime = None):
"""
Purpose: Set the access times of a file
mtime: int modified time
ctime: int creation time
atime: int access time
"""
self.st_mtime = mtime
self.st_atime = ctime
if atime is not None and atime > 0:
self.st_atime = atime
class GFile(fuse.Fuse):
    """
    The main Google Docs filesystem class. Most work will be done
    in here.
    """

    def __init__(self, em, pw, *args, **kw):
        """
        Purpose: Connect to the Google Docs Server and verify credentials
        em: User's email address
        pw: User's password
        *args: Args to pass to Fuse
        **kw: Keywords to pass to Fuse
        Returns: Nothing
        """
        super(GFile, self).__init__(*args, **kw)
        self.gn = gNet.GNet(em, pw)
        self.directories = {}     # path -> list of child names
        self.files = {}           # path -> GStat served by getattr()
        self.written = {}         # paths with unflushed local writes
        self.time_accessed = {}   # local path -> last access timestamp
        self.release_lock = threading.RLock()
        self.to_upload = {}       # brand-new files awaiting first upload
        self.codec = 'utf-8'
        # Local mirror directory for hidden files and downloaded copies.
        self.home = unicode('%s/.google-docs-fs' % (os.path.expanduser('~'),), self.codec)
        # Numeric open(2) flag values differ per platform; these constants
        # are presumably observed empirically - verify on new platforms.
        if os.uname()[0] == 'Darwin':
            self.READ = 0
            self.WRITE = 1
            self.READWRITE = 2
        else:
            self.READ = 32768
            self.WRITE = 32769
            self.READWRITE = 32770
        self.APPEND = 337932
        self.APPENDRW = 33794
    def getattr(self, path):
        """
        Purpose: Get information about a file
        path: String containing relative path to file using mountpoint as /
        Returns: a GStat object with some updated values
        """
        path = unicode(path, self.codec)
        filename = os.path.basename(path)
        # Lazily seed the root directory's stat entry.
        if '/' not in self.files:
            self.files['/'] = GStat()
        if path in self.files:
            # Already stat'ed once - serve from the cache.
            st = self.files[path]
        elif filename[0] == '.':
            # Hidden files live only in the local mirror under self.home.
            st = os.stat(('%s%s' % (self.home, path)).encode(self.codec))
        else:
            # Unknown path: ask Google Docs (folders included).
            f = self.gn.get_filename(path, 'true')
            if f is None:
                return -errno.ENOENT
            self._setattr(path = path, entry = f)
            st = self.files[path]
        return st
    def readdir(self, path, offset):
        """
        Purpose: Give a listing for ls
        path: String containing relative path to file using mountpoint as /
        offset: Included for compatibility. Does nothing
        Returns: Directory listing for ls (yields fuse.Direntry objects)
        """
        dirents = ['.', '..']
        path = unicode(path, self.codec)
        filename = os.path.basename(path)
        if path == '/': # Root
            # First fetch every folder, then query again excluding them so
            # only files that live directly in the root remain.
            excludes = []
            self.directories['/'] = []
            feed = self.gn.get_docs(filetypes = ['folder'])
            for dir in feed.entry:
                excludes.append('-' + dir.title.text.decode(self.codec))
                self.directories['%s%s' % (path, dir.title.text.decode(self.codec))] = []
            if len(excludes) > 0:
                # The exclude list must be byte strings for the query.
                i = 0
                while i < len(excludes):
                    excludes[i] = excludes[i].encode(self.codec)
                    i += 1
                feed = self.gn.get_docs(filetypes = excludes)
            else:
                feed = self.gn.get_docs() # All must be in root folder
            for file in feed.entry:
                if file.GetDocumentType() == 'folder':
                    self.directories['/'].append('%s' % (file.title.text.decode(self.codec), ))
                else:
                    # Files get a fake extension derived from their type.
                    self.directories['/'].append("%s.%s" % (file.title.text.decode(self.codec), self._file_extension(file)))
        elif filename[0] == '.': #Hidden - ignore
            # NOTE(review): on this branch 'feed' stays unbound and
            # self.directories[path] may be missing below - confirm hidden
            # directories never reach readdir().
            pass
        else: #Directory
            self.directories[path] = []
            feed = self.gn.get_docs(folder = filename)
            for file in feed.entry:
                if file.GetDocumentType() == 'folder':
                    self.directories[os.path.join(path, file.title.text.decode(self.codec))] = []
                    self.directories[path].append(file.title.text.decode(self.codec))
                else:
                    self.directories[path].append("%s.%s" % (file.title.text.decode(self.codec), self._file_extension(file)))
        for entry in self.directories[path]:
            dirents.append(entry)
        # Google's synthetic root folder should not appear in listings.
        if 'My folders' in dirents:
            dirents.remove('My folders')
        # Set the appropriate attributes for use with getattr()
        for file in feed.entry:
            p = os.path.join(path, file.title.text.decode(self.codec))
            if file.GetDocumentType() != 'folder':
                p = '%s.%s' % (p, self._file_extension(file))
            self._setattr(path = p, entry = file)
        # Display all hidden files in dirents
        tmp_path = '%s%s' % (self.home, path)
        try:
            os.makedirs(tmp_path.encode(self.codec))
        except OSError:
            pass # mirror directory already exists
        if os.path.exists(tmp_path.encode(self.codec)):
            for file in [f for f in os.listdir(tmp_path.encode(self.codec)) if f[0] == '.']:
                dirents.append(file)
                self._setattr(path = os.path.join(tmp_path, file))
        for r in dirents:
            yield fuse.Direntry(r.encode(self.codec))
    def mknod(self, path, mode, dev):
        """
        Purpose: Create file nodes. Use mkdir to create directories
        path: Path of file to create
        mode: Ignored (for now)
        dev: Ignored (for now)
        Returns: 0 to indicate succes
        """
        path = unicode(path, self.codec)
        filename = os.path.basename(path)
        dir = os.path.dirname(path)
        tmp_path = '%s%s' % (self.home, path)
        tmp_dir = '%s%s' % (self.home, dir)
        if filename[0] != '.':
            # Visible file: defer the real creation; release() uploads it.
            self.to_upload[path] = True
        else:
            # Hidden file: exists only in the local mirror.
            try:
                os.makedirs(tmp_dir.encode(self.codec), 0644)
            except OSError:
                pass #Assume that it already exists
            os.mknod(tmp_path.encode(self.codec), 0644)
        # Register the new (empty) file with the bookkeeping maps.
        self._setattr(path = path)
        self.files[path].set_file_attr(0)
        self.directories[dir].append(filename)
        return 0
def open(self, path, flags):
"""
Purpose: Open the file referred to by path
path: String giving the path to the file to open
flags: String giving Read/Write/Append Flags to apply to file
Returns: Pointer to file
"""
path = unicode(path, self.codec)
filename = os.path.basename(path)
tmp_path = '%s%s' % (self.home, path)
## I think that's all of them. The others are just different
## ways of representing the one defined here
## Buffer will just be written to a new temporary file and this
## will then be uploaded
if flags == self.READ:
f = 'r'
elif flags == self.WRITE:
f = 'w'
elif flags == self.READWRITE:
f = 'r+'
elif flags == self.APPEND:
f = 'a'
elif flags == self.APPENDRW:
f = 'a+'
elif type(f) is str: # Assume that it was passed from self.read()
f = flags
else:
f = 'a+' # Just do something to make it work ;-)
if not os.path.exists(tmp_path):
try:
os.makedirs(os.path.dirname(tmp_path))
except OSError:
pass #Assume path exists
if filename[0] != '.':
file = self.gn.get_file(path, tmp_path, f)
else:
file = open(tmp_path.encode(self.codec), f)
else:
file = open(tmp_path.encode(self.codec), f)
self.files[path].st_size = os.path.getsize(tmp_path.encode(self.codec))
return file
    def write(self, path, buf, offset, fh = None):
        """
        Purpose: Write the file to Google Docs
        path: Path of the file to write as String
        buf: Data to write to Google Docs
        offset: Offset into the file to start writing at
        fh: File to write to; the local copy is opened when None
        Returns: number of bytes written (FUSE contract)
        """
        path = unicode(path, self.codec)
        filename = os.path.basename(path)
        tmp_path = '%s%s' % (self.home, path)
        if fh is None:
            fh = open(tmp_path.encode(self.codec), 'wb')
        fh.seek(offset)
        fh.write(buf)
        if filename[0] != '.':
            # Mark dirty so release() pushes the change to Google Docs.
            self.written[path] = True
        self.time_accessed[path] = time.time()
        return len(buf)
def flush(self, path, fh = None):
"""
Purpose: Flush the write data and upload it to Google Docs
path: String containing path to file to flush
fh: File Handle
"""
if fh is not None:
fh.close()
    def unlink(self, path):
        """
        Purpose: Remove a file
        path: String containing relative path to file using mountpoint as /
        Returns: 0 on success, negated errno on failure
        """
        path = unicode(path, self.codec)
        filename = os.path.basename(path.encode(self.codec))
        if filename[0] == '.':
            # Hidden files exist only in the local mirror.
            tmp_path = u'%s%s' % (self.home, path)
            if os.path.exists(tmp_path.encode(self.codec)):
                if os.path.isdir(tmp_path.encode(self.codec)):
                    return -errno.EISDIR
                os.remove(tmp_path.encode(self.codec))
                return 0
            else:
                return -errno.ENOENT
        if path in self.directories:
            # Directories must go through rmdir().
            return -errno.EISDIR
        try:
            self.gn.erase(path)
        except AttributeError, e:
            # gn.erase dereferences None when the file is unknown remotely.
            return -errno.ENOENT
    def read(self, path, size = -1, offset = 0, fh = None):
        """
        Purpose: Read from file pointed to by fh
        path: String Path to file if fh is None
        size: Int Number of bytes to read
        offset: Int Offset to start reading from
        fh: File to read
        Returns: Bytes read
        """
        path = unicode(path, self.codec)
        filename = os.path.basename(path)
        if fh is None:
            # Open (and download if needed) the local copy for reading.
            fh = self.open(path.encode(self.codec), 'rb+')
        fh.seek(offset)
        buf = fh.read(size)
        tmp_path = '%s%s' % (self.home, path)
        # Refresh the access stamp so release() keeps the local copy around.
        self.time_accessed[tmp_path] = time.time()
        return buf
def release(self, path, flags, fh = None):
"""
Purpose: Called after a file is closed
path: String containing path to file to be released
flags: Ignored
fh: File Handle to be released
"""
self.release_lock.acquire()
path = unicode(path, self.codec)
filename = os.path.basename(path)
tmp_path = '%s%s' % (self.home, path)
if path in self.to_upload and path in self.written:
self.gn.upload_file(tmp_path)
del self.to_upload[path]
elif os.path.exists(tmp_path):
if path in self.written:
self.gn.update_file_contents(path, tmp_path)
del self.written[path]
for t in self.time_accessed:
if time.time() - self.time_accessed[t] > 300:
os.remove(t.encode(self.codec))
self.release_lock.release()
    def mkdir(self, path, mode):
        """
        Purpose: Make a directory
        path: String containing path to directory to create
        mode: Ignored (for now)
        Returns: 0 on success, negated errno on failure
        """
        path = unicode(path, self.codec)
        dir, filename = os.path.split(path)
        tmp_path = '%s%s' % (self.home, path)
        if path in self.directories:
            return -errno.EEXIST
        if dir in self.directories:
            self.directories[os.path.dirname(path)].append(filename)
        else:
            # Parent folder unknown to the bookkeeping maps.
            return -errno.ENOENT
        self.gn.make_folder(path)
        self.directories[path] = []
        self._setattr(path, file = False)
        # Mirror the new folder locally for hidden-file support.
        os.makedirs(tmp_path.encode(self.codec))
        return 0
    def rmdir(self, path):
        """
        Purpose: Remove a directory referenced by path
        path: String containing path to directory to remove
        Returns: 0 on success, negated errno on failure
        """
        path = unicode(path, self.codec)
        tmp_path = '%s%s' % (self.home, path)
        filename = os.path.basename(path)
        # NOTE(review): readdir() is a generator, so this call builds it
        # but never executes the body - the listing is NOT refreshed here.
        # Confirm whether it should be iterated.
        self.readdir(path, 0)
        if path in self.directories:
            if len(self.directories[path]) == 0: #Empty
                self.gn.erase(path, folder = True)
                self.directories[os.path.dirname(path)].remove(filename)
                del self.files[path]
                del self.directories[path]
                os.removedirs(tmp_path.encode(self.codec))
            else:
                return -errno.ENOTEMPTY
        else:
            return -errno.ENOENT
        return 0
    def rename(self, pathfrom, pathto):
        """
        Purpose: Move file to new location. Cannot rename in place.
        pathfrom: String path of file to move
        pathto: String new file path
        Returns: 0 on success, negated errno on failure
        """
        pathfrom = unicode(pathfrom, self.codec)
        pathto = unicode(pathto, self.codec)
        tmp_path_from = '%s%s' % (self.home, pathfrom)
        tmp_path_to = '%s%s' % (self.home, pathto)
        if pathfrom == pathto:
            return -errno.EEXIST
        elif os.path.dirname(pathfrom) == os.path.dirname(pathto):
            # NOTE(review): errno has no ESAMEDIR attribute in the standard
            # library - this line would raise AttributeError. Confirm the
            # intended error code (e.g. EPERM/EINVAL).
            return -errno.ESAMEDIR
        else: ## Move the file
            # Keep the local mirror and bookkeeping maps in sync first.
            if os.path.exists(tmp_path_from.encode(self.codec)):
                os.rename(tmp_path_from, tmp_path_to)
            if pathfrom in self.directories:
                self.directories[pathto] = self.directories[pathfrom]
                del self.directories[pathfrom]
            self.files[pathto] = self.files[pathfrom]
            del self.files[pathfrom]
            if os.path.basename(pathfrom) in self.directories[os.path.dirname(pathfrom)]:
                self.directories[os.path.dirname(pathfrom)].remove(os.path.basename(pathfrom))
            self.directories[os.path.dirname(pathto)].append(os.path.basename(pathto))
            # Finally perform the move on the Google Docs side.
            self.gn.move_file(pathfrom, pathto)
        return 0
def truncate(self, path, length, *args, **kwargs):
path = unicode(path, self.codec)
filename = os.path.basename(path)
tmp_path = '%s%s' % (self.home, path)
fh = open(tmp_path.encode(self.codec), 'r+')
fh.truncate(length)
fh.close()
if filename[0] != '.':
self.written[path] = True
self.time_accessed[path] = time.time()
return 0
    def _setattr(self, path, entry = None, file = True):
        """
        Purpose: Set the getattr information for entry
        path: String path to file
        entry: DocumentListEntry object to extract data from
        file: Boolean set to false if setting attributes of a folder
        """
        self.files[path] = GStat()
        if entry:
            if entry.GetDocumentType() != 'folder':
                # NOTE(review): the reported size is len(path), not the
                # real document size - presumably a placeholder. Confirm.
                self.files[path].set_file_attr(len(path))
            #Set times from the feed entry (updated/published/lastViewed).
            if entry.lastViewed is None:
                self.files[path].set_access_times(self._time_convert(entry.updated.text.decode(self.codec)),
                        self._time_convert(entry.published.text.decode(self.codec)))
            else:
                self.files[path].set_access_times(self._time_convert(entry.updated.text.decode(self.codec)),
                        self._time_convert(entry.published.text.decode(self.codec)),
                        self._time_convert(entry.lastViewed.text.decode(self.codec)))
        else:
            if file:
                self.files[path].set_file_attr(len(path))
def _time_convert(self, t):
"""
Purpose: Converts the GData String time to UNIX Time
t: String representation of GData's time format
Returns: Integer conversion of t in UNIX Time
"""
return int(time.mktime(tuple([int(x) for x in (t[:10].split('-')) + t[11:19].split(':')]) + (0,0,0)))
def _file_extension(self, entry):
"""
Purpose: Determine the file extension for the given entry
entry: DocumentListEntry object to scan for filetype
Returns: String of length 3 with file extension (Currently only Oasis filetypes)
"""
if entry.GetDocumentType() == 'document':
return 'doc'
elif entry.GetDocumentType() == 'spreadsheet':
return 'xls'
elif entry.GetDocumentType() == 'presentation':
return 'ppt'
#Should never reach this - used for debugging
return entry.GetDocumentType()
def main():
    """
    Purpose: Mount the filesystem
    Returns: 0 To indicate successful operation
    """
    usage = """Google Docs FS: Mounts Google Docs files on a local
filesystem gFile.py email password mountpoint""" + fuse.Fuse.fusage
    passwd = None
    # Prompt until a non-empty password is typed (never taken from argv)
    while not passwd:
        passwd = getpass.getpass()
    #GFile expects things in the reverse order
    sys.argv[1], sys.argv[2] = sys.argv[2], sys.argv[1]
    gfs = GFile(sys.argv[1], passwd, version = "%prog " + fuse.__version__,
                usage = usage, dash_s_do='setsingle')
    # parse FUSE command line options, then enter the FUSE main loop
    gfs.parse(errex=1)
    gfs.main()
    return 0

if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python
import time
class StopWatch(object):
    """A small object to simulate a typical stopwatch."""

    def __init__(self):
        # starting the watch is the same as restarting it from zero
        self.restart()

    def __str__(self):
        # render the seconds elapsed since the last (re)start
        elapsed = time.time() - self.start
        return str(elapsed)

    def restart(self):
        """Reset the reference point to the current time."""
        self.start = time.time()
| Python |
#!/usr/bin/env python
# Bootstrap helper: if argparse is missing (Python < 2.7), download the
# standalone 1.1 release, import it once so it gets byte-compiled,
# then remove the sources again (the .pyc stays importable).
try:
    import argparse
except ImportError:
    import zipfile
    import urllib2
    import os
    # A non-seekable stream doesn't suffice, downloading
    open('argparse.zip', 'wb').write(urllib2.urlopen(
        'http://argparse.googlecode.com/files/argparse-1.1.zip'
    ).read())
    # Extract the module file
    f = zipfile.ZipFile('argparse.zip')
    open('argparse.py', 'w').write(f.read('argparse-1.1/argparse.py'))
    f.close()
    # Compile
    import argparse
    # Cleanup
    os.remove('argparse.py')
    os.remove('argparse.zip')
else:
    print "Error: argparse is already available"
| Python |
#!/usr/bin/env python
from itertools import count, izip, dropwhile
from collections import deque
from operator import itemgetter
from bisect import bisect_left
import sys
import os
import time
import cPickle
import numpy as np
# additional modules
from vrptw import *
from vrptw.consts import *
from compat import *
# NEIGBOURHOOD OPERATORS - single step trials
# Registry of all neighbourhood operators, keyed by function name.
operations = {}

def operation(func):
    """Decorator: register *func* as a single-solution operator."""
    operations[func.__name__] = func
    return func
@operation
def op_greedy_single(sol, randint = r.randint):
    """Neighbourhood operator - remove random customer and insert back."""
    # pick a route (local `r` shadows the module RNG; randint is
    # already bound via the default argument, so this is safe)
    r = randint(0, sol.k-1)
    # positions run up to R_LEN-2 - presumably the last slot is the
    # return to the depot; TODO confirm against vrptw
    pos = randint(0, sol.r[r][R_LEN]-2)
    c = remove_customer(sol, r, pos)
    # reinsert the customer (position chosen by insert_customer)
    insert_customer(sol, c)
@operation
def op_greedy_multiple(sol, randint = r.randint):
    """Remove a few customers from a random route and insert them back."""
    r = randint(0, sol.k-1)
    # remove between 1 and 9 customers, but never more than the route holds
    num_removed = randint(1, min(9, sol.r[r][R_LEN]-1))
    removed = []
    for i in xrange(num_removed):
        removed.append(remove_customer(sol, r, randint(0, sol.r[r][R_LEN]-2)))
    # reinsert in removal order (positions chosen by insert_customer)
    for c in removed:
        insert_customer(sol, c)
def pick_short_route(sol, random=r.random):
    """Pick a route with preference for shortest."""
    # weight each route by the inverse of its customer count, then
    # sample an index from the cumulative distribution
    weights = np.array([1.0 / (route[R_LEN] - 1) for route in sol.r]).cumsum()
    return bisect_left(weights, random() * weights[-1])
def pick_long_route(sol, random=r.random):
    """Return a random route, with preference for the longer."""
    # cumulative customer counts act as the sampling distribution
    weights = np.array([route[R_LEN] - 1 for route in sol.r]).cumsum()
    return bisect_left(weights, random() * weights[-1])
@operation
def op_fight_shortest(sol, random=r.random, randint=r.randint):
    """Picks and tries to empty a random route with preference for shortest."""
    r = pick_short_route(sol)
    # up to 10 customers, capped at the route's actual size
    num_removed = min(randint(1, 10), sol.r[r][R_LEN]-1)
    removed = []
    for i in xrange(num_removed):
        removed.append(remove_customer(sol, r, randint(0, sol.r[r][R_LEN]-2)))
    # reinsert them (hopefully onto other routes)
    for c in removed:
        insert_customer(sol, c)
@operation
def op_tabu_single(sol, randint = r.randint, choice=r.choice):
    """Pick one customer from a random route and move him to a different."""
    r = pick_short_route(sol)
    old_len = sol.r[r][R_LEN]
    old_k = sol.k
    pos = randint(0, old_len-2)
    c = remove_customer(sol, r, pos)
    # d("Route %d, from %d, removed customer %d"%(r,pos,c))
    for tries in xrange(sol.k-1):
        # max k tries
        r2 = randint(0, sol.k-2)
        # picking all other with equal probability; skip-over only valid
        # while route r still exists (removal may have deleted it)
        if r2 >= r and sol.k == old_k: r2 +=1
        # d("other route %d" % r2)
        # never move the customer onto a route shorter than the source
        if sol.r[r2][R_LEN] < old_len:
            continue
        candidates = sorted(find_allpos_on(sol, c, r2))
        if not candidates:
            continue
        # take the last (largest) candidate after sorting
        dist, pos = candidates[-1] # choice(candidates)
        # d("found pos %d (%.2f inc)" % (pos, dist))
        insert_at_pos(sol, c, r2, pos)
        return
    # customer c from r failed to move - insert him back
    u.undo()
@operation
def op_tabu_shortest(sol, randint = r.randint):
    """Remove customers from a short route and push them onto longer ones."""
    r = pick_short_route(sol)
    num_removed = randint(1, sol.r[r][R_LEN]-1)
    removed = []
    for i in xrange(num_removed):
        removed.append(remove_customer(sol, r, randint(0, sol.r[r][R_LEN]-2)))
    for c in removed:
        tried = set()
        found = False
        for tries in xrange(sol.k*2):
            # max k tries
            r2 = pick_long_route(sol)
            if r2 in tried:
                continue
            # print "Long route", r2
            # time.sleep(0.6)
            dist, pos = find_bestpos_on(sol, c, r2)
            # NOTE(review): `if pos:` also rejects pos == 0; other call
            # sites test `pos is None` - confirm position 0 cannot occur
            if pos:
                insert_at_pos(sol, c, r2, pos)
                found = True
                break
            tried.add(r2)
        if not found:
            # one customer could not be placed - roll the whole batch back
            u.undo()
            return
    ##print "We displaced %d customers from %d:" % (num_removed, r), removed
# major solution functions (metaheuristics)

def build_first(sol):
    """Greedily construct the first solution."""
    sol.reset()
    # insert customers one by one, in the currently configured order
    for c in sol.task.getSortedCustomers():
        insert_customer(sol, c[ID])
    # remember which ordering produced this initial solution
    sol.mem['init_order'] = VrptwTask.sort_order
    u.commit()
    sol.loghist()
def build_by_savings(sol, wait_limit = None, mi = 1):
    """Construct a new solution by savings heuristic.
    sol: VrptwSolution to (re)build in place
    wait_limit: optional cap on the waiting time created by a merge
    mi: weight of the direct-link distance in the saving formula
    """
    def check_saving(x, y):
        """Compute and return possible saving for concatenating x and y.
        The return value is a tuple (saving, wait_time)"""
        # last edge of route x, first edge of route y
        xk, _, arr_xk, _ = sol.r[x][R_EDG][-1]
        _, y0, _, larr_y0 = sol.r[y][R_EDG][0]
        arr_y0 = arr_xk + sol.t(xk, y0)
        wait_y0 = max(0, sol.a(y0) - arr_y0)
        # infeasible merge: capacity, latest arrival, or wait limit violated
        if (sol.r[x][R_CAP]+sol.r[y][R_CAP] > sol.task.capa
                or arr_y0 > larr_y0
                or (wait_limit and wait_y0 > wait_limit) ):
            return None, None
        # saving = the two dropped depot legs minus the new direct link
        return sol.d(xk, 0) + sol.d(0, y0) - mi*sol.d(xk, y0), wait_y0

    def list_savings():
        """Return list of possible savings as [(saving, route 1, route 2)]."""
        savings = []
        for i in xrange(sol.k):
            for j in xrange(sol.k):
                if i <> j:
                    s, w = check_saving(i, j)
                    if s is not None:
                        # wait stored negated, so max() prefers less waiting
                        savings.append((s, -w, i, j))
        return savings

    sol.reset()
    # start from one single-customer route per customer
    for c in xrange(sol.task.N):
        insert_new(sol, c+1)
    # greedily apply the best remaining merge until none is feasible
    while True:
        savings = list_savings()
        if len(savings) == 0:
            break
        sav, wt, r1, r2 = max(savings)
        # print 'saving', sav, 'by join of', r1, r2, 'wait', wt, 'in', sol.task.name
        print "Joining %d and %d for %.1f saving." % (sol.r[r1][R_EDG][-1][E_FRO], sol.r[r2][R_EDG][0][E_TOW], sav)
        join_routes(sol, r1, r2)
        # turned off for efficiency (now assumed correct)
        # sol.check()
    u.commit()
    return sol
def build_by_mfsavings(sol, wait_limit = None, mi = 1):
    """Build by maybe faster savings heuristic implementation.
    Should actually provide the same results as 'normal' O(n**3) savings:
    all pairwise savings are computed once, sorted, and applied greedily.
    """
    sol.reset()
    # one single-customer route per customer
    for c in xrange(sol.task.N):
        insert_new(sol, c+1)
    # prevs/nexts record which customers are already linked by a merge
    prevs = [0] * (sol.task.N+1)
    nexts = [0] * (sol.task.N+1)
    # route[c] -> the route object that currently contains customer c
    # (index 0 is a placeholder for the depot)
    route = [None]+sol.r
    # negated savings, so an ascending sort yields best merges first
    possible = [ (mi*sol.d(i,j) - sol.d(0, i) - sol.d(j, 0), i, j)
                 for i in xrange(1, sol.task.N+1)
                 for j in xrange(1, sol.task.N+1)
                 if i <> j ]
    possible.sort()
    # TODO: check validity and perform savings
    for sav, i, j in possible:
        # third condition: already joint (ends of the same route)
        if nexts[i] or prevs[j] or route[i]==route[j]:
            continue
        # check arrivals
        xk, _, arr_xk, _ = route[i][R_EDG][-1]
        _, y0, _, larr_y0 = route[j][R_EDG][0]
        arr_y0 = arr_xk + sol.t(xk, y0)
        wait_y0 = max(0, sol.a(y0) - arr_y0)
        if (route[i][R_CAP]+route[j][R_CAP] > sol.task.capa
                or arr_y0 > larr_y0
                or (wait_limit and wait_y0 > wait_limit) ):
            continue
        # join routes
        print "Joining %d and %d for %.1f saving." % (i, j, -sav)
        prevs[j] = i
        nexts[i] = j
        # remember last customer, before joining
        last_rj = route[j][R_EDG][-1][E_FRO]
        prevdist = sol.dist
        join_routes_ref(sol, route[i], route[j])
        # careful - this must come after joining
        route[last_rj] = route[i]
        # print "result:\n", route[i]
        # from compat import print_like_Czarnas
        # print_like_Czarnas(sol, sparse=True)
    # TURN ON IF NECCESSARY:
    # sol.check_full()
    return sol
def local_search(sol, oper, end=0, verb=False, speed=None):
    """Optimize solution by local search.
    sol: solution to improve in place
    oper: neighbourhood operator, called as oper(sol)
    end: wall-clock deadline (a time.time() value); 0 means ~3 s from now
    verb: print a one-line summary when the deadline is hit
    speed: optional list - steps-per-second gets appended for the caller
    """
    # local rebinds
    ci=u.commit; undo=u.undo; val=sol.val
    oldval = val()
    from time import time
    # stats
    updates = 0
    steps = 0
    start = time()
    if end == 0:
        end = time()+3
    while time() < end:
        steps += 1
        oper(sol)
        newval = val()
        if newval < oldval:
            # strict improvement - keep it and log the event
            oldval = newval
            updates += 1
            sol.loghist()
            ci()
        elif val()[0] == oldval[0]:
            # huh, not worse, when it comes to routes
            ci()
        else:
            undo()
    elapsed = time()-start
    if verb:
        print " ".join([ sol.infoline(),
            "%.1f s, %.2f fps, %d acc (%.2f aps)" % (
                elapsed, steps/elapsed, updates, updates/elapsed) ])
    # fps measurement from outside
    if not speed is None:
        speed.append(steps/elapsed)
    sol.loghist()
    return sol
# MISC. SOLUTION FUNCTIONS - postprocessing

def plot_history(sol):
    """Display a matplotlib graph of solution progress"""
    from matplotlib import pyplot as plt
    # history rows are (routes, distance, timestamp) tuples
    k, dist, t = zip(*sol.history)
    fig = plt.figure()
    fig.suptitle(sol.task.name+" "+sol.infoline())
    # subplot of routes vs. time
    kplt = fig.add_subplot(121)
    kline = kplt.plot(t, k, 'g')
    min_k = (sol.task.best_k or 2)-2
    # scaling
    kplt.axis([0, sol.history[-1][2], min_k, max(k)+1])
    # labels etc.
    plt.xlabel('time [s]')
    plt.ylabel('routes (k)')
    if sol.task.best_k:
        # horizontal marker at the best known route count
        kplt.axhline(sol.task.best_k+0.03)
    # subplot of distance vs. time
    dplt = fig.add_subplot(122)
    dline = dplt.plot(t, dist, 'g')
    # scaling the plot (with a 5% margin around the data range)
    min_d = min(dist+(sol.task.best_dist,))
    max_d = max(dist+(sol.task.best_dist,))
    span_d = max_d - min_d
    dplt.axis([0, sol.history[-1][2], min_d-span_d/20., max_d+span_d/20.])
    # decoration with labels, etc.
    plt.grid(True)
    dplt.set_xlabel('time [s]')
    dplt.set_ylabel('dist')
    dplt.yaxis.set_label_position("right")
    dplt.yaxis.set_ticks_position("right")
    if sol.task.best_dist:
        dplt.axhline(sol.task.best_dist)
    plt.show()
# aggressive route minimization

def find_replace_pos_on(sol, c, r):
    """Return a position (occupied), where the customer could be inserted.
    Yields (distance_increase, pos) for every customer on route r whose
    removal would let customer c take his place feasibly.
    """
    # pull out deep things locally (note: local `time` shadows the
    # time module inside this function)
    time = sol.task.time
    cust = sol.task.cust
    dist = sol.task.dist
    c_A = cust[c][A]
    c_B = cust[c][B]
    edges = sol.r[r][R_EDG]
    # capacity the displaced customer must free for c to fit
    q_out = sol.r[r][R_CAP] + cust[c][DEM] - sol.task.capa
    # customers - d - deleted, a - starting, b - final, c - inserted
    a, d, arr_a, _ = edges[0]
    for pos in xrange(1, len(edges)):
        d, b, arr_d, larr_b = edges[pos]
        # check for too early positions, and weight constraint
        if c_A > larr_b or cust[d][DEM] < q_out:
            a, d, arr_a, larr_d = d, b, arr_d, larr_b
            continue
        # check for too late - end of scan
        if arr_a > c_B:
            break
        # time-window feasibility of the a -> c -> b detour
        arr_c = max(c_A, arr_a+time[a][c])
        arr_b = max(cust[b][A], arr_c+time[c][b])
        larr_c = min(c_B, larr_b-time[c][b])
        larr_a = min(cust[a][B], larr_c-time[c][b])
        if arr_a <= larr_a and arr_c <= larr_c and arr_b <= larr_b:
            distinc = dist[a][c]+dist[c][b]-(dist[a][d]+dist[d][b])
            yield (distinc, pos-1)
        # for next loop pass:
        a, d, arr_a, larr_d = d, b, arr_d, larr_b
def find_replace_pos(sol, c):
    """Yield (distinc, route, pos) replacement spots for customer c."""
    for idx, route in enumerate(sol.r):
        # replacing single customer makes little sense
        if route[R_LEN] <= 2:
            continue
        for cost, position in find_replace_pos_on(sol, c, idx):
            yield (cost, idx, position)
def short_light_route(sol):
    """Return the index of the shortest of the three lightest routes."""
    from heapq import nsmallest
    indices = xrange(sol.k)
    if sol.k > 3:
        # narrow the field to the three routes carrying the least load
        indices = nsmallest(3, indices, key=lambda i: sol.r[i][R_CAP])
    # among the candidates, fewest customers wins (ties: lowest index)
    best_len, best_idx = min((sol.r[i][R_LEN], i) for i in indices)
    return best_idx
def remove_route(sol, r):
    """Remove a route and return a list of its customers."""
    # pop through the undo stack so the removal can be rolled back
    data = u.pop(sol.r, r)
    # edge sources, minus the leading depot, are the route's customers
    cust = map(itemgetter(0), data[R_EDG])[1:]
    # adjust the route count and total distance accordingly
    u.ada(sol, 'k', -1)
    u.ada(sol, 'dist', -data[R_DIS])
    return cust
@operation
def op_route_min(sol, route=None, random=r.random, randint=r.randint, data=dict(die=0)):
    """Emulate the route minimization (RM) heuristic by Nagata et al.
    route: index of the route to eliminate (default: a short, light one)
    data: shared flag dict; setting data['die'] aborts the loop (the
          mutable default is only ever read here, so sharing is harmless)
    Raises RuntimeError when the route cannot be emptied.
    """
    from collections import deque, defaultdict
    if route is None:
        r = short_light_route(sol)
    else:
        r = route
    # print "I'll try to eliminate route", r+1
    # ep is the "ejection pool" of customers awaiting reinsertion
    ep = deque(remove_route(sol, r))
    # print "%d customers left to go:"% len(ep), ep
    def insert(c, r, pos, ep):
        # print "Customer %d goes to %d at pos %d" % (c, r+1, pos)
        insert_at_pos(sol, c, r, pos)
        #print_like_Czarnas(sol)
        # print "Still left are:", ep
    # how often each customer was thrown back into the pool
    recycled = defaultdict(int)
    def put_to_ep(c, front=True):
        if front:
            ep.appendleft(c)
        else:
            ep.append(c)
        recycled[c] += 1
        # print "Next (%d) round for %d" % (c, recycled[c])
        # too much churn means we are in a dead end - give up
        if any(recycled[x] > 5 for x in ep):
            # print "Too much recycling in the EP: dead end"
            raise RuntimeError
    while len(ep) > 0 and not data['die']:
        c = ep.pop()
        # 1) try a random route first
        r = randint(0, sol.k-1)
        _, pos = find_bestpos_on(sol, c, r)
        if not pos is None:
            insert(c, r, pos, ep)
            continue
        # 2) then the best feasible position anywhere
        (_, pos), r = find_bestpos(sol, c)
        if not pos is None:
            insert(c, r, pos, ep)
            continue
        # 3) otherwise eject another customer to make room
        pos = sorted(find_replace_pos(sol, c))
        if pos:
            #print "Positions there:", pos
            #raw_input()
            _, r, p = pos[randint(0, min(5,len(pos)-1))]
            put_to_ep(remove_customer(sol, r, p), False)
            insert(c, r, p, ep)
            continue
        # 4) no option at all - back into the pool
        put_to_ep(c)
    if len(ep) > 0:
        print "Time out!"
        raise RuntimeError
    u.commit()
# MAIN COMMANDS
# Names of all top-level commands selectable from the CLI.
commands = set()

def command(func):
    """A command decorator - the decoratee should be a valid command."""
    commands.add(func.__name__)
    return func
# the CLUSTER command - mpi4py parallelism

def mpi_master(sol, comm, size, args):
    """Rank-0 process: hand out jobs, collect results, keep the best.
    Job tuples: ('initial', sort_order), ('killroute', route, essence),
    ('done',) as the shutdown signal.
    """
    from mpi4py import MPI
    essencs = []
    # 'inifinite' values:
    my_k = sol.task.N
    my_dist = sol.task.dist.sum()
    stat = MPI.Status()
    time_to_die = time.time() + args.wall
    started = time.time()
    # initial jobs - creating initial solutions
    jobs = deque([('initial', k) for k in sort_keys.keys()])
    # make sure every worker gets something, padding with random orders
    if len(jobs) < size+5:
        jobs.extend([('initial', 'by_random_ord')]*(size+5-len(jobs)))
    print "initial jobs are:", jobs
    for i in xrange(1, size):
        comm.send(jobs.popleft(), dest=i)
    # working loop
    workers = size-1
    while workers > 0:
        resp = comm.recv(source=MPI.ANY_SOURCE, status=stat)
        if time.time() < time_to_die and len(jobs)>0:
            job = jobs.popleft()
            # when the queue is bloated, skip killroute jobs for
            # solutions already much worse than the best seen
            while len(jobs) > 2000 and job[0]=='killroute' and job[2][0] > my_k+1:
                job = jobs.popleft()
            comm.send(job, dest=stat.Get_source())
        else:
            # no work left (or time is up): retire this worker
            comm.send(('done',), dest = stat.Get_source())
            workers -= 1
        if resp[0] == 'initial' or resp[1] == 'ok':
            essence = resp[2]
            # adopt a strictly better (k, dist) result
            if (my_k, my_dist) > essence[:2]:
                sol.set_essence(essence)
                sol.loghist()
                my_k = sol.k
                my_dist = sol.dist
                print "%.1f s, new best:" % (time.time()-started), sol.infoline()
            # schedule route elimination for near-best solutions
            if essence[0] < my_k + 2:
                for x in xrange(essence[0]):
                    jobs.append(('killroute', x, essence))
        elif resp[0] == 'killroute' and resp[1] == 'failed':
            pass # TODO: no idea what failed... (not sent)
        # emergency brake when the job queue explodes
        if len(jobs) > 1000000 and time_to_die <> 0:
            print "We've got problems, %.1f s" % (time.time()-started)
            time_to_die = 0
        if len(jobs) == 0:
            print "The jobs went out, %.1f s" % (time.time()-started)
    sol.save('_clus')
    exit()
def mpi_worker(sol, comm, rank, args):
    """Non-zero ranks: serve jobs from the master until ('done',) arrives."""
    # maybe start working immediately
    while True:
        orders = comm.recv(source=0)
        # print rank, "received orders:", orders
        if orders[0] == 'done':
            break
        elif orders[0] == 'initial':
            # build a fresh solution with the requested customer ordering
            VrptwTask.sort_order = orders[1]
            build_first(sol)
            comm.send(('initial','ok', sol.get_essence()), dest=0)
        elif orders[0] == 'killroute':
            # adopt the received solution and try to drop one route
            sol.set_essence(orders[2])
            try:
                op_route_min(sol, orders[1])
                comm.send(('killroute', 'ok', sol.get_essence()), dest=0)
            except RuntimeError:
                # elimination failed - roll back before reporting
                u.undo()
                comm.send(('killroute', 'failed', sol.get_essence()), dest=0)
        elif orders[0] == 'perturb':
            pass
        else:
            print rank, "orders not understood", orders
    print "Bye from worker", rank
    exit()
@command
def cluster(args):
    """MPI entry point: rank 0 becomes the master, all others workers."""
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    size = comm.Get_size()
    rank = comm.Get_rank()
    # a lone process has nobody to delegate to
    if size < 2:
        print "Sorry, only for > 1 process"
        exit()
    sol = VrptwSolution(VrptwTask(args.test))
    if rank == 0:
        mpi_master(sol, comm, size, args)
    else:
        mpi_worker(sol, comm, rank, args)
# POSTPROCESSING of old solutions
@command
def resume(args):
"""Load serialized solution, try to aliminate one or two routes."""
# autodestruction timeout mechanism:
data = dict(die=0)
def die():
data['die'] = 1
from threading import Timer
t = Timer(args.wall, die)
t.start()
sol = load_solution(args.test)
# print_like_Czarnas(sol)
tgt = pick_short_route(sol)
# guarded tries
try:
op_route_min(sol, tgt, data=data)
except:
t.cancel()
print "Failed removal of %d from %s, still: %d." % (tgt, sol.task.name, sol.k+1)
exit(1)
else:
t.cancel()
sol.check_full()
sol.save('_rsm')
print_like_Czarnas(sol)
print "Removed %d in %s, now: %s" % (tgt, sol.task.name, sol.infoline()),
@command
def perturb(args):
    """Load a solution and perform some search around."""
    solution = load_solution(args.test)
    # a short tabu-style local search around the loaded solution
    local_search(solution, op_tabu_single)
    print_like_Czarnas(solution)
    solution.save('_pert')
@command
def grout(args):
    """Postprocess a solution using the proprietary grout program."""
    import grout
    sol = load_solution(args.test)
    grout.DataLoader_load(sol.task.filename)
    dd = grout.DistanceDecreaser()
    # solutions cross the boundary in the flattened interchange format
    dd.inflate(sol.flatten())
    dd.setMaxEpochs(60)
    best = grout.Solution()
    dd.simulatedAnnealing(best)
    # bring the annealed result back and save it
    sol.inflate(best.flatten())
    sol.save('_grout')
    print best.flatten()
# LOCAL SEARCH related techniques

def _optimize(test, op, wall, intvl):
    """An optimization function, which does not use argparse namespace.
    test: open file with the instance
    op: name of the neighbourhood operator (a key of `operations`)
    wall: wall-clock budget in seconds
    intvl: seconds between progress printouts
    """
    sol = VrptwSolution(VrptwTask(test))
    build_first(sol)
    print_like_Czarnas(sol)
    print "Starting optimization for %d s, update every %s s." % (wall, intvl)
    time_to_die = time.time() + wall
    next_feedback = time.time() + intvl
    # run local search in feedback-interval sized slices
    while time.time() < time_to_die:
        local_search(sol, operations[op], next_feedback, True)
        print_like_Czarnas(sol)
        next_feedback = time.time()+intvl
    print "Wall time reached for %s." % test.name
    sol.save()
    print(sol.mem)
    print_like_Czarnas(sol)
    return sol
@command
def optimize(args):
    """Perform optimization of a VRPTW instance according to the arguments."""
    # delegate to the namespace-free helper and hand its result back
    return _optimize(args.test, args.op, args.wall, args.intvl)
def _optimize_by_name(arg):
    """Adapter for Pool.map: arg is a [filename, op, wall, intvl] list."""
    # open the test filename (VrptwTask had problems with it)
    # NOTE(review): mutates the caller's list in place and never closes
    # the file handle - acceptable for short-lived worker processes
    arg[0] = open(arg[0])
    return _optimize(*arg)
@command
def run_all(args):
    """As optimize, but runs all instances matching --glob.
    Each instance is run `--runs` times (default 1); with --multi the
    runs are spread over a multiprocessing Pool.
    """
    from glob import glob
    # default to one run per instance when --runs was not given
    runs = args.runs or 1
    # BUGFIX: multiply by the sanitized `runs`; the old code used
    # args.runs directly, which defaults to 0 and produced no tasks
    all_tasks = [[n, args.op, args.wall, args.intvl]
                 for n in glob(args.glob) * runs]
    if args.multi:
        from multiprocessing import Pool
        p = Pool()
        p.map(_optimize_by_name, all_tasks)
    else:
        map(_optimize_by_name, all_tasks)
def load_solution(f):
"""Unpickle solution from a stream."""
solution_data = cPickle.load(f)
print os.path.dirname(__file__), solution_data['filename']
filename = os.path.join(os.path.dirname(__file__),
solution_data['filename'])
print "Loading solution from:", filename
sol = VrptwSolution(VrptwTask(open(filename)))
sol.k, sol.dist = solution_data['val']
sol.r = solution_data['routes']
sol.mem = solution_data['mem']
try:
sol.history = solution_data['history']
except: pass
if not sol.check_full():
return None
print "Solution loaded:", sol.infoline()
return sol
@command
def load(args):
    """Loads a previously saved solution for analysis."""
    sol = load_solution(args.test)
    print_like_Czarnas(sol)
    print sol.mem
    print sol.get_signature()
    try:
        if len(sol.history):
            plot_history(sol)
        else:
            print "The solution has no history to plot"
    except ImportError:
        # matplotlib (or its GUI backend) may be absent, e.g. on servers
        print "Plotting history impossible (missing GUI or matplotlib)"
@command
def export(args):
"""Create other formats for saved solution, like .vrp"""
# TODO: real export; for now just print successors
sol = load_solution(args.test)
print "\n".join(str(s) for s in sol.get_successors())
# POOLCHAIN metaheuristic and friends_

def worker(sol, pools, operators, config):
    """The actual working process in a poolchain.
    pools: [poison_pills, input, output] multiprocessing queues
    operators: list; operators[1] is the neighbourhood operator to run
    config: (proc_id, pool size, feedback interval, wall-clock deadline)
    """
    import Queue as q
    from multiprocessing import Queue
    proc_id, size, intvl, deadline = config
    print "Worker launched, id:", proc_id
    # disperse workers' random number generators
    r.jumpahead(20000*proc_id)
    # disperse workers' feedback a bit (actually: random)
    next_feedback = time.time() + (proc_id+1)*intvl
    # NOTE(review): num_produced is never incremented below, so the
    # "produce new" branch always fires - looks like a lost `+= 1`
    num_produced = 0
    # the list for measurement of fps etc.
    myfps = []
    while time.time() < deadline:
        # choose solution to work on this round
        try:
            # fish in the pool
            new_essence = pools[1].get_nowait()
            sol.set_essence(new_essence)
            print "Worker", proc_id, "got job:", sol.infoline()
        except q.Empty:
            # if nothing to take - produce new one or keep current
            if num_produced < 5 or r.random() < 4.0/num_produced:
                order = r.choice(sort_keys.keys())
                VrptwTask.sort_order = order
                build_first(sol)
                print("Worker %d produced new: %s by %s" %
                      (proc_id, sol.infoline(), order))
            # else: go on with current
        # run optimization
        local_search(sol, operators[1], next_feedback, speed=myfps)
        next_feedback = time.time() + intvl*(size+1)
        # throw the solution back to the pool
        pools[2].put(sol.get_essence())
    # endwhile:
    # declare not to do any more output, send 'fps'
    # NOTE(review): divides by len(myfps) - raises ZeroDivisionError if
    # the deadline passed before any round completed; confirm acceptable
    pools[2].put((proc_id, sum(myfps)/len(myfps), 0))
    # print "Worker", proc_id, "should now finish."
@command
def poolchain(args):
    """Parallel optimization using a pool of workers and a chain of queues.
    The master keeps an elite list of up to 15 solution "essences",
    feeds them back through input_, and collects results from output.
    """
    import Queue as q
    from multiprocessing import cpu_count, Process, Queue
    time_to_die = time.time()+args.wall
    # create own solution object (for test data being inherited)
    began = time.time()
    sol = VrptwSolution(VrptwTask(args.test))
    # setup the queues
    poison_pills = Queue()
    input_ = Queue()
    output = Queue()
    queues = [ poison_pills, input_, output ]
    oplist = [ None, operations[args.op], None ]
    # create and launch the workers
    num_workers = args.runs or cpu_count()
    workers = [ Process(
                    target=worker, args=(sol, queues, oplist,
                        (i, num_workers, args.intvl, time_to_die)))
                for i in xrange(num_workers) ]
    map(Process.start, workers)
    # get a solution from the fastest worker (we have to service them...)
    print "Master waits for first solution..."
    essence = output.get()
    input_.put(essence)
    sol.set_essence(essence)
    print "Got first solution:", sol.infoline(), "after", time.time()-began
    sol.loghist()
    # the discriminators of the solution circulation
    best_seen_k = essence[0]
    best_essncs = [essence]
    if best_seen_k == sol.task.best_k:
        print "Best known route count immediately:", time.time()-began
        sol.mem['best_k_found'] = time.time()-began
        # --strive: once the best k is reached, shorten the run
        if args.strive:
            time_to_die = time.time() + args.wall / 5.0
            print "Wall time reduced to:", time_to_die - time.time()
    # manage the pool for a while (now - simply feed them back)
    # ---- START OF MAIN LOOP ----
    while time.time() < time_to_die:
        essence = output.get()
        # drop solutions worse than best_seen_k+1
        if essence[0] <= best_seen_k+1:
            # -- check for route count record
            if best_seen_k > essence[0]:
                best_seen_k = essence[0]
                if best_seen_k == sol.task.best_k:
                    print "Best known route count reached:", time.time()-began
                    sol.mem['best_k_found'] = time.time()-began
                    if args.strive and time_to_die > time.time()+args.wall/5.0:
                        time_to_die = time.time()+args.wall/5.0
                        print "Remaining time reduced to:", args.wall/5.0
            # -- check against pool (elite list kept sorted, max 15 long)
            pos = bisect_left(best_essncs, essence)
            if ( len(best_essncs)<15
                    or (pos < 15 and best_essncs[pos][:2] <> essence[:2]) ):
                # this solution is ok - pay it forward
                input_.put(essence)
                best_essncs.insert(pos, essence)
                if len(best_essncs) > 15:
                    best_essncs.pop()
                if pos == 0:
                    # new global best - remembering as a historical event
                    sol.set_essence(essence)
                    sol.loghist()
            else:
                # throw in one of the elite solutions
                input_.put(r.choice(best_essncs))
        elif r.random() < 0.5:
            # if solution was bad (route count), maybe throw in old
            input_.put(r.choice(best_essncs))
    # ---- END OF MAIN LOOP ----
    print "Wall time passed, after:", time.time()-began
    # drain worker results; a (proc_id, fps, 0) triple is the "pill-box"
    # each worker sends when it stops producing output
    fpss = []
    workers_left = num_workers
    while workers_left > 0:
        k, dist, routes = output.get()
        if routes == 0:
            workers_left -= 1
            print "Worker's",k,"pill-box received", time.time()-began
            fpss.append(dist)
        else:
            if (k, dist) < sol.val():
                sol.set_essence((k, dist, routes))
            print 'got out from output: ', k, dist
    print "Staff is to join: so much are alive:"
    print map(Process.is_alive, workers)
    print input_.qsize(), 'solutions still in queue 1'
    # drain the feedback queue too, keeping anything better
    try:
        while True:
            # print "Waiting for a solution"
            k, dist, routes = input_.get(timeout=0.3)
            if (k, dist) < sol.val():
                sol.set_essence((k, dist, routes))
                print 'got out: ', k, dist
    except q.Empty:
        pass
    try:
        output.get(timeout=0.1)
    except q.Empty:
        pass
    else:
        print "Possible rubbish in output"
    print "Best solution chosen. Saving.", time.time()-began
    sol.save('_pc') # suffix for poolchain
    print_like_Czarnas(sol)
    # one-line machine-greppable summary of the run
    print "summary:", sol.task.name, "%d %.1f"%sol.val(), "%.1f %.1f"%sol.percentage(),
    print "wall", args.wall, "workers", num_workers, "op", args.op, 'best_k',
    try:
        print "%.1f" % sol.mem['best_k_found'],
    except KeyError:
        print 'NO',
    print 'fps', "%.1f" % sum(fpss)
    #map(Process.join, workers)
    print "\nTotal time elapsed:", time.time()-began
@command
def initials(args):
    """Produce initial solutions in all available ways, and 10x randomly."""
    sol = VrptwSolution(VrptwTask(args.test))
    results = []
    best_order = None
    build_by_savings(sol)
    # one deterministic build per available sort order
    for k in sort_keys.keys():
        VrptwTask.sort_order = k
        build_first(sol)
        results.append((sol.percentage(), k, sol.k))
        # track the order that produced the best result so far
        if min(results) == results[-1]:
            best_order = k
    # plus several runs with a random customer order
    VrptwTask.sort_order = 'by_random_ord'
    for i in xrange(9):
        build_first(sol)
        results.append((sol.percentage(), 'by_random_ord', sol.k))
    # print a ranking of all attempts
    rank = 1
    for prec, k, sol_k in sorted(results):
        print "%-20s %.2f %.2f routes %d rank %02d %s" % (
            (k+':',)+prec+(sol_k, rank, sol.task.name))
        rank += 1
    # best deterministic order, or the order given
    # will be used for saving
    if args.order is None:
        VrptwTask.sort_order = best_order
    else:
        VrptwTask.sort_order = args.order
    build_first(sol)
    sol.save("_init")
def mksol(name = 'c101'):
    """Produce a solution with a task for the given name (shorthand)."""
    import os.path
    sol = None
    # NOTE(review): no break - if several candidates exist, the LAST
    # matching path wins and each earlier match re-reads its file
    for cand in [name, 'solomons/%s'%name, 'hombergers/%s'%name,
                 'solomons/%s.txt'%name, 'hombergers/%s.txt'%name]:
        if os.path.exists(cand):
            sol = VrptwSolution(VrptwTask(cand))
    return sol
def get_argument_parser():
    """Create and configure an argument parser.
    Used by main function; may be used for programmatic access."""
    try:
        from argparse import ArgumentParser, Action
        parser = ArgumentParser(
            description="Optimizing VRPTW instances with some heuristics")
        parser.add_argument(
            "test", type=file, nargs='?', default=os.path.join(
                os.path.dirname(__file__), 'hombergers','rc210_1.txt'),
            help="the test instance: txt format as by M. Solomon")
        parser.add_argument(
            "command", choices=commands, nargs="?", default="poolchain",
            help="main command to execute (when omitted: poolchain)")
        parser.add_argument(
            "--op", choices=operations.keys(), nargs="?",
            default="op_fight_shortest", help="neighbourhood operator to use")
        parser.add_argument(
            "--runs", "-n", type=int, default=0,
            help="repeat (e.g. optimization) n times, or use n processes")
        parser.add_argument(
            "--glob", "-g", default="hombergers/*.txt",
            help="glob expression for run_all, defaults to all H")
        parser.add_argument(
            "--wall", "-w", type=int, default=600,
            help="approximate walltime (real) in seconds")
        parser.add_argument(
            "--intvl", type=int, default=10,
            help="approximate refresh rate (delay between messages)")
        parser.add_argument(
            "--strive", action="store_true",
            help="run for best known route count, and then only short")
        parser.add_argument(
            "--multi", "-p", action="store_true",
            help="use multiprocessing for parallelism e.g. with run_all")
        parser.add_argument(
            "--prof", action="store_true",
            help="profile the code (don't do that), 10x slower")
        class OptionAction(Action):
            """A dispatching action for option parser - global configs"""
            def __call__(self, parser, namespace, values,
                         option_string=None):
                # these options configure module-level state, not just
                # the argparse namespace
                if option_string in ['-o', '--output']:
                    VrptwSolution.outdir = values
                elif option_string == '--order':
                    VrptwTask.sort_order = values
                    namespace.order = values
                elif option_string in ['-s', '--seed']:
                    global r_seed
                    r_seed = int(values)
                    r.seed(r_seed)
        parser.add_argument(
            "--seed", "-s", action=OptionAction,
            help="Set a custom RNG seed")
        parser.add_argument(
            "--output", "-o", default="output", action=OptionAction,
            help="output directory for saving solutions")
        parser.add_argument(
            "--order", action=OptionAction, choices=sort_keys.keys(),
            help="choose specific order for initial customers")
        return parser
    except ImportError:
        print "Install argparse module"
        raise
def main(can_profile = False):
    """Entry point when this module is ran at top-level."""
    args = get_argument_parser().parse_args()
    if can_profile and args.prof:
        import cProfile
        # close the handle opened by argparse; the recursive main() call
        # (with can_profile=False) re-parses argv and reopens it
        args.test.close()
        cProfile.run('main()', 'profile.bin')
        return
    # execute the selected command
    globals()[args.command](args)

if __name__ == '__main__':
    main(True)
| Python |
#!/usr/bin/env python
# may perhaps even work on systems without numpy
# Fall back to a stub with a float64 attribute, so the float checks in
# _rec_assert_simmilar work even without numpy installed.
try:
    import numpy
except ImportError:
    # narrowed from a bare except: only a missing module should
    # trigger the stub, not e.g. KeyboardInterrupt during import
    numpy = type('dummy', (object,), dict(float64=float))()
from vrptw import VrptwTask
class DummyTask(VrptwTask):
    """A tiny in-memory VRPTW instance (4 customers + depot) for tests."""

    # default customer rows; column layout follows vrptw.consts
    # NOTE(review): assumed [id, x, y, demand, open, close, service] - confirm
    _DEFAULT_CUST = [
        [0, 0, 0, 0, 0, 20, 0],
        [1, 1, 1, 20, 1, 5, 1],
        [2, 2, 2, 10, 0, 8, 1],
        [3, 1, 2, 15, 4, 15, 1],
        [4, 0, 1, 5, 10, 18, 1]
    ]

    def __init__(self, cust=None, Kmax=10, capa=200):
        # avoid the mutable-default-argument trap of the previous
        # version: every instance gets its own fresh copy of the rows
        if cust is None:
            cust = [row[:] for row in self._DEFAULT_CUST]
        self.name = 'test'
        self.cust = cust
        self.Kmax = Kmax
        self.capa = capa
        self.N = len(self.cust)-1
        self.precompute()
        self.best_k, self.best_dist = None, None
def test_savings():
    """Check the savings (Clarke, Wright) construction method."""
    from vrptw import VrptwSolution
    from pygrout import build_by_savings
    solution = VrptwSolution(DummyTask())
    build_by_savings(solution)
    assert solution.check()
def test_deepmap():
    """Check the utility for mapping nested lists and dictionaries."""
    from organize import deepmap
    from math import sqrt
    # flat list
    assert deepmap(lambda x: x+1, [1, 3, 2]) == [2, 4, 3]
    # flat dict
    assert deepmap(str.upper, dict(a='hi', b='there')) == dict(a='HI', b='THERE')
    # dict of lists
    squares = {'a': [0, 4, 16], 'b': [1, 9, 25]}
    assert deepmap(sqrt, squares) == {'a': [0., 2., 4.], 'b': [1., 3., 5.]}
def _rec_assert_simmilar(a, b):
assert type(a)==type(b), 'wrong types: %s and %s' % (type(a), type(b))
if type(a) == list or type(a)==tuple:
for pair in zip(a, b):
_rec_assert_simmilar(*pair)
elif type(a) == int:
assert a == b
elif type(a) == float or type(a)==numpy.float64:
assert abs(a-b) < 1e-4
else:
assert False, 'unexpected type: '+str(type(a))
def test_flattening():
    """Checks the format for interchange with other programs, like grout."""
    from pygrout import (VrptwSolution, VrptwTask, build_first,
                         print_like_Czarnas)
    task = VrptwTask('solomons/rc208.txt')
    s1 = VrptwSolution(task)
    build_first(s1)
    print_like_Czarnas(s1)
    data1 = s1.flatten()
    print data1
    # round-trip: inflate into a second solution over the same task
    s2 = VrptwSolution(task)
    s2.inflate(data1)
    print "Ok, inflated... Let's see:"
    print_like_Czarnas(s2)
    print s2.flatten()
    # the round-tripped solution must be valid and flatten identically
    assert s2.check()
    assert s2.flatten()==data1
    _rec_assert_simmilar(s1.get_essence(), s2.get_essence())
# possible similar tests: test for assign, copy,
# {get,set}_essence of Solution. But these work already.

def test_find_pos():
    """Check consistency of finding the best position in a route."""
    from pygrout import (VrptwSolution, VrptwTask, build_first,
                         print_like_Czarnas, find_bestpos_on, find_allpos_on, R_EDG)
    sol = VrptwSolution(VrptwTask('solomons/rc206.txt'))
    build_first(sol)
    # for every customer and every other route: the single "best"
    # position must agree with the maximum over all enumerated positions
    for i in xrange(sol.k):
        for c in sol.r[i][R_EDG][1:]:
            for j in xrange(sol.k):
                if i <> j:
                    best = find_bestpos_on(sol, c[0], j)
                    allp = list(find_allpos_on(sol, c[0], j))
                    print "Best:", best, "all:", allp
                    if best == (None, None):
                        assert allp == []
                    else:
                        assert best in allp
                        assert best == max(allp)
def test_argmap():
    """Test of the class ArgMap from helper.py"""
    try:
        from helper import ArgMap
    except ImportError:
        # helper drags in GUI dependencies; skip quietly without them
        print "Missing something: PyQt or matplotlib etc."
        return
    from glob import glob
    m = ArgMap()
    files = glob("solomons/r1*")
    files.sort()
    m.add(files)
    # first added file maps to 1; ticks mark the r101/r106 boundaries
    assert m(files[0]) == 1
    assert m.ticklabels == ['r101', 'r106']
    assert m.ticks == [1, 6]
# Test left out, reenable in case of trouble ;)
def _test_initial_creation():
    """Unit test for creating solutions to all included benchmarks.

    Nose-style generator test (leading underscore keeps it disabled):
    yields one (check_one, test) pair per benchmark file, so each
    benchmark appears as a separate test case.
    """
    from pygrout import VrptwSolution, VrptwTask, build_first
    def check_one(test):
        # building an initial solution must always yield a valid one
        s = VrptwSolution(VrptwTask(test))
        build_first(s)
        assert s.check()==True, 'Benchmark %s failed at initial solution' % test
    from glob import iglob
    # Homberger's are too heavy
    # from itertools import chain
    # tests = chain(iglob("solomons/*.txt"), iglob('hombergers/*.txt'))
    tests = iglob("solomons/*.txt")
    for test in tests:
        yield check_one, test
# when run directly (not through a test runner) only test_savings() executes
if __name__ == '__main__':
    test_savings()
| Python |
from distutils.core import setup
from glob import glob
# distutils packaging; the benchmark data directories (solomons/,
# hombergers/, vrptw/bestknown/) are shipped as data-only pseudo-packages
setup(name='Pygrout',
      version='0.1',
      description='VRPTW solving utility',
      author='Tomasz Gandor',
      url='http://code.google.com/p/pygrout/',
      packages=['vrptw', 'solomons', 'hombergers'],
      package_data = { 'vrptw': ['bestknown/*.txt'],
                       'hombergers': ['*.txt'],
                       'solomons': ['*.txt'] },
      py_modules=['pygrout', 'compat', 'undo']
      )
| Python |
from random import Random
from operator import itemgetter
import time
import cPickle
import os
import sys
import numpy as np
from undo import UndoStack
from consts import *
# --- module-level optimization state ---------------------------------
u = UndoStack()
"""Global undo - may be later made possible to override."""
r = Random()
"""The random number generator for the optimization."""
# the seed is remembered (stored later in solution.mem) so a run can be
# reproduced
r_seed = int(time.time())
r.seed(r_seed)
# Possible customer ordering (when inserting into initial solution)
# each key maps to a sort key function over a customer record
sort_keys = dict(
    by_opening = lambda x: x[A], # by start of TW
    by_closing = lambda x: x[B], # by end of TW
    by_midtime = lambda x: x[A]+x[B], # by middle of TW
    by_weight = lambda x: x[DEM], # by demand
    by_opening_desc = lambda x: -x[A], # by start of TW
    by_closing_desc = lambda x: -x[B], # by end of TW
    by_midtime_desc = lambda x: -x[A]-x[B], # by middle of TW
    by_weight_desc = lambda x: -x[DEM], # by demand
    by_timewin = lambda x: x[B]-x[A], # ascending TW
    by_timewin_desc = lambda x: x[A]-x[B], # descending TW
    by_id = lambda x: 0, # unsorted
    by_random_ord = lambda x: r.random() # random order
    )
class VrptwTask(object):
"""Data loader - holds data of a VRPTW Solomon-formatted test."""
sort_order = 'by_timewin'
def __init__(self, stream, precompute = True):
if type(stream)==str: stream = open(stream)
lines = stream.readlines()
self.filename = stream.name
stream.close()
self.name = lines[0].strip()
self.Kmax, self.capa = map(int, lines[4].split())
self.cust = [ map(int, x.split()) for x in lines[9:] ]
import array
self.cust = [ array.array('i', map(int, x.split())) for x in lines[9:] ]
self.N = len(self.cust)-1
if precompute:
self.precompute()
self.load_best()
def precompute(self):
"""Initialize or update computed members: distances and times."""
# transpose customers, get Xs and Ys and SRVs
x, y, srv, demands = itemgetter(X, Y, SRV, DEM)(zip(*self.cust))
# make squares
xx = np.tile(x, (len(x), 1))
yy = np.tile(y, (len(y), 1))
# compute hypots - distances
self.dist = ((xx-xx.T)**2+(yy-yy.T)**2)**0.5
# compute travel times (including service)
self.time = self.dist + np.tile(srv, (len(srv),1)).T
# calculating demand-related values
self.demands = sorted(demands)
self.sum_demand = sum(demands)
self.kbound_min = -(-self.sum_demand//self.capa)
print "Sum of q: %d (k_min >= %d), Q(0..4) = %d %d %d %d %d" % (
self.sum_demand, self.kbound_min, self.demands[1],
self.demands[self.N//4], self.demands[self.N//2],
self.demands[self.N*3//4], self.demands[-1])
def routeInfo(self, route):
"""Displays a route summary."""
cap, dist = 0.0, 0.0
print "Route:"
for a, b, aa, lab in route[R_EDG]:
print ("From %2d(%2d,%3d) to %2d(%4d,%4d): "
"start(%.2f)+svc(%d)+dist(%5.2f)=startb(%.2f);ltst(%.2f)"
% (a, self.cust[a][A], self.cust[a][B],
b, self.cust[b][A], self.cust[b][B],
aa, self.cust[a][SRV], self.dist[a][b],
aa + self.cust[a][SRV] + self.dist[a][b], lab) )
if lab < aa + self.cust[a][SRV] + self.dist[a][b]:
print "!"*70
cap += self.cust[a][DEM]
dist += self.dist[a][b]
print " Dist now %.2f, load now %.2f" % (dist, cap)
print "Route stored dist %.2f, load %.2f" % (route[R_DIS], route[R_CAP])
def getSortedCustomers(self):
"""Return customer tuples."""
return sorted(self.cust[1:], key=sort_keys[VrptwTask.sort_order])
def load_best(self):
"""Look for saved best solution values in the bestknown/ dir."""
try:
self.best_k, self.best_dist = map(eval, open(
os.path.join(os.path.dirname(__file__), 'bestknown',
self.name+'.txt')).read().split())
print("Best known solution for test %(name)s: %(best_k)d routes,"
" %(best_dist).2f total distance." % self.__dict__)
except IOError as ioe:
self.best_k, self.best_dist = None, None
print >>sys.stderr, ("Best known solution not found for test: "
+self.name)
if os.path.exists(os.path.join('bestknown', self.name+'.txt')):
raise
def bestval(self):
"""Return best value pair."""
return (self.best_k, self.best_dist)
def error(msg):
    """A function to print or suppress errors.

    Single switch point: the consistency checks report through here,
    so diagnostic output can be silenced by replacing this function.
    """
    print msg
class VrptwSolution(object):
    """A routes (lists of customer IDs) collection, basically.

    Core fields: k (route count), dist (total distance), r (list of
    route structures indexed by R_LEN/R_CAP/R_DIS/R_EDG), mem (free-form
    metadata dict) and history (list of [k, dist, elapsed] snapshots).
    """
    # default output directory for saved solutions
    outdir = os.path.join(os.path.dirname(__file__), '..', "output")
    def __init__(self, task):
        """The task could be used to keep track of it."""
        self.task = task
        self.reset()
        # additional field for any purpose
        self.mem = {}
        self.mem['r_seed'] = r_seed
        self.mem['t_start'] = time.time()
        self.history = []
    def reset(self):
        """Reinitialize the solution as empty."""
        self.r = []
        self.dist = 0.
        self.k = 0
    def loghist(self):
        """Put the current time and value into the history list."""
        self.history.append( [self.k, self.dist, time.time()-self.mem['t_start']] )
    def val(self):
        """Return a tuple to represent the solution value; less is better."""
        return (self.k, self.dist)
    def percentage(self):
        """Return a tuple of percentage of current solution vs best known."""
        if self.task.best_k:
            return (100.*self.k/self.task.best_k, 100.*self.dist/self.task.best_dist)
        # no best-known data available: report neutral 100%
        return (100, 100)
    def flatten(self):
        """Make a string representation of the solution for grout program.

        Format: a "k dist" header line, then one line per route listing
        the successor (E_TOW) of every edge, terminated by a '0' line.
        """
        return "\n".join(
            ["%d %f" % (self.k, self.dist)] +
            # E_TOW, i.e. edge targets
            [" ".join(str(e[1]) for e in rt[R_EDG]) for rt in self.r] + ['0\n'])
    def inflate(self, data):
        """Decode and recalculate routes from a string by flatten()."""
        # forget everything now:
        u.commit()
        # trusting the saved values
        lines = data.split("\n")
        k, dist = lines[0].split()
        self.k = int(k); self.dist = float(dist)
        # constructing routes
        self.r = []
        dist_glob = 0
        # local aliases for the task matrices (used in the inner loop)
        d = self.task.dist
        t = self.task.time
        cust = self.task.cust
        for l in xrange(1, len(lines)-2):
            # the last line should contain a newline, so -2
            customers = map(int, lines[l].split())
            edges = []
            load = 0
            dist = 0
            a = 0
            arr_a = 0
            # walk the route forward, accumulating load/dist/arrivals
            for b in customers:
                edges.append([a, b, arr_a, 0])
                load += cust[b][DEM]
                dist += d[a][b]
                arr_a = max(arr_a+t[a][b], cust[b][A])
                a = b
            # set latest arrival to depot, for propagating later
            edges[-1][3] = cust[0][B]
            self.r.append([ len(customers), load, dist, edges ])
            # fill in the latest-arrival column backwards from the depot
            propagate_deadline(self, -1, len(customers)-1)
            dist_glob += dist
        self.dist = dist_glob
    # Shorthands for access to task object.
    def d(self, a, b):
        # distance between customers a and b
        return self.task.dist[a][b]
    def t(self, a, b):
        # travel time (incl. service at a) between customers a and b
        return self.task.time[a][b]
    def a(self, c):
        # time window opening of customer c
        return self.task.cust[c][A]
    def b(self, c):
        # time window closing of customer c
        return self.task.cust[c][B]
    def dem(self, c):
        # demand of customer c
        return self.task.cust[c][DEM]
    def route(self, i):
        """Render a short representation of route i."""
        return "-".join(str(e[0]) for e in self.r[i][R_EDG])
    def check(self, complete=False):
        """Checks solution, possibly partial, for inconsistency.

        With complete=True additionally reports customers not serviced
        by any route. Returns True when all checks pass.
        """
        unserviced = set(range(1, self.task.N+1))
        for i in xrange(len(self.r)):
            if not self.check_route(i, unserviced):
                return False
        # NOTE(review): unserviced customers are only reported here, the
        # check still passes - confirm this leniency is intended
        if len(unserviced) and complete:
            error("Unserviced customers left in %s: " % self.task.name + ", ".join(str(x) for x in sorted(unserviced)))
        total_dist = sum(self.r[i][R_DIS] for i in xrange(self.k))
        if abs(total_dist - self.dist) > 1e-3:
            error("Wrong total dist: %f, while sum: %f (%d routes for %s)" % (total_dist, self.dist,
                self.k, self.task.name))
            return False
        return True
    def check_full(self):
        """Check full solution - shorthand method."""
        return self.check(True)
    def check_route(self, i, unserviced_ = None ):
        """Check route i for consistency.
        Remove found customers from unserviced_"""
        now, dist, cap, l = 0, 0, 0, 0
        unserviced = unserviced_ if unserviced_ is not None else set(range(1, self.task.N+1))
        for fro, to, afro, lato in self.r[i][R_EDG]:
            # recompute the expected service start and compare with stored
            actual = max(now, self.a(fro))
            if afro <> actual:
                error("Wrong time: %.2f (expected %.2f, err %.3f) on rt %d"
                      " edge %d from %d to %d, a(from) %d"
                      % (afro, actual, actual-afro, i, l, fro, to, self.a(fro)))
                error(self.route(i))
                return False
            if fro:
                if not fro in unserviced:
                    error("Customer %d serviced again on route %d" % (fro, i))
                else:
                    unserviced.remove(fro)
            dist += self.d(fro, to)
            cap += self.dem(fro)
            if cap > self.task.capa:
                error("Vehicle capacity exceeded on route %d with customer %d" % (i, fro))
                return False
            l += 1
            now = actual + self.t(fro, to)
        if l != self.r[i][R_LEN]:
            error("Wrong length %d (actual %d) for route %d" % (self.r[i][R_LEN], l, i))
            return False
        if abs(dist - self.r[i][R_DIS]) > 1e-4:
            error("Wrong distance %f (actual %f) for route %d" % (self.r[i][R_DIS], dist, i))
            return False
        return True
    def save(sol, extra=None):
        """Dump (pickle) the solution.

        NOTE: first parameter is named 'sol' instead of 'self'. The file
        name encodes test name, percentages vs best known, k, dist, a
        solution signature prefix and a time marker.
        """
        import uuid
        # handling unknown percentage (r207.50 and r208.50, actually)
        prec_k, prec_d = map(
            lambda x: "%05.1f" % x if sol.task.best_k else 'x'*5,
            sol.percentage())
        # time signature - minutes and seconds (too little?)
        time_sig = "%02d%02d" % divmod(int(time.time())%3600, 60)
        # additional markers
        if not extra is None: time_sig += str(extra)
        node_sig = hex(uuid.getnode())[-4:]
        save_name = "%s-%s-%s-%02d-%05.1f-%s-%s.p" % (
            sol.task.name, prec_k, prec_d, sol.k, sol.dist,
            sol.get_signature()[:8], time_sig)
        sol.mem['save_name'] = save_name
        sol.mem['save_time'] = time.time()
        sol.mem['t_elapsed'] = time.time() - sol.mem['t_start']
        sol.mem['host_sig'] = node_sig
        sol.mem['signature'] = sol.get_signature()
        save_data = dict(
            routes = sol.r,
            mem = sol.mem,
            val = sol.val(),
            filename = sol.task.filename,
            name = sol.task.name,
            percentage = sol.percentage(),
            history = sol.history )
        if not os.path.exists(sol.outdir):
            os.makedirs(sol.outdir)
        target_path = os.path.join(sol.outdir, save_name)
        # identical solutions produce identical names - skip duplicates
        if os.path.exists(target_path):
            print "File %s - such solution already exists" % target_path
        else:
            cPickle.dump(save_data, open(target_path, 'wb'))
        # not writing the copy - use the export command
        # open(os.path.join(sol.outdir, save_name.replace('.p', '.vrp')), 'w').write(sol.flatten())
        return sol
    def copy(self):
        """Return a copy the solution in a possibly cheap way."""
        clone = VrptwSolution(self.task)
        clone.assign(self)
        return clone
    def assign(self, rvalue):
        """Assignment operator - copy essential features from another solution."""
        self.k = rvalue.k
        self.dist = rvalue.dist
        # deep-copy routes through pickle round-trip
        self.r = cPickle.loads(cPickle.dumps(rvalue.r, 2))
    def get_essence(self):
        """Return the most interesting part of the solution - routes."""
        return (self.k, self.dist, self.r)
    def set_essence(self, essence):
        """Set new routes and value: use with result of get_essence."""
        self.k, self.dist, self.r = essence
    def get_successors(self):
        """Return an array of nodes' successors, 0 for depot."""
        data = [0] * (self.task.N+1)
        for route in self.r:
            # skip the first edge (depot -> first customer)
            for a, b, _, _ in route[R_EDG][1:]:
                data[a] = b
        return data
    def get_signature(self):
        """Return a hex digest of the solution."""
        import hashlib
        return hashlib.md5("-".join(str(succ) for succ in self.get_successors())).hexdigest()
    def infoline(self):
        # one-line summary: value pair and percentages vs best known
        return "(%d, %.2f) (%5.1f%%, %5.1f%%)" % (self.val()+self.percentage())
def propagate_arrival_ref(sol, rr, pos):
    """Update arrivals (actual service begin) on route rr after pos.

    rr is the route structure itself (by reference), pos the index of
    the edge whose arrival is already correct. Changes go through the
    global undo stack; the loop stops as soon as an edge's arrival is
    unchanged (no further propagation possible).
    """
    edges = rr[R_EDG]
    time = sol.task.time
    cust = sol.task.cust
    a, b, arr_a, _ = edges[pos]
    for idx in xrange(pos+1, len(edges)):
        b, _, old_arrival, _ = edges[idx]
        new_arrival = max(arr_a + time[a][b], cust[b][A])
        # check, if there is a modification
        if new_arrival == old_arrival:
            break
        u.set(edges[idx], E_ARF, new_arrival)
        a = b
        arr_a = new_arrival
def propagate_arrival(sol, r, pos):
    """Update arrivals (actual service begin) on route r after pos.

    Thin wrapper around propagate_arrival_ref(): the by-index and
    by-reference variants previously duplicated the same loop verbatim;
    this one now only resolves the route index (negative indices work
    too) and delegates.
    """
    propagate_arrival_ref(sol, sol.r[r], pos)
def propagate_deadline_ref(sol, rr, pos):
    """Update deadlines (latest legal service begin) on a route before pos.

    rr is the route structure itself (by reference), pos the index of
    the edge whose deadline is already correct. Walks backwards through
    the global undo stack and stops at the first unchanged deadline.
    """
    edges = rr[R_EDG]
    _, b, _, larr_b = edges[pos]
    time = sol.task.time
    cust = sol.task.cust
    for idx in xrange(pos-1, -1, -1):
        _, a, _, old_deadline = edges[idx]
        new_deadline = min(larr_b-time[a][b], cust[a][B])
        # check, if there is a modification
        if new_deadline == old_deadline:
            break
        u.set(edges[idx], E_LAT, new_deadline)
        b = a
        larr_b = new_deadline
def propagate_deadline(sol, r, pos):
    """Update deadlines (latest legal service begin) on route r before pos.

    Thin wrapper around propagate_deadline_ref(): the by-index and
    by-reference variants previously duplicated the same loop verbatim;
    this one now only resolves the route index (negative indices, as
    used by inflate(), work too) and delegates.
    """
    propagate_deadline_ref(sol, sol.r[r], pos)
# THE MODEL - basic operations on a solution (through UndoStack
def insert_new(sol, c):
    """Inserts customer C on a new route.

    All mutations go through the global undo stack, so the insertion
    can be rolled back.
    """
    new_route = [
        2,                      # number of edges
        sol.dem(c),             # demand on route
        sol.d(0,c)+sol.d(c,0),  # distance there and back
        [
            [0, c, 0, sol.b(c)], # depot -> c
            [c, 0, max(sol.t(0,c), sol.a(c)), sol.b(0)] # c -> depot
        ]
    ]
    u.ins(sol.r, sol.k, new_route)
    u.atr(sol, 'k', sol.k+1) # route no inc
    u.atr(sol, 'dist', sol.dist+new_route[R_DIS]) # total distance inc
def insert_at_pos(sol, c, r, pos):
    """Inserts c into route at pos. Does no checks.

    Splits edge (a, b) at index pos into (a, c) and (c, b), then
    propagates time-window data and updates route/solution aggregates.
    All mutations go through the global undo stack.
    """
    # update edges (with arrival times)
    edges = sol.r[r][R_EDG]
    # old edge
    a, b, arr_a, larr_b = u.pop(edges, pos)
    # arrival and latest arrival time to middle
    arr_c = max(arr_a + sol.t(a, c), sol.a(c))
    larr_c = min(sol.b(c), larr_b-sol.t(c, b))
    assert arr_c <= larr_c, 'invalid insertion, time window violated'
    # new edges - second then first
    u.ins(edges, pos, [c, b, arr_c, larr_b])
    u.ins(edges, pos, [a, c, arr_a, larr_c])
    # propagate time window constraints - forward
    propagate_arrival(sol, r, pos+1)
    # propagate time window constraints - backward
    propagate_deadline(sol, r, pos)
    # update distances
    dinc = sol.d(a, c)+sol.d(c, b)-sol.d(a, b)
    u.add(sol.r[r], R_DIS, dinc)
    u.ada(sol, 'dist', dinc)
    # update capacity
    u.add(sol.r[r], R_CAP, sol.dem(c))
    # update count
    u.add(sol.r[r], R_LEN, 1)
def find_bestpos_on(sol, c, r):
    """Finds best position to insert customer on existing route.

    Returns (-distance_increase, position), or (None, None) when no
    feasible position exists (capacity or time windows violated).
    NOTE: relies on Python 2 ordering where (None, None) compares
    smaller than any numeric tuple inside max().
    """
    # check capacity
    if sol.r[r][R_CAP] + sol.dem(c) > sol.task.capa:
        return None, None
    # pull out deep things locally
    time = sol.task.time
    cust = sol.task.cust
    dist = sol.task.dist
    c_a = cust[c][A]
    c_b = cust[c][B]
    def eval_edge(pack):
        # score one edge (a, b): feasibility of placing c between a and b
        pos, (a, b, arr_a, larr_b) = pack
        arr_c = max(arr_a + time[a][c], c_a) # earliest possible
        larr_c = min(c_b, larr_b-time[c][b]) # latest if c WAS here
        larr_a = min(sol.b(a), larr_c-time[a][c])
        if arr_c <= larr_c and arr_a <= larr_a:
            return (-(dist[a][c] + dist[c][b] - dist[a][b]), pos)
        return None, None
    # find the best edge
    return max(map(eval_edge, enumerate(sol.r[r][R_EDG])))
def find_bestpos(sol, c):
    """Find best positions on any route, return the route pos and distance.
    The exact format is a nested tuple: ((-dist increase, position), route)"""
    # (None, None) compares smaller than any numeric pair under Python 2,
    # so it works as the initial "no candidate yet" value
    best_pair = (None, None)
    best_route = None
    for route_idx in xrange(sol.k):
        for candidate in find_allpos_on(sol, c, route_idx):
            if candidate > best_pair:
                best_pair, best_route = candidate, route_idx
    return (best_pair, best_route)
def insert_customer(sol, c):
    """Insert customer at best position or new route.

    Returns the (route, position) where the customer ended up.
    """
    if sol.k:
        # try the cheapest feasible spot on the existing routes first
        (dist_inc, pos), route = find_bestpos(sol, c)
        if dist_inc is not None:
            insert_at_pos(sol, c, route, pos)
            return route, pos
    # no routes yet, or nowhere feasible: open a fresh route for c
    insert_new(sol, c)
    return sol.k-1, 0
def remove_customer(sol, r, pos):
    """Remove customer at pos from a route and return his ID.

    Merges the two edges around the customer back into one, or drops
    the whole route when it serviced only this customer. All mutations
    go through the global undo stack.
    """
    assert pos < sol.r[r][R_LEN], 'removal past route end'
    edges = sol.r[r][R_EDG]
    # pop the incoming edge (a -> b) and the outgoing edge (b -> c)
    a, b, arr_a, larr_b = u.pop(edges, pos)
    d, c, arr_b, larr_c = u.pop(edges, pos)
    assert b == d, 'adjacent edges do not meet in one node'
    if sol.r[r][R_LEN] == 2: # last customer - remove route
        rt = u.pop(sol.r, r)
        # solution route count decrease
        u.ada(sol, 'k', -1)
        # solution distance decrease
        u.ada(sol, 'dist', -rt[R_DIS])
        return b
    assert arr_a + sol.t(a, c) < larr_c, 'time window error after removal'
    # re-join as a single edge (a -> c)
    u.ins(edges, pos, [a, c, arr_a, larr_c])
    # propagating time window constraints
    propagate_arrival(sol, r, pos)
    propagate_deadline(sol, r, pos)
    # update distances (probably decrease)
    dinc = sol.d(a, c)-sol.d(a, b)-sol.d(b, c)
    u.add(sol.r[r], R_DIS, dinc)
    u.ada(sol, 'dist', dinc)
    # update capacity
    u.add(sol.r[r], R_CAP, -sol.dem(b))
    # update count
    u.add(sol.r[r], R_LEN, -1)
    return b
def find_allpos_on(sol, c, r, startpos=0):
    """Find all positions where customer c can be inserted on route r
    and return them as tuples (distinc, position).

    Generator; yields nothing when the route's remaining capacity is
    insufficient. distinc is negative distance increase, so bigger is
    better.
    """
    # check capacity
    if sol.r[r][R_CAP] + sol.dem(c) > sol.task.capa:
        return
    # check route edges
    edges = sol.r[r][R_EDG]
    time = sol.task.time
    cust = sol.task.cust
    dist = sol.task.dist
    c_a = cust[c][A]
    c_b = cust[c][B]
    for pos in xrange(startpos, sol.r[r][R_LEN]):
        a, b, arr_a, larr_b = edges[pos]
        if c_a > larr_b:
            # too early
            continue
        if arr_a > c_b:
            # too late
            break
        arr_c = max(arr_a + time[a][c], c_a) # earliest possible
        larr_c = min(c_b, larr_b-time[c][b]) # latest if c WAS here
        larr_a = min(cust[a][B], larr_c-time[a][c])
        # NOTE(review): leftover debug trace for a supposedly-rare case;
        # consider removing once confirmed harmless
        if arr_c <= larr_c and not arr_a <= larr_a:
            print "yes, this ever happens..."
        if arr_c <= larr_c and arr_a <= larr_a:
            # for some cases distinc in optional...
            distinc = -(dist[a][c] + dist[c][b] - dist[a][b])
            yield (distinc, pos)
def join_routes(sol, r1, r2):
    """Append r2 to the end of r1. Currently irreversible.

    WARNING: mutates the solution directly, bypassing the undo stack -
    do not mix with undoable operations in the same transaction.
    """
    # print sol.r[r1][R_EDG]
    # print sol.r[r2][R_EDG]
    # drop r1's return-to-depot edge and r2's departure-from-depot edge
    c, _, arr_c, _ = sol.r[r1][R_EDG].pop()
    _, d, _, larr_d = sol.r[r2][R_EDG].pop(0)
    pos = sol.r[r1][R_LEN]-1
    # distance saved by driving c -> d directly instead of via the depot
    saving = sol.d(c, 0) + sol.d(0, d) - sol.d(c, d)
    sol.r[r1][R_EDG].append([c, d, arr_c, larr_d])
    sol.r[r1][R_EDG].extend(sol.r[r2][R_EDG])
    sol.r[r1][R_LEN] += sol.r[r2][R_LEN]-1
    sol.r[r1][R_CAP] += sol.r[r2][R_CAP]
    sol.r[r1][R_DIS] += sol.r[r2][R_DIS] - saving
    propagate_arrival(sol, r1, pos)
    propagate_deadline(sol, r1, pos)
    # print sol.r[r1][R_EDG]
    sol.r.pop(r2)
    sol.k -= 1
    sol.dist -= saving
def join_routes_ref(sol, rr1, rr2):
    """Append route rr2 to the end of rr1, both given by reference.

    By-reference twin of join_routes(); also irreversible (bypasses
    the undo stack). rr2 is removed from the solution's route list.
    """
    c, _, arr_c, _ = rr1[R_EDG].pop()
    _, d, _, larr_d = rr2[R_EDG].pop(0)
    pos = rr1[R_LEN]-1
    # distance saved by driving c -> d directly instead of via the depot
    saving = sol.d(c, 0) + sol.d(0, d) - sol.d(c, d)
    rr1[R_EDG].append([c, d, arr_c, larr_d])
    rr1[R_EDG].extend(rr2[R_EDG])
    rr1[R_LEN] += rr2[R_LEN]-1
    rr1[R_CAP] += rr2[R_CAP]
    rr1[R_DIS] += rr2[R_DIS] - saving
    propagate_arrival_ref(sol, rr1, pos)
    propagate_deadline_ref(sol, rr1, pos)
    # print sol.r[r1][R_EDG]
    sol.r.remove(rr2)
    sol.k -= 1
    sol.dist -= saving
| Python |
# Shared index constants for the plain list/tuple data structures used
# throughout pygrout (star-imported as `from consts import *`).
# tuple indices in customer tuple:
# number, coordinates(X,Y), demand, ready(A), due(B), service time
ID, X, Y, DEM, A, B, SRV = range(7)
# list indices in route list structure:
# route len (num edges), capacity, total distance, edge list
R_LEN, R_CAP, R_DIS, R_EDG = range(4)
# list indices in edge list structure (in route edge list)
# customer "a" id, customer "b" id, arrival at "a", latest at "b"
E_FRO, E_TOW, E_ARF, E_LAT = range(4)
| Python |
#!/usr/bin/env python
import sys
import pstats
import os
def main():
    """Aggregate cProfile dump files given on the command line and print
    the top entries, sorted by the optional sort-order argument.

    Usage: script.py profile_output [sort_order] [profile_output...]
    The ROWS environment variable limits the number of printed rows.
    """
    if len(sys.argv) == 1:
        print "\nUsage: %s profile_output [sort_order] [profile_output...]" % sys.argv[0]
        print """
Where sort order may be: time (default), cumulative, ...
(taken from documentation:)
Valid Arg Meaning
'calls' call count
'cumulative' cumulative time
'file' file name
'module' file name
'pcalls' primitive call count
'line' line number
'name' function name
'nfl' name/file/line
'stdname' standard name
'time' internal time
"""
        exit()
    # the process
    order = 'time'
    # any argument that is an existing file is profile data; whatever is
    # left over is treated as the sort order
    files = filter(os.path.exists, sys.argv[1:])
    extra = list(set(sys.argv[1:])-set(files))
    if len(extra) > 0:
        order = extra[0]
        if len(extra) > 1:
            print "Warning: excess args ignored:", extra[1:]
    # merge all dumps into one statistics object
    s = pstats.Stats(files[0])
    map(s.add, files[1:])
    num_rows = int(os.getenv('ROWS', '20'))
    s.sort_stats(order).print_stats(num_rows)
if __name__ == '__main__':
    main()
| Python |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'helper.ui'
#
# Created: Mon Aug 29 16:54:25 2011
# by: PyQt4 UI code generator 4.8.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Helper(object):
    """Main form of the route-construction tester (from 'helper.ui').

    NOTE: this class was originally generated by pyuic4; the 20
    copy-pasted list-item creations and their 20 copy-pasted setText
    calls are now data-driven loops over FAMILY_PATTERNS. Regenerating
    the file from helper.ui will overwrite these edits.
    """

    # glob patterns shown in the "Test group selection" list, in row order
    FAMILY_PATTERNS = [
        "solomons/*.txt",
        "solomons/c*.txt",
        "solomons/c1*.txt",
        "solomons/c2*.txt",
        "solomons/r[12]*.txt",
        "solomons/r1*.txt",
        "solomons/r2*.txt",
        "solomons/rc*.txt",
        "solomons/rc1*.txt",
        "solomons/rc2*.txt",
        "hombergers/*_2??.txt",
        "hombergers/c?_2*.txt",
        "hombergers/c1_2*.txt",
        "hombergers/c2_2*.txt",
        "hombergers/r[12]_2*.txt",
        "hombergers/r1_2*.txt",
        "hombergers/r2_2*.txt",
        "hombergers/rc?_2*.txt",
        "hombergers/rc1_2*.txt",
        "hombergers/rc2_2*.txt",
    ]

    def setupUi(self, Helper):
        """Create and lay out all widgets on the Helper form."""
        Helper.setObjectName(_fromUtf8("Helper"))
        Helper.resize(661, 701)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(1)
        sizePolicy.setHeightForWidth(Helper.sizePolicy().hasHeightForWidth())
        Helper.setSizePolicy(sizePolicy)
        self.verticalLayout = QtGui.QVBoxLayout(Helper)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.widget = QtGui.QWidget(Helper)
        self.widget.setMinimumSize(QtCore.QSize(0, 0))
        self.widget.setObjectName(_fromUtf8("widget"))
        self.horizontalLayout_2 = QtGui.QHBoxLayout(self.widget)
        self.horizontalLayout_2.setMargin(0)
        self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
        self.groupBox_2 = QtGui.QGroupBox(self.widget)
        self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
        self.horizontalLayout_3 = QtGui.QHBoxLayout(self.groupBox_2)
        self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
        self.families = QtGui.QListWidget(self.groupBox_2)
        self.families.setMinimumSize(QtCore.QSize(30, 80))
        self.families.setObjectName(_fromUtf8("families"))
        # one row per test family; the texts are assigned in retranslateUi()
        for _ in self.FAMILY_PATTERNS:
            QtGui.QListWidgetItem(self.families)
        self.horizontalLayout_3.addWidget(self.families)
        self.best = QtGui.QPushButton(self.groupBox_2)
        self.best.setObjectName(_fromUtf8("best"))
        self.horizontalLayout_3.addWidget(self.best)
        self.horizontalLayout_2.addWidget(self.groupBox_2)
        self.groupBox_4 = QtGui.QGroupBox(self.widget)
        self.groupBox_4.setObjectName(_fromUtf8("groupBox_4"))
        self.verticalLayout_2 = QtGui.QVBoxLayout(self.groupBox_4)
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
        self.groupBox = QtGui.QGroupBox(self.groupBox_4)
        self.groupBox.setObjectName(_fromUtf8("groupBox"))
        self.horizontalLayout = QtGui.QHBoxLayout(self.groupBox)
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.label = QtGui.QLabel(self.groupBox)
        self.label.setObjectName(_fromUtf8("label"))
        self.horizontalLayout.addWidget(self.label)
        self.mi = QtGui.QDoubleSpinBox(self.groupBox)
        self.mi.setDecimals(2)
        self.mi.setSingleStep(0.05)
        self.mi.setProperty(_fromUtf8("value"), 1.0)
        self.mi.setObjectName(_fromUtf8("mi"))
        self.horizontalLayout.addWidget(self.mi)
        self.has_waitlimit = QtGui.QCheckBox(self.groupBox)
        self.has_waitlimit.setObjectName(_fromUtf8("has_waitlimit"))
        self.horizontalLayout.addWidget(self.has_waitlimit)
        self.waitlimit = QtGui.QSpinBox(self.groupBox)
        self.waitlimit.setEnabled(False)
        self.waitlimit.setMaximum(120)
        self.waitlimit.setSingleStep(30)
        self.waitlimit.setObjectName(_fromUtf8("waitlimit"))
        self.horizontalLayout.addWidget(self.waitlimit)
        self.mfs = QtGui.QCheckBox(self.groupBox)
        self.mfs.setObjectName(_fromUtf8("mfs"))
        self.horizontalLayout.addWidget(self.mfs)
        self.update = QtGui.QPushButton(self.groupBox)
        self.update.setObjectName(_fromUtf8("update"))
        self.horizontalLayout.addWidget(self.update)
        self.verticalLayout_2.addWidget(self.groupBox)
        self.groupBox_3 = QtGui.QGroupBox(self.groupBox_4)
        self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
        self.horizontalLayout_4 = QtGui.QHBoxLayout(self.groupBox_3)
        self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
        self.greedyOrder = QtGui.QComboBox(self.groupBox_3)
        self.greedyOrder.setObjectName(_fromUtf8("greedyOrder"))
        self.horizontalLayout_4.addWidget(self.greedyOrder)
        self.greedy = QtGui.QPushButton(self.groupBox_3)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.greedy.sizePolicy().hasHeightForWidth())
        self.greedy.setSizePolicy(sizePolicy)
        self.greedy.setObjectName(_fromUtf8("greedy"))
        self.horizontalLayout_4.addWidget(self.greedy)
        self.verticalLayout_2.addWidget(self.groupBox_3)
        self.horizontalLayout_2.addWidget(self.groupBox_4)
        self.verticalLayout.addWidget(self.widget)
        self.textEdit = QtGui.QTextEdit(Helper)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.textEdit.sizePolicy().hasHeightForWidth())
        self.textEdit.setSizePolicy(sizePolicy)
        self.textEdit.setReadOnly(True)
        self.textEdit.setObjectName(_fromUtf8("textEdit"))
        self.verticalLayout.addWidget(self.textEdit)
        self.progressBar = QtGui.QProgressBar(Helper)
        self.progressBar.setMaximum(100)
        self.progressBar.setProperty(_fromUtf8("value"), 0)
        self.progressBar.setObjectName(_fromUtf8("progressBar"))
        self.verticalLayout.addWidget(self.progressBar)
        self.clearPlot = QtGui.QPushButton(Helper)
        self.clearPlot.setObjectName(_fromUtf8("clearPlot"))
        self.verticalLayout.addWidget(self.clearPlot)
        self.retranslateUi(Helper)
        self.families.setCurrentRow(-1)
        # ticking "use waitlimit" enables the numeric waitlimit box
        QtCore.QObject.connect(self.has_waitlimit, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.waitlimit.setEnabled)
        QtCore.QMetaObject.connectSlotsByName(Helper)

    def retranslateUi(self, Helper):
        """Assign all translatable texts to the widgets."""
        _translate = QtGui.QApplication.translate
        utf8 = QtGui.QApplication.UnicodeUTF8
        Helper.setWindowTitle(_translate("Helper", "Route construction testing", None, utf8))
        self.groupBox_2.setTitle(_translate("Helper", "Test group selection", None, utf8))
        __sortingEnabled = self.families.isSortingEnabled()
        self.families.setSortingEnabled(False)
        # label every list row with its glob pattern
        for row, pattern in enumerate(self.FAMILY_PATTERNS):
            self.families.item(row).setText(_translate("Helper", pattern, None, utf8))
        self.families.setSortingEnabled(__sortingEnabled)
        self.best.setText(_translate("Helper", "Plot best", None, utf8))
        self.groupBox_4.setTitle(_translate("Helper", "Construction heuristic", None, utf8))
        self.groupBox.setTitle(_translate("Helper", "Savings heuristic", None, utf8))
        self.label.setText(_translate("Helper", "µ param", None, utf8))
        self.has_waitlimit.setText(_translate("Helper", "use waitlimit", None, utf8))
        self.mfs.setText(_translate("Helper", "mfs", None, utf8))
        self.update.setText(_translate("Helper", "Plot", None, utf8))
        self.groupBox_3.setTitle(_translate("Helper", "Greedy build first", None, utf8))
        self.greedy.setText(_translate("Helper", "Plot", None, utf8))
        self.textEdit.setHtml(_translate("Helper", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:9pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Welcome to route construction tester. This is a notification console.</p></body></html>", None, utf8))
        self.clearPlot.setText(_translate("Helper", "Reset plot", None, utf8))
| Python |
#!/usr/bin/env python
import re
import sys
def smart_input(prompt, history=None, suggestions=[], info=None):
from collections import deque
def ensure_file(f):
import os
if os.path.exists(f):
return f
if not os.path.exists(os.path.dirname(f)):
os.makedirs(os.path.dirname(f))
open(f, 'w')
return f
def inner_loop(default, suggestions, history):
while True:
question = "%s (%s): " % (prompt, default)
ans = raw_input(question)
if ans == '':
return default
if ans == ' ':
if not len(history):
print "No history provided..."
continue
if len(history) == 1 and default == history[0]:
print "No other history items."
history.rotate(-1)
default = history[0]
continue
if ans == ' ':
if not len(history):
print "No history provided..."
continue
if len(history) == 1 and default == history[0]:
print "No other history items."
history.rotate(1)
default = history[0]
continue
if ans == '`':
if not len(suggestions):
print "No suggestions provided..."
continue
if len(suggestions) == 1 and default == suggestions[0]:
print "No other suggestions."
suggestions.rotate(-1)
default = suggestions[0]
continue
if ans == '``':
if not len(suggestions):
print "No suggestions provided..."
continue
if len(suggestions) == 1 and default == suggestions[0]:
print "No other suggestions."
suggestions.rotate(1)
default = suggestions[0]
continue
if ans[0] == '+':
return default + ans[1:]
if ans == '~':
if default == history[0]:
print "Removing '%s' from history." % default
history.popleft()
default = ''
continue
# all other cases:
return ans
suggestions = deque(suggestions)
default = suggestions[0] if len(suggestions) else ''
if history is None:
hist = deque()
else:
hist = deque(map(str.strip, open(ensure_file(history))))
# print hist
result = inner_loop(default, suggestions, hist)
if not history is None:
if result in hist:
hist.remove(result)
hist.appendleft(result)
open(history,'w').write("\n".join(hist))
return result
def run_job(task_portion, wall, auto = False, extra = '', prefix='vrptw'):
    """Submit one PBS job (via qsub) running pygrout over task_portion.

    task_portion -- list of task file names, run sequentially in the job
    wall         -- wall time (seconds) granted to a single task
    auto         -- when False, show the script and wait for confirmation
    extra        -- extra command line arguments for pygrout.py
    prefix       -- prefix for the PBS job name

    Returns (qsub command, job script, job name, job id printed by qsub).
    """
    from subprocess import Popen, PIPE
    from os import getcwd
    # The job script: cd to the current directory, then run pygrout once
    # per task, with a 'date' timestamp around every run.
    script = """
cd %s
pwd
date """ % getcwd() + "".join("""
./pygrout.py %s --wall %d %s
date """ % (extra, wall, task) for task in task_portion)
    # prepare jobname: strip directory and extension noise from first task
    jobname = re.sub('.txt|hombergers/|solomons/', '', prefix+'_' + task_portion[0])
    # walltime for the whole job: each task's wall plus 60 s of slack
    command = 'qsub -l nodes=1:nehalem -l walltime=%d -N %s -e /tmp' % (
        (wall+60)*len(task_portion), jobname)
    if not auto:
        print "About to pipe: \n%s\n to the command: \n%s\n\nPress Enter" % (
            script, command)
        raw_input()
    # pipe the script into qsub's standard input
    output, errors = Popen(command, shell=True, stdin=PIPE,
        stdout=PIPE, stderr=PIPE).communicate(script)
    print "Process returned", repr((output, errors))
    return command, script, jobname, output.strip()
def main():
    """Ask for job parameters interactively, then submit the tasks given
    on the command line to PBS in chunks, logging all submissions to a
    timestamped file under output/."""
    import datetime
    # (earlier interactive parameters, kept for reference)
    # pbs_opts = smart_input('PBS options', 'output/.pbs/options',
    #    ['-l nodes=1:nehalem -l walltime=20000'])
    # tasks = smart_input('Tasks [glob pattern]', 'output/.pbs/tasks',
    #    ['solomons/', 'hombergers/', 'hombergers/*_2??.txt'])
    # pygrout_opts = smart_input('pygrout options', 'output/.pbs/pygroupts',
    #    ['--strive --wall 600', '--wall '])
    if len(sys.argv) < 2:
        print "No arguments (tasks) provided"
        return
    tasks = sys.argv[1:]
    wall = int(smart_input('Enter wall time (per task)', suggestions=[2000]))
    # each task is budgeted its wall time plus 60 s of slack
    total = len(tasks)*(wall+60)
    print "There are %d tasks, which makes %s s (%02d:%02d:%02d) total." % (
        len(tasks), total, total/3600, total%3600/60, total%60)
    print "A single task is %02d:%02d" % (wall/60+1, wall%60)
    per_job = int(smart_input('How many task per job', suggestions=[20]))
    total = per_job*(wall+60)
    print "A single job will run %02d:%02d:%02d" % (total/3600,
        total%3600/60, total%60)
    extra = smart_input('Extra args for pygrout', suggestions=[''])
    job_name = smart_input('Job name prefix', suggestions=['vrptw'])
    # 'auto' (skip per-job confirmation) when the user answers 'n' here
    auto = raw_input('Confirm single jobs (Y/n)?')=='n'
    jobs = []
    # submit the tasks in chunks of per_job
    for i in xrange(0, len(tasks), per_job):
        jobs.append(run_job(tasks[i:i+per_job], wall, auto, extra, job_name))
    # record what was submitted, for later reference
    log = "\n".join("""
Command: %s
Script: %s
Job name: %s
Job id: %s
""" % tup for tup in jobs)
    open('output/%s.log.txt' % datetime.datetime.now().isoformat(), 'w').write(log)
if __name__ == '__main__':
    # script entry point
    main()
| Python |
#!/usr/bin/env python
import os
import re
import glob
import textwrap
from collections import defaultdict
# regex to remove the non-setname part of name
cutoff = re.compile('-.*')
fnparse = re.compile("""
(?P<name>[rcRC]{1,2}[12](?:\d{2}|[\d_]{4}))
-(?P<pk>[\d.]+)-(?P<pdist>[\d.]+)
-(?P<k>\d+)-(?P<dist>[\d.]+)-""", re.VERBOSE)
class smallstat(object):
    """A tiny histogram: a contiguous list of counters (cf. defaultdict(int))."""
    def __init__(self):
        # start with a single zeroed bucket
        self.data = [0]
    def inc(self, idx, inc=1):
        """Increment bucket idx by inc (default 1), growing the list on demand."""
        shortfall = 1 + idx - len(self.data)
        if shortfall > 0:
            self.data += [0] * shortfall
        self.data[idx] += inc
def multibar(*args, **kwargs):
    """Plot several bar series side by side (grouped bars) with pylab.

    Each positional argument is either a sequence of bar heights, or a
    (left_positions, heights) tuple.  Keyword arguments:
      width  -- total width of one group of bars (default 0.8)
      left   -- common left positions for all series (default 0,1,2,...)
      colors -- cycle of matplotlib color codes (default 'brgmcyk')
    Returns the list of bar containers, one per series.
    """
    from pylab import bar
    from itertools import cycle
    import numpy as np
    width = kwargs.setdefault('width', 0.8)
    left = kwargs.setdefault('left', None)
    colors = cycle(kwargs.setdefault('colors', 'brgmcyk'))
    offset = 0
    delta = width/len(args)
    results = []
    for arg in args:
        if type(arg) is tuple:
            # an explicit (left, heights) pair
            myleft, arg = arg
            myleft = np.array(myleft) + offset
        elif not left is None:
            myleft = np.array(left) + offset
        else:
            myleft = np.arange(len(arg)) + offset
        # next(colors) instead of the Python-2-only colors.next()
        res = bar(myleft, arg, width=delta, color=next(colors))
        results.append(res)
        offset += delta
    return results
def find_medium(test):
    """Glob for a test's solutions, dropping the 'smallest' and 'largest'.

    Both the literal and the upper-cased pattern are tried, since the
    Solomon instances are inconsistently capitalized on disk.
    """
    matches = glob.glob(test + '*.p') + glob.glob(test.upper() + '*.p')
    return sorted(matches)[1:-1]
def read_as_set(f):
    """Read the file f and return the set of its stripped lines."""
    # 'with' closes the handle promptly instead of leaking it
    with open(f) as handle:
        return set(line.strip() for line in handle)
def split_groups(s):
    """Insert a newline in place of the whitespace before the first
    occurrence of each test family (c2, r1, r2, rc1, rc2)."""
    for family in ("c2", "r1", "r2", "rc1", "rc2"):
        s = re.sub(r"\s%s" % family, "\n%s" % family, s, count=1)
    return s
def printf(set_):
    """Display a set: its size, then its items sorted and text-wrapped,
    with test families separated by blank lines (via split_groups)."""
    print "(%d)"%len(set_)
    # sort with '_' treated as '0', so e.g. c1_2_1 sorts before c110_1
    un_derscore = lambda x: x.replace('_', '0')
    splat = split_groups(" ".join(sorted(set_, key=un_derscore)))
    print "\n\n".join(textwrap.fill(l) for l in splat.split("\n"))
def print_grouped(sum_of_all):
    """Output with printf, but Solomon and Homberger tests separately."""
    # Junk suppressed:
    """
    print "All found results are:"
    printf(sum_of_all)
    print "Including junk:"
    printf(sum_of_all.difference(
        sel_solomons(sum_of_all), sel_homberger(sum_of_all)))
    """
    # helpers (maybe later global)
    def sel_solomons(set_):
        """Select Solomon test names (only full 100 customer)."""
        return set(filter(re.compile('r?c?\d{3}$').match, set_))
    def sel_homberger(set_):
        """Select Homberger test names (5 digits/underscores, e.g. c1_2_1)."""
        return set(filter(re.compile('r?c?[0-9_]{5}$').match, set_))
    print "Full Solomon tests:"
    printf(sel_solomons(sum_of_all))
    print "Homberger tests:"
    printf(sel_homberger(sum_of_all))
def compare(*args):
"""Read in the passed files and display differences."""
if len(args) < 2:
print "Provide at least two filenames to compare."
return
if len(args) > 2:
print "Warning: only 2 files work now."
first, secnd = map(read_as_set, args[:2])
print "Only in%s:" % args[0]
print_grouped(first.difference(secnd))
print "Only in %s:" % args[1]
print_grouped(secnd.difference(first))
print "In both:"
print_grouped(first.intersection(secnd))
def union(*args):
    """Read in the passed files and display the union (set sum)."""
    # NOTE(review): the message says "at least two", but the guard only
    # rejects zero arguments - a union of a single file works fine.
    if len(args) < 1:
        print "Provide at least two filenames to add together."
        return
    sets = map(read_as_set, args)
    sum_of_all = set.union(*sets)
    print_grouped(sum_of_all)
def raw_union(*args):
    """Return the union of the argument files' lines, sorted, one per line."""
    everything = set.union(*map(read_as_set, args))
    return "\n".join(sorted(everything))
def raw_intersection(*args):
    """Return the intersection of the argument files' lines, one per line,
    no bubblegum.  (Docstring fixed: this computes an intersection.)"""
    return "\n".join(sorted(set.intersection(*map(read_as_set, args))))
def intersection(*args):
    """Set intersection of one (two) or more files."""
    # NOTE(review): the message asks for "at least two", but the guard
    # only rejects an empty argument list.
    if len(args) < 1:
        print "Provide at least two filenames to intersect."
        return
    sets = map(read_as_set, args)
    product_of_all = set.intersection(*sets)
    print "The elements repeating all over again are:"
    print_grouped(product_of_all)
def progress(*args):
    """Compare a list of files, displaying new items, not found before.

    Files are processed in argument order; each one is reported with the
    items it adds over all previous files, or as bringing nothing new.
    """
    sets = map(read_as_set, args)
    total = set()
    for arg, set_ in zip(args, sets):
        if len(set_.difference(total)):
            print "\n *** New things found in %s:" % arg
            print_grouped(set_.difference(total))
        else:
            print "\n ... Nothing new in %s:" % arg
        total.update(set_)
def missing(*args):
    """List problem sets which are missing from all the arguments."""
    def gen_hombergers():
        """Set of all Homberger instance names."""
        # family x series x size marker x instance number (1..10)
        return set([ c+n+s+x
            for c in ['c','r','rc']
            for n in ['1', '2']
            for s in ['_2','_4','_6','_8','10']
            for x in (['_%d' % i for i in xrange(1,10)]+['10']) ])
    def gen_solomons():
        """Set of all Solomon instance names."""
        # instance counts per family
        stats = [
            ('c1', 9), ('c2', 8),
            ('r1', 12), ('r2', 11),
            ('rc1', 8), ('rc2', 8) ]
        return set([ '%s%02d' % (fam, num)
            for fam, count in stats
            for num in xrange(1, count+1) ])
    sum_of_all = set.union(*map(read_as_set, args))
    hombergers = gen_hombergers()
    print "Missing Homberger tests:"
    difference = hombergers.difference(sum_of_all)
    if difference == hombergers:
        # nothing matched - print only the count
        print "(ALL %d)" % len(hombergers)
    else:
        printf(difference)
    solomons = gen_solomons()
    print "Missing Solomon tests:"
    difference = solomons.difference(sum_of_all)
    if difference == solomons:
        print "(ALL %d)" % len(solomons)
    else:
        printf(difference)
def main():
    """Main function - clean up a typical /output (sub)directory.

    Moves fully-solved ('100.0' best-k) results into 100s/, maintains
    all_list.txt, prints good/bad summaries, optionally deletes medium
    solutions and (re)creates the never/bad/good/always list files.
    """
    # helpers
    def create_file(fn, set_):
        # write the set to fn, or - when fn already exists - verify it
        if not os.path.exists(fn):
            open(fn, 'w').write("\n".join(sorted(set_)))
        else:
            present = read_as_set(fn)
            if present <> set_:
                print "File %s present, but inconsistent, differences" % fn
                printf(present.symmetric_difference(set_))
    # ensure directory for best results (k == 100%)
    if not os.path.exists('100s') and os.path.basename(os.getcwd()) <> '100s':
        print "Creating directory 100s (best-k results)"
        os.makedirs('100s')
    else:
        print "Directory 100s already present"
    # move best results to their directory (also their .vrp companions)
    solved = re.compile('[^-]+-100.0-.*')
    sol_ok = filter(solved.match, glob.glob('*.*'))
    if len(sol_ok):
        print "Moving %d best-k results to 100s:" % len(sol_ok)
        for f in sol_ok:
            print f
            os.rename(f, os.path.join('100s',f))
    else:
        print "No best-k results found here."
    # ensure there is an up-to-date all_list.txt, read results
    present = set(glob.glob('*.p'))
    if os.path.exists('all_list.txt'):
        files = read_as_set('all_list.txt')
        if not files >= present:
            print "all_list.txt missing files:"
            printf(present.difference(files))
        # merge and rewrite unconditionally
        files = files.union(present)
        open('all_list.txt', 'w').write("\n".join(sorted(files)))
    else:
        # there was no all_list.txt
        open('all_list.txt', 'w').write("\n".join(sorted(present)))
        files = present
    # grouping of the results to different sets
    sets_bad = set(cutoff.sub('', f).lower() for f in files)
    # good sets always live in the 100s/ subdirectory (strip the prefix)
    sets_good = set(cutoff.sub('', f.replace('100s/','')).lower()
        for f in glob.glob("100s/*.p"))
    ##sets_sometimes = sets_bad.intersection(sets_good)
    sets_always = sets_good.difference(sets_bad)
    sets_never = sets_bad.difference(sets_good)
    # print summaries (for every run)
    print "\nBad results:"
    print_grouped(sets_bad)
    print "\nGood results:"
    print_grouped(sets_good)
    # quieting down somewhat
    """
    print "\nSolved sometimes:"
    printf(sets_sometimes)
    print "\nSolved never:"
    printf(sets_never)
    print "\nSolved always:"
    printf(sets_always)
    """
    # remove junk - medium solutions (conditionally)
    if len(present) > 2*len(sets_bad):
        if 'y' == raw_input('Delete medium solutions (y/N)?'):
            for i in sets_bad:
                moritures = find_medium(i)
                print i, len(moritures)
                for f in moritures:
                    print "Removing", f, "..."
                    os.unlink(f)
    # create lists for bad, never and sometimes
    create_file('never.txt', sets_never)
    create_file('bad.txt', sets_bad) # broadest
    ##create_file('sometimes.txt', sets_sometimes)
    ##create_file('100s/sometimes.txt', sets_sometimes)
    create_file('100s/good.txt', sets_good) # broadest
    create_file('100s/always.txt', sets_always)
    raw_input('Done. Press ENTER')
def draw_map(colors = defaultdict((lambda: ('w', '/')))):
"""Plot tests (solutions) as squares in color with mpl"""
sol_counts = dict([('c1', 9), ('c2', 8), ('r1', 12), ('r2', 11),
('rc1', 8), ('rc2', 8) ])
from matplotlib.pyplot import subplot, show, bar, title
from itertools import cycle
groups = 'c1 r1 rc1 c2 r2 rc2'.split()
for i in xrange(6):
subplot(230+i+1)
for j in xrange(sol_counts[groups[i]]):
name = groups[i]+ "%02d" % (j+1)
print name, j, 0, colors[name]
bar(j, 0.8, color=colors[name][0], hatch=colors[name][1])
base = 1
homb_numbers = ['_%d' % (n+1,) for n in xrange(9)]+['10']
for size in "_2 _4 _6 _8 10".split():
for j in xrange(10):
name = groups[i]+size+homb_numbers[j]
print name, j, base, colors[name]
bar(j, 0.8, bottom=base, color=colors[name][0], hatch=colors[name][1])
base += 2
title(groups[i])
show()
def scan_solutions(path = '.'):
    """Search specified directories (default: current) for solutions.

    Walks path recursively, parses solution file names with fnparse and
    returns a dict: test name (lowercase) -> sorted list of
    (route count, distance) tuples found for that test.
    """
    data = dict()
    for dirpath, _, filenames in os.walk(path):
        for f in filenames:
            m = fnparse.search(f)
            if m:
                print f, m.group()
                d = m.groupdict()
                data.setdefault(d['name'].lower(), []).append((int(d['k']), float(d['dist'])))
    for s in data:
        # best (fewest routes, then shortest distance) comes first
        data[s].sort()
    return data
def k_map():
    """Plot a route count summary for solutions in/below current directory.

    green: best known route count reached; yellow: one route too many;
    red: worse; white: no solution found.
    """
    best = get_best_results()
    results = scan_solutions('.')
    data = defaultdict(lambda: ('white', '/'))
    for name in results:
        best_k = best[name][0]
        found_k = results[name][0][0]
        if found_k <= best_k:
            shade = 'green'
        elif found_k == best_k + 1:
            shade = 'yellow'
        else:
            shade = 'red'
        data[name] = (shade, '')
    draw_map(data)
def dist_map():
    """Plot a distance summary for solutions in/below current directory.
    Solutions with wrong route count are marked black; otherwise green
    means within 1% of the best known distance, yellow within 5%,
    red worse, white missing."""
    best = get_best_results()
    results = scan_solutions('.')
    data = defaultdict(lambda: ('white', '/'))
    for name in results:
        best_k, best_dist = best[name]
        found_k, found_dist = results[name][0]
        if found_k > best_k:
            shade = 'black'
        elif found_dist <= best_dist * 1.01:
            shade = 'green'
        elif found_dist <= best_dist * 1.05:
            shade = 'yellow'
        else:
            shade = 'red'
        data[name] = (shade, '')
    draw_map(data)
def get_best_results():
    """Load a dict test name -> (routes, distance) of best known results,
    read from the summary files shipped with the vrptw package."""
    import vrptw
    pattern = os.path.join(os.path.dirname(vrptw.__file__), 'bestknown', 'sum*')
    best = {}
    for summary_file in glob.glob(pattern):
        for line in open(summary_file):
            name, routes, distance = line.split()
            best[name.lower()] = (int(routes), float(distance))
    return best
def deepmap(f, something):
    """Apply f to every leaf of a nested list/tuple/dict, keeping the layout."""
    kind = type(something)
    if kind == list:
        return [deepmap(f, item) for item in something]
    if kind == tuple:
        return tuple(deepmap(f, item) for item in something)
    if kind == dict:
        return dict((key, deepmap(f, something[key])) for key in something)
    # a leaf: transform it
    return f(something)
def enter_ipython(extra_locals = dict()):
    """Run IPython embedded shell with added locals.
    To debug a specific place in script just call:
    enter_ipython(locals())

    NOTE(review): updating locals() inside a function has no effect in
    CPython, so extra_locals is most likely NOT visible in the shell -
    verify, and consider passing a namespace to IPShellEmbed instead.
    Uses the legacy (pre-0.11) IPython.Shell API.
    """
    locals().update(extra_locals)
    import IPython
    IPython.Shell.IPShellEmbed()()
def plot_excess_routes(*args):
"""Display a histogram of excess routes in solutions."""
best = get_best_results()
def get_stats(path):
stats = smallstat()
mem = set()
for dirpath, _, filenames in os.walk(path):
for f in filenames:
m = fnparse.search(f)
if m:
print f, m.group()
if m.group() in mem:
print "duplicate"
continue
mem.add(m.group())
d = m.groupdict()
bk = best[d['name'].lower()]
ex = int(d['k'])-bk[0]
# print d['name'], bk, ex
stats.inc(ex)
return stats.data
if len(args) == 0:
args = ['.']
from pylab import show, xlabel, ylabel, title, xticks,hist
multibar(*map(get_stats, args))
xlabel('Excess routes')
ylabel('No. of solutions')
std_title = os.path.basename(os.getcwd())
cust_title = raw_input('Enter title (%s): '%std_title)
title(cust_title if cust_title <> '' else std_title)
locs, _ = xticks()
if locs[1] < 1:
xticks(range(len(stats.data)))
print locs
show()
# global list of functions: names of all plain functions at module level
# (the command menu for the __main__ dispatch); note this also catches
# any plain functions that were imported into this module
from types import FunctionType
funcs = filter(lambda k: type(globals()[k])==FunctionType, globals().keys())
if __name__ == '__main__':
    # my well-known "call-function-from-argv" design pattern
    import sys
    if len(sys.argv) > 1 and sys.argv[1] in funcs:
        # call function passing other args as params
        res = globals()[sys.argv[1]](*sys.argv[2:])
        # print the result of functions that return one (e.g. raw_union)
        if not res is None:
            print res
    else:
        print "Use one, out of a subset, of these:\n  "+"\n  ".join(funcs)
| Python |
#!/usr/bin/env python
import sys
import glob
from itertools import repeat
import matplotlib
matplotlib.use('Qt4Agg')
import pylab
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
from PyQt4 import QtCore, QtGui
from ui_helper import Ui_Helper
class ArgMap(object):
    """Maps test-file names to consecutive x-axis indexes on the plot,
    remembering tick positions/labels at the start of each test family."""
    def __init__(self):
        self.reset()
    def reset(self):
        """Empty the mapping and counters. Also initialize."""
        self.d = {}            # name -> 1-based index
        self.n = 0             # highest index assigned so far
        self.ticks = []        # x positions of family-start ticks
        self.ticklabels = []   # labels (name without directory/extension)
    def checkTick(self, el):
        """Called by addOne - checks if element is first of a family.

        Underscores are normalized to '0' first, so '1_1.' counts as
        '101.'; instances 01 and 06 start a tick.
        """
        norm = el.replace('_', '0')
        # BUGFIX (portability): '!=' instead of the Python-2-only '<>'
        if norm.find('01.') != -1 or norm.find('06.') != -1:
            self.ticks.append(self.d[el])
            self.ticklabels.append(el[el.index('/')+1:el.index('.')])
    def addOne(self, el):
        """Single unchecked addition (use __call__ to add safely)."""
        self.n = self.d[el] = self.n+1
        self.checkTick(el)
    def add(self, els):
        """Adding multiple elements from an iterable."""
        # BUGFIX (portability): map() is lazy on Python 3 and would do
        # nothing there; an explicit loop behaves the same on Python 2
        for el in els:
            self(el)
    def __call__(self, el):
        """Calling the object does safe mapping of element to index."""
        if el not in self.d:
            self.addOne(el)
        return self.d[el]
class Plot(object):
    """This encapsulates details connected with the plot.

    Two stacked axes share the same x layout: route counts on top and
    total distances below.  An ArgMap assigns x positions to test names
    and provides the family tick positions/labels.
    """
    def __init__(self, helper):
        self.helper = helper
        self.fig = Figure(figsize=(600,600), dpi=72, facecolor=(1,1,1), edgecolor=(0,0,0))
        self.ax_k = self.fig.add_subplot(211)
        self.ax_d = self.fig.add_subplot(212)
        # the canvas:
        self.canvas = FigureCanvas(self.fig)
        # and its toolbar
        self.toolbar = NavigationToolbar(self.canvas, helper)
        self.attachTo(helper.ui.verticalLayout)
        self.argmap = ArgMap()
        self._setup_plots()
        self._update_ticks()
    def _setup_plots(self):
        # axis labels (called again after cla() wipes them in reset())
        self.ax_k.set_ylabel('route count')
        self.ax_d.set_ylabel('total distance')
    def _update_ticks(self):
        # keep both axes aligned with the current ArgMap contents
        self.ax_k.set_xlim((0, self.argmap.n+1))
        self.ax_k.set_xticks(self.argmap.ticks)
        self.ax_k.set_xticklabels(self.argmap.ticklabels)
        self.ax_d.set_xlim((0, self.argmap.n+1))
        self.ax_d.set_xticks(self.argmap.ticks)
        self.ax_d.set_xticklabels(self.argmap.ticklabels)
    def attachTo(self, layout):
        # place the canvas and its toolbar in the dialog's layout
        layout.addWidget(self.canvas)
        layout.addWidget(self.toolbar)
    def reset(self):
        """Remove plotted data from the drawing area."""
        self.argmap.reset()
        self.ax_k.cla()
        self.ax_d.cla()
        self._setup_plots()
        self._update_ticks()
        self.canvas.draw()
    def display(self, operation):
        """Add a finished operation's results as one series on both axes."""
        xcoords = map(self.argmap, operation.args)
        lbl = operation.get_name()
        self.ax_k.plot(xcoords, operation.ks, 'o', label=lbl)
        self.ax_k.legend()
        # pad the y range a little so extreme points are not clipped
        ymin, ymax = self.ax_k.get_ylim()
        self.ax_k.set_ylim((ymin-1, ymax+1))
        self.ax_d.plot(xcoords, operation.ds, '.', label=lbl)
        self.ax_d.legend()
        ymin, ymax = self.ax_d.get_ylim()
        spread = (ymax - ymin)*.03
        self.ax_d.set_ylim((ymin-spread, ymax+spread))
        self._update_ticks()
        self.canvas.draw()
class Operation(object):
    """An abstract operation for the sets to perform"""
    def __init__(self, args):
        # a string argument is treated as a glob pattern
        self.args = self.find_args(args) if type(args) == str else args
        self.ks = []   # route counts, filled while the operation runs
        self.ds = []   # distances, filled while the operation runs
    def find_args(self, argstr):
        """Expand a glob pattern into a sorted list of file names."""
        from glob import glob
        # '_' sorts after digits, so normalize it to '0' for ordering
        return sorted(glob(argstr), key=lambda name: name.replace('_', '0'))
    def get_name(self):
        """Description of operation, e.g. for plot label."""
        return 'abstract'
def best_val(name):
    """The mapping function for best known value.

    Loads the named test via pygrout's VrptwTask (without preprocessing
    flag) and returns its best known value; Worker.run unpacks each
    result as a (k, d) pair.  Runs inside a pool worker process.
    """
    from pygrout import VrptwTask
    task = VrptwTask(name, False)
    return task.bestval()
class BestOperation(Operation):
    """Looks up the best known results for the chosen tests."""
    def get_iterator(self, worker):
        # distribute the lookups over the worker's process pool
        return worker.p.imap(best_val, self.args)
    def get_name(self):
        # label used in the plot legend
        return 'b.known'
def savings_val(task):
    """The mapping function for savings heuristic.

    task is a (test name, waitlimit, mu) tuple; builds a solution with
    the savings heuristic and returns its value.  Runs inside a pool
    worker process.
    """
    name, waitlimit, mi = task
    from pygrout import VrptwSolution, VrptwTask, build_by_savings
    print "Should process", name
    sol = VrptwSolution(VrptwTask(name))
    build_by_savings(sol, waitlimit, mi)
    return sol.val()
def mfsavings_val(task):
    """The mapping function for the multi-front savings heuristic.

    Like savings_val, but builds the solution with build_by_mfsavings.
    """
    name, waitlimit, mi = task
    from pygrout import VrptwSolution, VrptwTask, build_by_mfsavings
    print "Should process", name
    sol = VrptwSolution(VrptwTask(name))
    build_by_mfsavings(sol, waitlimit, mi)
    return sol.val()
class SavingsOperation(Operation):
    """Runs the (multi-front) savings construction heuristic over the tests."""
    def __init__(self, args, mi, waitlimit, mfs=True):
        Operation.__init__(self, args)
        self.mi = mi                  # the savings 'mu' parameter
        self.waitlimit = waitlimit    # optional waiting-time limit (or None)
        self.mfs = mfs                # True: use the multi-front variant
    def get_iterator(self, worker):
        """Dispatch (name, waitlimit, mu) tasks to the worker's pool."""
        tasks = zip(self.args, repeat(self.waitlimit), repeat(self.mi))
        mapper = mfsavings_val if self.mfs else savings_val
        return worker.p.imap(mapper, tasks)
    def get_name(self):
        """Plot label, e.g. 'mfs(1.0)' plus a waitlimit suffix if set."""
        base = "mfs(%.1f)" if self.mfs else "sav(%.1f)"
        desc = base % self.mi
        if self.waitlimit:
            desc += "WL(%d)" % self.waitlimit
        return desc
def greedy_val(task):
    """The mapping function for the greedy construction.

    task is a (test name, sort order) tuple; the order is installed as
    VrptwTask's customer sort key before the first-fit construction.
    Runs inside a pool worker process.
    """
    name, order = task
    from pygrout import VrptwSolution, VrptwTask, build_first
    VrptwTask.sort_order = order
    sol = VrptwSolution(VrptwTask(name))
    build_first(sol)
    return sol.val()
class GreedyOperation(Operation):
    """Runs the greedy route construction with a chosen customer ordering."""
    def __init__(self, args, order):
        Operation.__init__(self, args)
        self.order = order   # sort-key name (see vrptw.sort_keys)
    def get_iterator(self, worker):
        """Dispatch (name, order) tasks to the worker's process pool."""
        return worker.p.imap(greedy_val, zip(self.args, repeat(self.order)))
    def get_name(self):
        """Plot label: simply the ordering's name."""
        return self.order
class Worker(QtCore.QThread):
    """An active object for background computations.

    Runs one Operation at a time in a QThread, delegates the per-test
    work to a multiprocessing.Pool, and reports progress to the Helper
    dialog through Qt signals.
    """
    def __init__(self, helper, parent = None):
        super(Worker, self).__init__(parent)
        self.helper = helper
        # custom signals for the GUI
        QtCore.QObject.connect(self, QtCore.SIGNAL("progress(int)"), helper.update_progress)
        QtCore.QObject.connect(self, QtCore.SIGNAL("newProgress(int)"), helper.init_progress)
        # terminating signals for the GUI
        QtCore.QObject.connect(self, QtCore.SIGNAL("finished()"), helper.background_done)
        QtCore.QObject.connect(self, QtCore.SIGNAL("terminated()"), helper.background_done)
        # the operation, passed before starting the thread
        self.currentOp = None
        # a single pool for processing
        from multiprocessing import Pool
        self.p = Pool()
    def run(self):
        # thread body: consume the operation's result iterator, storing
        # (k, d) pairs and emitting progress, then hand over to the plot
        if not self.currentOp:
            return
        self.emit(QtCore.SIGNAL('newProgress(int)'), len(self.currentOp.args))
        numDone = 0
        for k, d in self.currentOp.get_iterator(self):
            self.currentOp.ks.append(k)
            self.currentOp.ds.append(d)
            numDone += 1
            self.emit(QtCore.SIGNAL('progress(int)'), numDone)
        self.helper.plot.display(self.currentOp)
    def performOperation(self, operation):
        # GUI entry point: remember the operation and start the thread
        self.currentOp = operation
        self.helper.lock_ui()
        self.start()
class Helper(QtGui.QDialog):
    """Main dialog: choose tests and heuristics, run them in a background
    Worker and plot route counts and distances for comparison."""
    def __init__(self, parent=None):
        # boilerplate
        QtGui.QDialog.__init__(self, parent)
        self.ui = Ui_Helper()
        self.ui.setupUi(self)
        # load sorting orders into combobox
        from vrptw import sort_keys
        self.ui.greedyOrder.addItems(sorted(sort_keys.keys()))
        # add custom mpl canvas
        self.plot = Plot(self)
        # the worker thread (one, for now)
        self.worker = Worker(self)
        # the stopwatch placeholder
        self.watch = '(no watch set!)'
        # wire the buttons to their slots
        QtCore.QObject.connect(self.ui.update, QtCore.SIGNAL("clicked()"), self.plot_savings)
        QtCore.QObject.connect(self.ui.best, QtCore.SIGNAL("clicked()"), self.plot_best)
        QtCore.QObject.connect(self.ui.greedy, QtCore.SIGNAL("clicked()"), self.plot_greedy)
        QtCore.QObject.connect(self.ui.clearPlot, QtCore.SIGNAL("clicked()"), self.clear_plot)
    def lock_ui(self):
        """Called before entering the background operation."""
        from stopwatch import StopWatch
        self.watch = StopWatch()
        self.ui.update.setEnabled(False)
        self.ui.best.setEnabled(False)
    def background_done(self):
        """Slot to unlock some UI elements after finished background operation."""
        self.ui.update.setEnabled(True)
        self.ui.best.setEnabled(True)
        self.ui.progressBar.setEnabled(False)
        self.ui.textEdit.append("Processing finished in %s seconds" % self.watch)
        print "What now?", self.watch
    def plot_best(self):
        """Slot: plot the best known results for the chosen tests."""
        self.worker.performOperation(BestOperation(self.tests_chosen()))
    def plot_savings(self):
        """Slot: run and plot the (multi-front) savings heuristic."""
        mi = self.ui.mi.value()
        waitlimit = self.ui.waitlimit.value() if self.ui.has_waitlimit.checkState() else None
        mfs = self.ui.mfs.checkState()
        self.worker.performOperation(SavingsOperation(self.tests_chosen(), mi, waitlimit, mfs))
    def plot_greedy(self):
        """Slot: run and plot the greedy construction with chosen order."""
        order = str(self.ui.greedyOrder.currentText())
        self.worker.performOperation(GreedyOperation(self.tests_chosen(), order))
    def clear_plot(self):
        """Slot for clearing the plot."""
        self.plot.reset()
    def init_progress(self, maxProgress):
        """Slot for resetting the progress bar's value to 0 with a new maximum."""
        self.ui.progressBar.setEnabled(True)
        self.ui.progressBar.setMaximum(maxProgress)
        self.ui.progressBar.setValue(0)
    def update_progress(self, progress):
        """Slot for updating the progress bar."""
        print "--- one done ---"
        self.ui.progressBar.setValue(progress)
    def tests_chosen(self):
        """Return the selected pattern in the families list."""
        return str(self.ui.families.currentItem().text())
if __name__ == "__main__":
    # Qt boilerplate: create the app, show the dialog, run the event loop
    app = QtGui.QApplication(sys.argv)
    # almost standard:
    helper = Helper()
    helper.show()
    sys.exit(app.exec_())
| Python |
#!/usr/bin/env python
# SINTEF pages with best known results for the Homberger instances
# (200, 400, 600, 800 and 1000 customers)
homberger_urls = [
    'http://www.sintef.no/Projectweb/TOP/Problems/VRPTW/Homberger-benchmark/%d00-customers/' % n
    for n in xrange(2,11,2)
]
# Solomon's own pages with best results for his instances
solomon_urls = [
    'http://web.cba.neu.edu/~msolomon/c1c2solu.htm',
    'http://web.cba.neu.edu/~msolomon/r1r2solu.htm',
    'http://web.cba.neu.edu/~msolomon/rc12solu.htm',
    'http://web.cba.neu.edu/~msolomon/heuristi.htm'
]
import re
import urllib2
import time
# (name, vehicles, distance) triples in the SINTEF result tables
homb = re.compile(r'''<td style.*?([rc]{1,2}[12][0-9_]{4,6}).*?<td.*?(\d+).*?<td.*?([\d\.]+)''', re.DOTALL)
# (name, vehicles, distance) in Solomon's pages (after sanitize())
solo = re.compile('''([RC]{1,2}[12]\d{2}\.?\d{0,3})\s*(\d+)\s*(\d+\.\d+)''', re.DOTALL)
# download function
get = lambda url: urllib2.urlopen(url).read()
def save(match):
name, vehicles, distance = match.groups()
name = name.replace('_10', '10')
fname = 'vrptw/bestknown/%s.txt' % name
old_data = open(fname).read()
new_data = '%s %s\n' % (vehicles, distance)
summary = '%-7s %3s %s' % (name, vehicles, distance)
if old_data <> new_data:
open(fname, 'w').write(new_data)
print summary, 'CHANGED, from:', old_data
else:
print summary
return summary
def sanitize(dta):
    """Prepare some bad HTML for easier regexp scanning.

    Strips comments, style sections and all remaining tags, then
    collapses every run of uninteresting characters into one space.
    """
    for noise in ("<!--.*?-->", "<style.*?</style>", "<.*?>"):
        dta = re.compile(noise, re.DOTALL).sub('', dta)
    return re.sub('[^\d\n \.RC]+', ' ', dta)
def get_hombergers_sintef():
    """Download best result for Hombergers tests from SINTEF site.

    Updates vrptw/bestknown/<name>.txt via save() and (re)writes the
    sorted summary file vrptw/bestknown/summary_H.txt.
    """
    summary = []
    for u in homberger_urls:
        print "Visiting", u
        start = time.time()
        data = get(u)
        s, kb = time.time()-start, len(data)/1024.0
        found = homb.finditer(data)
        for m in found:
            summary.append(save(m))
        print "Downloaded %.1f KB in %.1f s (%.1f KB/s)" % (kb, s, kb/s)
    # sort with '_' treated as '0', keeping the natural instance order
    open('vrptw/bestknown/summary_H.txt', 'w').write("\n".join(sorted(summary,
        key=lambda x: x.replace('_', '0'))))
def get_solomons():
    """Download best results for Solomons tests as published by himself.

    Analogous to get_hombergers_sintef(), but the pages need sanitize()
    before scanning; writes vrptw/bestknown/summary_S.txt.
    """
    import os.path
    # local debug proxy for downloading - uncomment below
    # get = lambda url: open(os.path.basename(url)).read()
    summary = []
    for u in solomon_urls:
        print "Visiting", u
        start = time.time()
        data = sanitize(get(u))
        found = solo.finditer(data)
        for m in found:
            summary.append(save(m))
        # NOTE(review): timed after parsing, and len(data) is the
        # sanitized size - the KB/s figure is only approximate
        s, kb = time.time()-start, len(data)/1024.0
        print "Downloaded %.1f KB in %.1f s (%.1f KB/s)" % (kb, s, kb/s)
    open('vrptw/bestknown/summary_S.txt', 'w').write("\n".join(sorted(summary,
        key=lambda x: x.replace('_', '0'))))
if __name__ == '__main__':
    get_hombergers_sintef()
    # Solomon download disabled by default:
    # get_solomons()
| Python |
from vrptw.consts import *
from itertools import count
def pairs(iterable):
    """A generator for adjacent elements of an iterable:
    (x0, x1), (x1, x2), ...

    Yields nothing for an empty or single-element iterable.
    """
    it = iter(iterable)
    try:
        # next() builtin instead of the Python-2-only it.next() method
        prev = next(it)
    except StopIteration:
        # empty input: yield nothing (a raw StopIteration escaping a
        # generator became a RuntimeError with PEP 479 / Python 3.7)
        return
    for next_ in it:
        yield (prev, next_)
        prev = next_
def test_pairs():
    """Unit test for pairs() generator.

    BUGFIX: the old version zipped actual with expected values, and zip
    truncates to the shorter sequence - a pairs() yielding too few (or
    too many) pairs still passed.  Compare complete lists instead.
    """
    assert list(pairs(range(5))) == [(i, i+1) for i in range(4)]
def d(s):
    """Debug print with a one second sleep (to slow down hot loops)."""
    import time
    print s
    time.sleep(1)
def dd(s):
    """Debug print, no sleep (companion of d())."""
    print s
def solution_header(sol):
    """Format the summary header of a solution, Czarnas-style.

    The reference (Czarnas') code computes a scalar solution value as
        ROUTE_COST_WEIGHT * MAX_CUSTOMERS * routes + totalDistance
    with ROUTE_COST_WEIGHT = 2*((MAX_X-MIN_X)+(MAX_Y-MIN_Y)) = 400 and
    MAX_CUSTOMERS = 100, i.e. a fixed multiplier of 40000.
    -> This formula is - arguably - bad: it depends on the customer
    count and the coordinate range, which for the Homberger tests are
    different and quite large, and can overflow integers
    (for 100 customers the multiplier is 40 000, > 2**15; for 1000 it
    is 4 000 000 > 2**21, times 6 decimal places, i.e. 20 more bits).
    """
    ROUTE_COST_MULTIPLIER = 40000
    routes = len(sol.r)
    value = sol.dist + routes * ROUTE_COST_MULTIPLIER
    lines = [
        "Solution:",
        "Routes: %d" % routes,
        "Vehicle capacity: %.2f" % sol.task.capa,
        "Solution value: %.3f" % value,
        "Total travel distance: %.3f" % sol.dist,
    ]
    return "\n".join(lines) + "\n"
def print_like_Czarnas(sol, sparse=False):
    """Prints the solution in a form compatible (and diffable) with Czarnas.

    sol    -- solution object (see solution_header for the value formula)
    sparse -- when True, routes of length 2 (serving a single customer)
              are collapsed into one summary line at the end
    """
    result = solution_header(sol)
    for rt, num in zip(sol.r, count(1)):
        if (not sparse) or rt[R_LEN] > 2:
            result += "Route: %d, len: %d, dist: %.3f, max cap: %.2f" % (
                num, rt[R_LEN], rt[R_DIS], rt[R_CAP])
            # customers along the route: sources of all edges but the first
            result += ", route: "+"-".join(
                str(e[E_FRO]) for e in rt[R_EDG][1:] )+"\n"
    if sparse and any(rt[R_LEN]==2 for rt in sol.r):
        # a length-2 route is depot-customer-depot: list just the customer
        result += "Single routes: " + ", ".join(str(rt[R_EDG][1][E_FRO]) for rt in sol.r if rt[R_LEN]==2)+"\n"
    print result
def print_like_Czarnas_long(sol):
    """Prints a verbose description of the solution (one line per customer).
    Compatible with the printSolutionAllData() method in the reference code
    DATATYPE dist = data.getDistance(DEPOT, getRouteStart(r));
    for (int c = getRouteStart(r); c != DEPOT; c = cust[c].getNext()) {
        initCap -= TO_FLOAT(data.getDemand(c));
        fprintf(output, "(%2d, %7.2f, %7.2f, %7.2f, %7.2f, %5.2f, %6.2f, %6.2f, %4.1f)\n", c,
        TO_FLOAT(cust[c].getArrival()),
        TO_FLOAT(cust[c].getLatestArrival()),
        TO_FLOAT(data.getBeginTime(c)), TO_FLOAT(data.getEndTime(c)),
        TO_FLOAT(data.getServiceTime(c)),
        TO_FLOAT(data.getDistance(cust[c].getPrev(), c)), initCap,
        TO_FLOAT(data.getDemand(c)));
        if (initCap > TO_FLOAT(data.getVehicleCapacity()) || initCap < 0.0)
            fprintf(output, "************* vehicle capacity violated!!!\n");
        dist += data.getDistance(c, cust[c].getNext());
    }
    """
    result = solution_header(sol)
    for rt, num in zip(sol.r, count(1)):
        # per-route header block
        result += (
            "Route: %d\nRoute length: %d\nRoute cost: %.3f\n"
            "Init capacity: %.2f, max capacity = %.2f\n" %
            (num, rt[R_LEN], rt[R_DIS], rt[R_CAP], rt[R_CAP]) +
            "Route \n"
            "(cust, arriv, ltstArr, bgnWind, endWind, srvcT, dstPrv, weight, dem):\n"
            " ------------------------------------------------------------------\n"
            )
        # NOTE(review): wgt stays 0 for every row, while the reference
        # code decrements the remaining capacity per customer - confirm
        # whether the weight column is intentionally unimplemented here.
        wgt = 0
        for bef, aft in pairs(rt[R_EDG]):
            cust = bef[E_TOW]
            result += (
                "(%2d, %7.2f, %7.2f, %7.2f, %7.2f, %5.2f, %6.2f, %6.2f, %4.1f)\n" %
                ( cust, aft[E_ARF], bef[E_LAT], sol.a(cust), sol.b(cust),
                  sol.task.cust[cust][SRV], sol.d(bef[E_FRO], cust), wgt, sol.dem(cust) )
                )
        result += "\n"
    print result
def symbol(i):
    """Return a suitable one-character symbol for customer number i:
    0-9 -> '0'..'9', 10-35 -> 'A'..'Z', 36-61 -> 'a'..'z',
    62 and above -> '+', anything else -> '?'."""
    if i >= 62:
        return '+'
    if i >= 36:
        return chr(ord('a') + i - 36)
    if i >= 10:
        return chr(ord('A') + i - 10)
    if i >= 0:
        return chr(ord('0') + i)
    return '?'
def describe(sol, cols=50, onlyrouted=True):
    """Produces a textual representation of the task.

    Draws the customers as single characters (see symbol()) on an ASCII
    grid cols columns wide, scaled to their coordinate bounding box;
    onlyrouted limits the picture to customers present in sol's routes.
    """
    customers = [ sol.task.cust[c] for c in
        set(x[E_FRO] for r in sol.r for x in r[R_EDG])
        ] if onlyrouted else sol.task.cust
    # bounding box of the drawn customers: (X,min),(X,max),(Y,min),(Y,max)
    minx, maxx, miny, maxy = [
        op( x[k] for x in customers ) for k in X, Y for op in min, max ]
    sx, sy = (maxx - minx), (maxy-miny)
    rows = sy * cols // sx
    board = [ [ ' ' for i in xrange(cols+1) ] for j in xrange(rows+1) ]
    for y, x, i in [ ((c[Y]-miny)*rows//sy, (c[X]-minx)*cols//sx, c[ID])
        for c in customers ]:
        board[y][x] = symbol(i)
    # print with rows reversed, so that y grows upwards on screen
    print "\n".join("".join(row) for row in board[::-1])
| Python |
# undo handlers
def undo_ins(list_, idx):
    """Reverse an insertion: drop the element that was inserted at idx."""
    del list_[idx]
def undo_pop(list_, idx, val):
    """Reverse a removal: put val back into the list at idx."""
    list_[idx:idx] = [val]
def undo_set(list_, idx, val):
    # Reverse of an element overwrite: restore the previous value.
    list_[idx] = val
def undo_atr(obj, atr, val):
    # Reverse of an attribute change: restore the previous value.
    # (Unused in the handlers table below, which uses setattr directly.)
    setattr(obj, atr, val)
def undo_add(list_, idx, val):
    # Reverse of an in-place addition to a list element.
    list_[idx] -= val
def undo_ada(obj, atr, val):
    # Reverse of an in-place addition to an object attribute.
    setattr(obj, atr, getattr(obj, atr) - val)
# undo elements type:
#
# Tags stored as the first element of each UndoStack action; each tag
# indexes its reverse handler in `handlers` below (U_CHECKPOINT is a
# marker only and has no handler).
U_ELEM_IN, U_ELEM_OUT, U_ELEM_MOD, U_ATTRIB, U_ADD, U_ADA, U_CHECKPOINT = range(7)
# undo mapping
# Note: U_ATTRIB maps straight to the builtin setattr.
handlers = [ undo_ins, undo_pop, undo_set, setattr, undo_add, undo_ada ]
class UndoStack(object):
    """Holds description of a sequence of operations, possibly separated by checkpoints."""
    # self.actions is a list of (tag, args) pairs; undo() pops them and
    # dispatches through the module-level `handlers` table.
    # self.point is the id of the most recent checkpoint.
    def __init__(self):
        """Construct empty undo stack."""
        self.commit()
    def ins(self, list_, idx, value):
        """Inserts the value at a specific index in list and returns for chaining."""
        self.actions.append( (U_ELEM_IN, (list_, idx)) ) # value not needed
        list_.insert(idx, value)
        return value
    def pop(self, list_, idx):
        """Removes a list element and returns its value."""
        data = list_.pop(idx)
        self.actions.append( (U_ELEM_OUT, (list_, idx, data)) )
        return data
    def set(self, list_, idx, value):
        """Sets a list element to new value, returns it for possible chaining."""
        self.actions.append( (U_ELEM_MOD, (list_, idx, list_[idx])) )
        list_[idx] = value
        return value
    def checkpoint(self):
        """Marks current state and returns the marker."""
        self.point += 1
        self.actions.append( (U_CHECKPOINT, self.point) )
        return self.point
    def atr(self, obj, atr, val):
        """Change an object's attribute."""
        data = getattr(obj, atr)
        self.actions.append( (U_ATTRIB, (obj, atr, data)) )
        setattr(obj, atr, val)
        return val
    def add(self, list_, idx, value):
        """Inplace add something to list element."""
        self.actions.append( (U_ADD, (list_, idx, value)) )
        list_[idx] += value
    def ada(self, obj, atr, val):
        """Inplace add to object's attribute."""
        data = getattr(obj, atr)
        self.actions.append( (U_ADA, (obj, atr, val)) )
        setattr(obj, atr, val+data)
        return val+data
    def commit(self):
        """Forget all undo information."""
        self.actions = []
        self.point = 0
    def undo(self, checkpoint = None):
        """Reverse all operation performed through this stack, or up to a checkpoint."""
        # NOTE(review): with the default checkpoint=None this assert relies
        # on Python 2's None-before-int ordering; it raises on Python 3.
        assert checkpoint <= self.point, 'Undo to invalid checkpoint'
        while len(self.actions):
            tag, args = self.actions.pop()
            if tag == U_CHECKPOINT:
                # Stop once the requested checkpoint marker is reached;
                # other (more recent) markers are simply discarded.
                if args == checkpoint:
                    self.point = checkpoint-1
                    break
            else:
                # print tag, args
                handlers[tag](*args)
    def undo_last(self):
        """Rollback actions to last checkpoint."""
        assert self.point > 0, 'No actions to undo'
        self.undo(self.point)
class TestUndoStack(object):
    """Unit test class for py.test"""
    # NOTE(review): the tests share one UndoStack and one list; each test
    # relies on undo() restoring self.l before the next test runs.
    def setup_class(self):
        """Create the UndoStack used with every test and an example list."""
        # NOTE(review): py.test passes the class object here, so these
        # attributes land on the class and are shared by all tests.
        self.u = UndoStack()
        self.l_orig = [7, 'dolorem', 4, None, 5.3]
        self.l = self.l_orig[:]
    def setup_method(self, method):
        """Restore the example list, not needed if tests pass, undo does it."""
        # self.l = self.l_orig[:]
    def test_ins(self):
        """Undoing an insertion."""
        self.u.ins(self.l, 0, 2)
        expected = [2]+self.l_orig
        assert self.l == expected
        self.u.undo()
        assert self.l == self.l_orig
    def test_pop(self):
        # pop removes and returns the element; undo reinserts it.
        out = self.u.pop(self.l, 2)
        assert out == 4
        self.u.undo()
        assert self.l == self.l_orig
    def test_set(self):
        # set overwrites an element; undo restores the old value.
        self.u.set(self.l, 1, 'ipsum')
        assert self.l[1] == 'ipsum'
        self.u.undo()
        assert self.l == self.l_orig
    def test_sequence(self):
        # Mixed operations around a checkpoint: undo(tag) rolls back to
        # the checkpoint, a later undo() clears the remaining actions.
        self.u.pop(self.l, 3)
        self.u.ins(self.l, 3, 123)
        tag = self.u.checkpoint()
        l_on_check = self.l[:]
        self.u.set(self.l, 0, 0)
        self.u.pop(self.l, 0)
        self.u.undo(tag)
        assert l_on_check == self.l
        self.u.undo()
        assert self.l == self.l_orig
    def test_atr(self):
        # atr changes an attribute; undo restores the previous value.
        self.color = 'red'
        self.u.atr(self, 'color', 'blue')
        assert self.color == 'blue'
        self.u.undo()
        assert self.color == 'red'
| Python |
# Copyright 2011 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# Addresses for the notification mail sent when a new project is
# submitted through the dashboard (used by project.py's ProjectPage.post).
mail_from = "Go Dashboard <builder@golang.org>"
mail_submit_to = "adg@golang.org"
mail_submit_subject = "New Project Submitted"
| Python |
# Copyright 2010 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# This is a Django custom template filter to work around the
# fact that GAE's urlencode filter doesn't handle unicode strings.
from google.appengine.ext import webapp
register = webapp.template.create_template_register()
@register.filter
def toutf8(value):
    """Template filter: encode a unicode value as a UTF-8 byte string."""
    return value.encode("utf-8")
| Python |
# Copyright 2010 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from google.appengine.api import mail
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
import os
import sets
# local imports
import toutf8
import const
template.register_template_library('toutf8')
class Project(db.Model):
    """Datastore entity for a community project listed on the dashboard.

    Entities are keyed by "proj-" + name (see ProjectPage.post/edit).
    """
    name = db.StringProperty(indexed=True)
    descr = db.StringProperty()
    web_url = db.StringProperty()
    category = db.StringProperty(indexed=True)
    tags = db.ListProperty(str)
    # Only approved projects are shown to non-admin visitors.
    approved = db.BooleanProperty(indexed=True)
# Seconds to keep the rendered project listing data in memcache.
CacheTimeout = 3600
class ProjectPage(webapp.RequestHandler):
    """Handles listing, submitting, and (for admins) editing projects."""
    def get(self):
        """Dispatch GET: /project/login redirects to login,
        /project/edit (admins only) shows the edit form,
        everything else renders the project list."""
        admin = users.is_current_user_admin()
        if self.request.path == "/project/login":
            self.redirect(users.create_login_url("/project"))
        elif self.request.path == "/project/edit" and admin:
            self.edit()
        else:
            self.list()
    def post(self):
        """Dispatch POST: save an admin edit, or accept a new submission."""
        if self.request.path == "/project/edit":
            self.edit(True)
        else:
            data = dict(map(lambda x: (x, self.request.get(x)), ["name","descr","web_url"]))
            # True if any of the submitted fields is empty.
            if reduce(lambda x, y: x or not y, data.values(), False):
                data["submitMsg"] = "You must complete all the fields."
                self.list(data)
                return
            p = Project.get_by_key_name("proj-"+data["name"])
            if p is not None:
                data["submitMsg"] = "A project by this name already exists."
                self.list(data)
                return
            p = Project(key_name="proj-"+data["name"], **data)
            p.put()
            # Notify the maintainers (addresses configured in const.py).
            path = os.path.join(os.path.dirname(__file__), 'project-notify.txt')
            mail.send_mail(
                sender=const.mail_from,
                to=const.mail_submit_to,
                subject=const.mail_submit_subject,
                body=template.render(path, {'project': p}))
            self.list({"submitMsg": "Your project has been submitted."})
    def list(self, additional_data={}):
        """Render the project listing, optionally filtered by ?tag=...

        Rendered data is memcached for non-admin visitors.
        NOTE(review): edit() only deletes 'view-project-data', so the
        tag-specific keys ('view-project-data-<tag>') built here are never
        invalidated and can stay stale until CacheTimeout expires.
        """
        cache_key = 'view-project-data'
        tag = self.request.get('tag', None)
        if tag:
            cache_key += '-'+tag
        data = memcache.get(cache_key)
        admin = users.is_current_user_admin()
        # Admins always see fresh (and unapproved) data.
        if admin or not data:
            projects = Project.all().order('category').order('name')
            if not admin:
                projects = projects.filter('approved =', True)
            projects = list(projects)
            # Collect the tag set before filtering so the full tag list
            # is still offered while a single tag is selected.
            tags = sets.Set()
            for p in projects:
                for t in p.tags:
                    tags.add(t)
            if tag:
                projects = filter(lambda x: tag in x.tags, projects)
            data = {}
            data['tag'] = tag
            data['tags'] = tags
            data['projects'] = projects
            data['admin']= admin
            if not admin:
                memcache.set(cache_key, data, time=CacheTimeout)
        for k, v in additional_data.items():
            data[k] = v
        self.response.headers['Content-Type'] = 'text/html; charset=utf-8'
        path = os.path.join(os.path.dirname(__file__), 'project.html')
        self.response.out.write(template.render(path, data))
    def edit(self, save=False):
        """Show the admin edit form (save=False) or apply the submitted
        edit/delete (save=True), then redirect back to the listing."""
        if save:
            name = self.request.get("orig_name")
        else:
            name = self.request.get("name")
        p = Project.get_by_key_name("proj-"+name)
        if not p:
            self.response.out.write("Couldn't find that Project.")
            return
        if save:
            if self.request.get("do") == "Delete":
                p.delete()
            else:
                for f in ['name', 'descr', 'web_url', 'category']:
                    setattr(p, f, self.request.get(f, None))
                p.approved = self.request.get("approved") == "1"
                # Drop empty strings produced by stray commas.
                p.tags = filter(lambda x: x, self.request.get("tags", "").split(","))
                p.put()
            memcache.delete('view-project-data')
            self.redirect('/project')
            return
        # get all project categories and tags
        cats, tags = sets.Set(), sets.Set()
        for r in Project.all():
            cats.add(r.category)
            for t in r.tags:
                tags.add(t)
        self.response.headers['Content-Type'] = 'text/html; charset=utf-8'
        path = os.path.join(os.path.dirname(__file__), 'project-edit.html')
        self.response.out.write(template.render(path, {
            "taglist": tags, "catlist": cats, "p": p, "tags": ",".join(p.tags) }))
    def redirect(self, url):
        """Send a plain 302 redirect to url."""
        self.response.set_status(302)
        self.response.headers.add_header("Location", url)
def main():
    """Entry point: route every request path to ProjectPage."""
    application = webapp.WSGIApplication([('/.*', ProjectPage)], debug=True)
    run_wsgi_app(application)

if __name__ == '__main__':
    main()
| Python |
# Copyright 2010 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""GDB Pretty printers and convenience functions for Go's runtime structures.
This script is loaded by GDB when it finds a .debug_gdb_scripts
section in the compiled binary. The [68]l linkers emit this with a
path to this file based on the path to the runtime package.
"""
# Known issues:
# - pretty printing only works for the 'native' strings. E.g. 'type
# foo string' will make foo a plain struct in the eyes of gdb,
# circumventing the pretty print triggering.
import sys, re
# NOTE(review): the 'gdb' module used below is provided by GDB itself
# when this script is auto-loaded; it is not importable outside GDB.
print >>sys.stderr, "Loading Go Runtime support."
# allow to manually reload while developing
goobjfile = gdb.current_objfile() or gdb.objfiles()[0]
goobjfile.pretty_printers = []
#
# Pretty Printers
#
class StringTypePrinter:
	"""Pretty print Go strings as UTF-8 text, clipped to their length."""

	pattern = re.compile(r'^struct string$')

	def __init__(self, val):
		self.val = val

	def display_hint(self):
		return 'string'

	def to_string(self):
		length = int(self.val['len'])
		data = self.val['str']
		return data.string("utf-8", "ignore", length)
class SliceTypePrinter:
	"Pretty print slices."
	pattern = re.compile(r'^struct \[\]')
	def __init__(self, val):
		self.val = val
	def display_hint(self):
		return 'array'
	def to_string(self):
		return str(self.val.type)[6:]	# skip 'struct '
	def children(self):
		# Sanity check: a slice with len > cap is corrupt; show nothing.
		if self.val["len"] > self.val["cap"]:
			return
		# Yield each element by pointer arithmetic from the backing array.
		ptr = self.val["array"]
		for idx in range(self.val["len"]):
			yield ('[%d]' % idx, (ptr + idx).dereference())
class MapTypePrinter:
	"""Pretty print map[K]V types.

	Map-typed go variables are really pointers. dereference them in gdb
	to inspect their contents with this pretty printer.
	"""
	pattern = re.compile(r'^struct hash<.*>$')
	def __init__(self, val):
		self.val = val
	def display_hint(self):
		return 'map'
	def to_string(self):
		return str(self.val.type)
	def children(self):
		# Alternate key/value children, as gdb's 'map' hint expects.
		stab = self.val['st']
		i = 0
		for v in self.traverse_hash(stab):
			yield ("[%d]" % i, v['key'])
			yield ("[%d]" % (i + 1), v['val'])
			i += 2
	def traverse_hash(self, stab):
		# Walk one subtable's entries, recursing into nested subtables.
		ptr = stab['entry'].address
		last = stab['last']
		while ptr <= last:
			v = ptr.dereference()
			ptr = ptr + 1
			# hash == 0 marks an empty slot.
			if v['hash'] == 0: continue
			if v['hash'] & 63 == 63:	# subtable
				for v in self.traverse_hash(v['key'].cast(self.val['st'].type)):
					yield v
			else:
				yield v
class ChanTypePrinter:
	"""Pretty print chan[T] types.

	Chan-typed go variables are really pointers. dereference them in gdb
	to inspect their contents with this pretty printer.
	"""
	pattern = re.compile(r'^struct hchan<.*>$')
	def __init__(self, val):
		self.val = val
	def display_hint(self):
		return 'array'
	def to_string(self):
		return str(self.val.type)
	def children(self):
		# see chan.c chanbuf(). et is the type stolen from hchan<T>::recvq->first->elem
		et = [x.type for x in self.val['recvq']['first'].type.target().fields() if x.name == 'elem'][0]
		# The buffer starts immediately after the hchan header.
		ptr = (self.val.address + 1).cast(et.pointer())
		# Walk the circular buffer from recvx for qcount elements.
		for i in range(self.val["qcount"]):
			j = (self.val["recvx"] + i) % self.val["dataqsiz"]
			yield ('[%d]' % i, (ptr + j).dereference())
#
# Register all the *Printer classes above.
#
def makematcher(klass):
	"""Return a matcher function for gdb's pretty_printers list.

	The matcher returns an instance of *klass* wrapping the value when
	the value's type name matches klass.pattern, and None otherwise.
	"""
	def matcher(val):
		try:
			if klass.pattern.match(str(val.type)):
				return klass(val)
		except Exception:
			# Narrowed from a bare 'except:': gdb values can raise on
			# str()/attribute access, but a bare clause would also have
			# swallowed KeyboardInterrupt/SystemExit.
			pass
	return matcher
goobjfile.pretty_printers.extend([makematcher(k) for k in vars().values() if hasattr(k, 'pattern')])
#
# For reference, this is what we're trying to do:
# eface: p *(*(struct 'runtime.commonType'*)'main.e'->type_->data)->string
# iface: p *(*(struct 'runtime.commonType'*)'main.s'->tab->Type->data)->string
#
# interface types can't be recognized by their name, instead we check
# if they have the expected fields. Unfortunately the mapping of
# fields to python attributes in gdb.py isn't complete: you can't test
# for presence other than by trapping.
def is_iface(val):
	"""Report whether val has the field types of a runtime iface.

	Returns True/False, or None when the fields cannot be inspected.
	"""
	try:
		if str(val['tab'].type) != "struct runtime.itab *":
			return False
		return str(val['data'].type) == "void *"
	except:
		pass
def is_eface(val):
	"""Report whether val has the field types of a runtime eface.

	Returns True/False, or None when the fields cannot be inspected.
	"""
	try:
		if str(val['_type'].type) != "struct runtime._type *":
			return False
		return str(val['data'].type) == "void *"
	except:
		pass
def lookup_type(name):
	"""Return the gdb.Type named *name*, or None if none can be found.

	Tries the plain name, then 'struct name', then — stripping the
	leading character (e.g. '*') — a pointer to the struct.
	"""
	try:
		return gdb.lookup_type(name)
	except:
		pass
	try:
		return gdb.lookup_type('struct ' + name)
	except:
		pass
	try:
		return gdb.lookup_type('struct ' + name[1:]).pointer()
	except:
		pass
# Cached gdb pointer types used below to decode interface type words.
_rctp_type = gdb.lookup_type("struct runtime.commonType").pointer()
_rtp_type = gdb.lookup_type("struct runtime._type").pointer()
def iface_commontype(obj):
	"""Return the runtime.commonType describing an iface/eface's dynamic
	type, or None if obj is not an interface or looks corrupt."""
	if is_iface(obj):
		go_type_ptr = obj['tab']['_type']
	elif is_eface(obj):
		go_type_ptr = obj['_type']
	else:
		return
	# sanity check: reflection type description ends in a loop.
	tt = go_type_ptr['_type'].cast(_rtp_type).dereference()['_type']
	if tt != tt.cast(_rtp_type).dereference()['_type']:
		return
	return go_type_ptr['ptr'].cast(_rctp_type).dereference()
def iface_dtype(obj):
	"Decode type of the data field of an eface or iface struct."
	# known issue: dtype_name decoded from runtime.commonType is "nested.Foo"
	# but the dwarf table lists it as "full/path/to/nested.Foo"
	dynamic_go_type = iface_commontype(obj)
	if dynamic_go_type is None:
		return
	dtype_name = dynamic_go_type['string'].dereference()['str'].string()
	dynamic_gdb_type = lookup_type(dtype_name)
	if dynamic_gdb_type is None:
		return
	type_size = int(dynamic_go_type['size'])
	uintptr_size = int(dynamic_go_type['size'].type.sizeof)	# size is itself an uintptr
	if type_size > uintptr_size:
		# Values larger than a pointer: the data word holds a pointer
		# to the value, so return a pointer type.
		dynamic_gdb_type = dynamic_gdb_type.pointer()
	return dynamic_gdb_type
def iface_dtype_name(obj):
	"Decode type name of the data field of an eface or iface struct."
	dynamic_go_type = iface_commontype(obj)
	if dynamic_go_type is None:
		return
	return dynamic_go_type['string'].dereference()['str'].string()
class IfacePrinter:
	"""Pretty print interface values

	Casts the data field to the appropriate dynamic type."""
	def __init__(self, val):
		self.val = val
	def display_hint(self):
		return 'string'
	def to_string(self):
		if self.val['data'] == 0:
			# NOTE(review): this returns the integer 0 (spelled 0x0),
			# not the string "0x0" — presumably intended as text; verify.
			return 0x0
		try:
			dtype = iface_dtype(self.val)
		except:
			return "<bad dynamic type>"
		if dtype is None:  # trouble looking up, print something reasonable
			return "(%s)%s" % (iface_dtype_name(self.val), self.val['data'])
		try:
			return self.val['data'].cast(dtype).dereference()
		except:
			pass
		# Dereference failed (e.g. unreadable memory): show the pointer.
		return self.val['data'].cast(dtype)
def ifacematcher(val):
	# Interfaces are recognized by field shape, not type name.
	if is_iface(val) or is_eface(val):
		return IfacePrinter(val)
goobjfile.pretty_printers.append(ifacematcher)
#
# Convenience Functions
#
class GoLenFunc(gdb.Function):
	"Length of strings, slices, maps or channels"
	# Maps each printer's type pattern to the struct field holding the length.
	how = ((StringTypePrinter, 'len'),
	       (SliceTypePrinter, 'len'),
	       (MapTypePrinter, 'count'),
	       (ChanTypePrinter, 'qcount'))
	def __init__(self):
		super(GoLenFunc, self).__init__("len")
	def invoke(self, obj):
		typename = str(obj.type)
		for klass, fld in self.how:
			if klass.pattern.match(typename):
				return obj[fld]
class GoCapFunc(gdb.Function):
	"Capacity of slices or channels"
	# Maps each printer's type pattern to the struct field holding the capacity.
	how = ((SliceTypePrinter, 'cap'),
	       (ChanTypePrinter, 'dataqsiz'))
	def __init__(self):
		super(GoCapFunc, self).__init__("cap")
	def invoke(self, obj):
		typename = str(obj.type)
		for klass, fld in self.how:
			if klass.pattern.match(typename):
				return obj[fld]
class DTypeFunc(gdb.Function):
	"""Cast Interface values to their dynamic type.

	For non-interface types this behaves as the identity operation.
	"""
	def __init__(self):
		super(DTypeFunc, self).__init__("dtype")
	def invoke(self, obj):
		try:
			return obj['data'].cast(iface_dtype(obj))
		except:
			# Not an interface (or cast failed): identity.
			pass
		return obj
#
# Commands
#
sts = ('idle', 'runnable', 'running', 'syscall', 'waiting', 'moribund', 'dead', 'recovery')

def linked_list(ptr, linkfield):
	"""Yield every node of an intrusive linked list, starting at ptr.

	Follows node[linkfield] until a falsy (null) link is reached.
	"""
	node = ptr
	while node:
		yield node
		node = node[linkfield]
class GoroutinesCmd(gdb.Command):
	"List all goroutines."
	def __init__(self):
		super(GoroutinesCmd, self).__init__("info goroutines", gdb.COMMAND_STACK, gdb.COMPLETE_NONE)
	def invoke(self, arg, from_tty):
		# args = gdb.string_to_argv(arg)
		vp = gdb.lookup_type('void').pointer()
		# Walk the runtime's global goroutine list, skipping dead ones.
		for ptr in linked_list(gdb.parse_and_eval("'runtime.allg'"), 'alllink'):
			if ptr['status'] == 6:	# 'gdead'
				continue
			# '*' marks a goroutine currently bound to an M.
			s = ' '
			if ptr['m']:
				s = '*'
			pc = ptr['sched']['pc'].cast(vp)
			sp = ptr['sched']['sp'].cast(vp)
			blk = gdb.block_for_pc(long((pc)))
			print s, ptr['goid'], "%8s" % sts[long((ptr['status']))], blk.function
def find_goroutine(goid):
	"""Return [pc, sp] (as void*) saved in the G struct of the goroutine
	with the given id, or (None, None) if it does not exist/is dead."""
	vp = gdb.lookup_type('void').pointer()
	for ptr in linked_list(gdb.parse_and_eval("'runtime.allg'"), 'alllink'):
		if ptr['status'] == 6:	# 'gdead'
			continue
		if ptr['goid'] == goid:
			return [ptr['sched'][x].cast(vp) for x in 'pc', 'sp']
	return None, None
class GoroutineCmd(gdb.Command):
	"""Execute gdb command in the context of goroutine <goid>.

	Switch PC and SP to the ones in the goroutine's G structure,
	execute an arbitrary gdb command, and restore PC and SP.

	Usage: (gdb) goroutine <goid> <gdbcmd>

	Note that it is ill-defined to modify state in the context of a goroutine.
	Restrict yourself to inspecting values.
	"""
	def __init__(self):
		super(GoroutineCmd, self).__init__("goroutine", gdb.COMMAND_STACK, gdb.COMPLETE_NONE)
	def invoke(self, arg, from_tty):
		goid, cmd = arg.split(None, 1)
		pc, sp = find_goroutine(int(goid))
		if not pc:
			print "No such goroutine: ", goid
			return
		# Save the current frame's pc/sp, swap in the goroutine's saved
		# ones, run the command, and restore everything whatever happens.
		save_frame = gdb.selected_frame()
		gdb.parse_and_eval('$save_pc = $pc')
		gdb.parse_and_eval('$save_sp = $sp')
		gdb.parse_and_eval('$pc = 0x%x' % long(pc))
		gdb.parse_and_eval('$sp = 0x%x' % long(sp))
		try:
			gdb.execute(cmd)
		finally:
			gdb.parse_and_eval('$pc = $save_pc')
			gdb.parse_and_eval('$sp = $save_sp')
			save_frame.select()
class GoIfaceCmd(gdb.Command):
	"Print Static and dynamic interface types"
	def __init__(self):
		super(GoIfaceCmd, self).__init__("iface", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL)
	def invoke(self, arg, from_tty):
		# Each argument is an expression evaluating to an interface value.
		for obj in gdb.string_to_argv(arg):
			try:
				#TODO fix quoting for qualified variable names
				obj = gdb.parse_and_eval("%s" % obj)
			except Exception, e:
				print "Can't parse ", obj, ": ", e
				continue
			if obj['data'] == 0:
				dtype = "nil"
			else:
				dtype = iface_dtype(obj)
			if dtype is None:
				print "Not an interface: ", obj.type
				continue
			print "%s: %s" % (obj.type, dtype)
# so Itype will start with a commontype which has kind = interface
#
# Register all convenience functions and CLI commands
#
# Instantiating each class that defines 'invoke' registers it with gdb
# (their __init__ passes a name to the gdb.Function/gdb.Command constructor).
for k in vars().values():
	if hasattr(k, 'invoke'):
		k()
| Python |
# Copyright 2010 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# This code is used to parse the debug log from gnutls-cli and generate a
# script of the handshake. This script is included in handshake_server_test.go.
# See the comments there for details.
import sys
# Completed byte blocks, alternating between reads and writes.
blocks = []
READ = 1
WRITE = 2
currentBlockType = 0
currentBlock = []
for line in sys.stdin.readlines():
	# Strip the trailing newline.
	line = line[:-1]
	if line.startswith("|<7>| WRITE: "):
		# Direction changed to WRITE: flush the block collected so far.
		if currentBlockType != WRITE:
			if len(currentBlock) > 0:
				blocks.append(currentBlock)
			currentBlock = []
			currentBlockType = WRITE
	elif line.startswith("|<7>| READ: "):
		# Direction changed to READ: flush the block collected so far.
		if currentBlockType != READ:
			if len(currentBlock) > 0:
				blocks.append(currentBlock)
			currentBlock = []
			currentBlockType = READ
	elif line.startswith("|<7>| 0"):
		# Hex dump line: skip the offset prefix and collect the bytes.
		line = line[13:]
		line = line.strip()
		bs = line.split()
		for b in bs:
			currentBlock.append(int(b, 16))
	elif line.startswith("|<7>| RB-PEEK: Read 1 bytes"):
		# A peeked byte was logged but not consumed: drop it again.
		currentBlock = currentBlock[:-1]
# Flush the final block.
if len(currentBlock) > 0:
	blocks.append(currentBlock)
# Emit each block as a Go composite-literal byte array, 8 bytes per row,
# for inclusion in handshake_server_test.go.
for block in blocks:
	sys.stdout.write("\t{\n")
	i = 0
	for b in block:
		if i % 8 == 0:
			sys.stdout.write("\t\t")
		sys.stdout.write("0x%02x," % b)
		if i % 8 == 7:
			sys.stdout.write("\n")
		else:
			sys.stdout.write(" ")
		i += 1
	sys.stdout.write("\n\t},\n\n")
| Python |
# coding=utf-8
# (The line above is necessary so that I can use 世界 in the
# *comment* below without Python getting all bent out of shape.)
# Copyright 2007-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Mercurial interface to codereview.appspot.com.
To configure, set the following options in
your repository's .hg/hgrc file.
[extensions]
codereview = /path/to/codereview.py
[codereview]
server = codereview.appspot.com
The server should be running Rietveld; see http://code.google.com/p/rietveld/.
In addition to the new commands, this extension introduces
the file pattern syntax @nnnnnn, where nnnnnn is a change list
number, to mean the files included in that change list, which
must be associated with the current client.
For example, if change 123456 contains the files x.go and y.go,
"hg diff @123456" is equivalent to"hg diff x.go y.go".
'''
import sys
# Refuse to run as a standalone script; this file is a Mercurial extension.
if __name__ == "__main__":
	print >>sys.stderr, "This is a Mercurial extension and should not be invoked directly."
	sys.exit(2)

# We require Python 2.6 for the json package.
# Compare version_info tuples rather than the version string: string
# comparison is lexicographic and would misclassify e.g. "2.10".
if sys.version_info < (2, 6):
	print >>sys.stderr, "The codereview extension requires Python 2.6 or newer."
	print >>sys.stderr, "You are running Python " + sys.version
	sys.exit(2)
import json
import os
import re
import stat
import subprocess
import threading
import time
from mercurial import commands as hg_commands
from mercurial import util as hg_util
# Module-level state; these are populated elsewhere (the initialization
# code is not in this chunk of the file).
defaultcc = None
codereview_disabled = None
real_rollback = None
releaseBranch = None
server = "codereview.appspot.com"
server_url_base = None
#######################################################################
# Normally I would split this into multiple files, but it simplifies
# import path headaches to keep it all in one file. Sorry.
# The different parts of the file are separated by banners like this one.
#######################################################################
# Helpers
def RelativePath(path, cwd):
	"""Return path relative to cwd when path lies strictly inside cwd;
	otherwise return path unchanged.

	Fixed: the previous code indexed path[len(cwd)] directly, which
	raised IndexError when path equaled cwd exactly.
	"""
	if path.startswith(cwd + '/'):
		return path[len(cwd)+1:]
	return path
def Sub(l1, l2):
	"""Return the elements of l1 that do not appear in l2 (order kept)."""
	return [item for item in l1 if item not in l2]
def Add(l1, l2):
	"""Return the sorted union of l1 and l2 (duplicates inside l1 survive)."""
	merged = l1 + [item for item in l2 if item not in l1]
	merged.sort()
	return merged
def Intersect(l1, l2):
	"""Return the elements of l1 that also appear in l2 (order from l1)."""
	return [item for item in l1 if item in l2]
#######################################################################
# RE: UNICODE STRING HANDLING
#
# Python distinguishes between the str (string of bytes)
# and unicode (string of code points) types. Most operations
# work on either one just fine, but some (like regexp matching)
# require unicode, and others (like write) require str.
#
# As befits the language, Python hides the distinction between
# unicode and str by converting between them silently, but
# *only* if all the bytes/code points involved are 7-bit ASCII.
# This means that if you're not careful, your program works
# fine on "hello, world" and fails on "hello, 世界". And of course,
# the obvious way to be careful - use static types - is unavailable.
# So the only way is trial and error to find where to put explicit
# conversions.
#
# Because more functions do implicit conversion to str (string of bytes)
# than do implicit conversion to unicode (string of code points),
# the convention in this module is to represent all text as str,
# converting to unicode only when calling a unicode-only function
# and then converting back to str as soon as possible.
def typecheck(s, t):
	"""Abort unless s is exactly of type t (identity check, not isinstance)."""
	if type(s) is not t:
		raise hg_util.Abort("type check failed: %s has type %s != %s" % (repr(s), type(s), t))
# If we have to pass unicode instead of str, ustr does that conversion clearly.
def ustr(s):
	"""Convert a UTF-8 byte string to unicode (Python 2 semantics)."""
	typecheck(s, str)
	return s.decode("utf-8")
# Even with those, Mercurial still sometimes turns unicode into str
# and then tries to use it as ascii. Change Mercurial's default.
def set_mercurial_encoding_to_utf8():
	"""Force Mercurial's internal text encoding to UTF-8."""
	from mercurial import encoding
	encoding.encoding = 'utf-8'
# Applied once at import time.
set_mercurial_encoding_to_utf8()
# Even with those we still run into problems.
# I tried to do things by the book but could not convince
# Mercurial to let me check in a change with UTF-8 in the
# CL description or author field, no matter how many conversions
# between str and unicode I inserted and despite changing the
# default encoding. I'm tired of this game, so set the default
# encoding for all of Python to 'utf-8', not 'ascii'.
def default_to_utf8():
	"""Set Python's process-wide default encoding to 'utf-8' (Python 2 hack).

	sys.stdout must be saved/restored because reload(sys) resets it.
	"""
	import sys
	stdout, __stdout__ = sys.stdout, sys.__stdout__
	reload(sys)  # site.py deleted setdefaultencoding; get it back
	sys.stdout, sys.__stdout__ = stdout, __stdout__
	sys.setdefaultencoding('utf-8')
# Applied once at import time.
default_to_utf8()
#######################################################################
# Status printer for long-running commands
# Description of the current long-running operation, shown by StatusThread.
global_status = None
def set_status(s):
	"""Record s as the status line shown by the background status thread."""
	# print >>sys.stderr, "\t", time.asctime(), s
	global global_status
	global_status = s
class StatusThread(threading.Thread):
	"""Background thread that periodically prints global_status to stderr."""
	def __init__(self):
		threading.Thread.__init__(self)
	def run(self):
		# pause a reasonable amount of time before
		# starting to display status messages, so that
		# most hg commands won't ever see them.
		time.sleep(30)
		# now show status every 15 seconds
		while True:
			# Align wakeups to 15-second wall-clock boundaries.
			time.sleep(15 - time.time() % 15)
			s = global_status
			if s is None:
				continue
			if s == "":
				s = "(unknown status)"
			print >>sys.stderr, time.asctime(), s
def start_status_thread():
	"""Start the background status-printer thread."""
	t = StatusThread()
	t.setDaemon(True)  # allowed to exit if t is still running
	t.start()
#######################################################################
# Change list parsing.
#
# Change lists are stored in .hg/codereview/cl.nnnnnn
# where nnnnnn is the number assigned by the code review server.
# Most data about a change list is stored on the code review server
# too: the description, reviewer, and cc list are all stored there.
# The only thing in the cl.nnnnnn file is the list of relevant files.
# Also, the existence of the cl.nnnnnn file marks this repository
# as the one where the change list lives.
# Minimal placeholder diff, uploaded when a CL is created with no files
# yet (see CL.Upload below).
emptydiff = """Index: ~rietveld~placeholder~
===================================================================
diff --git a/~rietveld~placeholder~ b/~rietveld~placeholder~
new file mode 100644
"""
class CL(object):
	"""In-memory representation of a single change list (CL)."""
	def __init__(self, name):
		"""Create an empty CL; name is the issue number or "new"."""
		typecheck(name, str)
		self.name = name
		self.desc = ''
		self.files = []
		self.reviewer = []
		self.cc = []
		self.url = ''
		self.local = False
		self.web = False
		self.copied_from = None	# None means current user
		self.mailed = False
		self.private = False
		self.lgtm = []
	def DiskText(self):
		"""Render the CL in the format stored in .hg/codereview/cl.nnnnnn."""
		cl = self
		s = ""
		if cl.copied_from:
			s += "Author: " + cl.copied_from + "\n\n"
		if cl.private:
			s += "Private: " + str(self.private) + "\n"
		s += "Mailed: " + str(self.mailed) + "\n"
		s += "Description:\n"
		s += Indent(cl.desc, "\t")
		s += "Files:\n"
		for f in cl.files:
			s += "\t" + f + "\n"
		typecheck(s, str)
		return s
	def EditorText(self):
		"""Render the CL as presented to the user in their editor."""
		cl = self
		s = _change_prolog
		s += "\n"
		if cl.copied_from:
			s += "Author: " + cl.copied_from + "\n"
		if cl.url != '':
			s += 'URL: ' + cl.url + '	# cannot edit\n\n'
		if cl.private:
			s += "Private: True\n"
		s += "Reviewer: " + JoinComma(cl.reviewer) + "\n"
		s += "CC: " + JoinComma(cl.cc) + "\n"
		s += "\n"
		s += "Description:\n"
		if cl.desc == '':
			s += "\t<enter description here>\n"
		else:
			s += Indent(cl.desc, "\t")
		s += "\n"
		if cl.local or cl.name == "new":
			s += "Files:\n"
			for f in cl.files:
				s += "\t" + f + "\n"
			s += "\n"
		typecheck(s, str)
		return s
	def PendingText(self, quick=False):
		"""Render a human-readable summary for 'hg pending' (quick omits
		reviewer/CC/file details)."""
		cl = self
		s = cl.name + ":" + "\n"
		s += Indent(cl.desc, "\t")
		s += "\n"
		if cl.copied_from:
			s += "\tAuthor: " + cl.copied_from + "\n"
		if not quick:
			s += "\tReviewer: " + JoinComma(cl.reviewer) + "\n"
			for (who, line) in cl.lgtm:
				s += "\t\t" + who + ": " + line + "\n"
			s += "\tCC: " + JoinComma(cl.cc) + "\n"
			s += "\tFiles:\n"
			for f in cl.files:
				s += "\t\t" + f + "\n"
		typecheck(s, str)
		return s
	def Flush(self, ui, repo):
		"""Write the CL to disk and sync it to the code review server."""
		if self.name == "new":
			self.Upload(ui, repo, gofmt_just_warn=True, creating=True)
		dir = CodeReviewDir(ui, repo)
		path = dir + '/cl.' + self.name
		# Write to a temporary file and rename for atomic replacement.
		f = open(path+'!', "w")
		f.write(self.DiskText())
		f.close()
		# Windows cannot rename over an existing file.
		if sys.platform == "win32" and os.path.isfile(path):
			os.remove(path)
		os.rename(path+'!', path)
		if self.web and not self.copied_from:
			EditDesc(self.name, desc=self.desc,
				reviewers=JoinComma(self.reviewer), cc=JoinComma(self.cc),
				private=self.private)
	def Delete(self, ui, repo):
		"""Remove the CL's file from .hg/codereview."""
		dir = CodeReviewDir(ui, repo)
		os.unlink(dir + "/cl." + self.name)
	def Subject(self):
		"""Return the mail subject: first line of the description,
		truncated and prefixed with the issue number when known."""
		s = line1(self.desc)
		if len(s) > 60:
			s = s[0:55] + "..."
		if self.name != "new":
			s = "code review %s: %s" % (self.name, s)
		typecheck(s, str)
		return s
	def Upload(self, ui, repo, send_mail=False, gofmt=True, gofmt_just_warn=False, creating=False, quiet=False):
		"""Upload the CL metadata and diffs to the code review server.

		Optionally runs gofmt checks first, sends review mail, and
		flushes the updated CL (with its assigned issue number) to disk.
		"""
		if not self.files and not creating:
			ui.warn("no files in change list\n")
		if ui.configbool("codereview", "force_gofmt", True) and gofmt:
			CheckFormat(ui, repo, self.files, just_warn=gofmt_just_warn)
		set_status("uploading CL metadata + diffs")
		os.chdir(repo.root)
		form_fields = [
			("content_upload", "1"),
			("reviewers", JoinComma(self.reviewer)),
			("cc", JoinComma(self.cc)),
			("description", self.desc),
			("base_hashes", ""),
		]
		if self.name != "new":
			form_fields.append(("issue", self.name))
		vcs = None
		# We do not include files when creating the issue,
		# because we want the patch sets to record the repository
		# and base revision they are diffs against. We use the patch
		# set message for that purpose, but there is no message with
		# the first patch set. Instead the message gets used as the
		# new CL's overall subject. So omit the diffs when creating
		# and then we'll run an immediate upload.
		# This has the effect that every CL begins with an empty "Patch set 1".
		if self.files and not creating:
			vcs = MercurialVCS(upload_options, ui, repo)
			data = vcs.GenerateDiff(self.files)
			files = vcs.GetBaseFiles(data)
			if len(data) > MAX_UPLOAD_SIZE:
				# Too big for a single form field; upload patches separately.
				uploaded_diff_file = []
				form_fields.append(("separate_patches", "1"))
			else:
				uploaded_diff_file = [("data", "data.diff", data)]
		else:
			uploaded_diff_file = [("data", "data.diff", emptydiff)]
		if vcs and self.name != "new":
			form_fields.append(("subject", "diff -r " + vcs.base_rev + " " + ui.expandpath("default")))
		else:
			# First upload sets the subject for the CL itself.
			form_fields.append(("subject", self.Subject()))
		ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
		response_body = MySend("/upload", body, content_type=ctype)
		# Response: first line is a message containing the issue URL,
		# second the patch set id, remaining lines the patch list.
		patchset = None
		msg = response_body
		lines = msg.splitlines()
		if len(lines) >= 2:
			msg = lines[0]
			patchset = lines[1].strip()
			patches = [x.split(" ", 1) for x in lines[2:]]
		if response_body.startswith("Issue updated.") and quiet:
			pass
		else:
			ui.status(msg + "\n")
		set_status("uploaded CL metadata + diffs")
		if not response_body.startswith("Issue created.") and not response_body.startswith("Issue updated."):
			raise hg_util.Abort("failed to update issue: " + response_body)
		# The issue number is the last path component of the URL in msg.
		issue = msg[msg.rfind("/")+1:]
		self.name = issue
		if not self.url:
			self.url = server_url_base + self.name
		if not uploaded_diff_file:
			set_status("uploading patches")
			patches = UploadSeparatePatches(issue, rpc, patchset, data, upload_options)
		if vcs:
			set_status("uploading base files")
			vcs.UploadBaseFiles(issue, rpc, patches, patchset, upload_options, files)
		if send_mail:
			set_status("sending mail")
			MySend("/" + issue + "/mail", payload="")
		self.web = True
		set_status("flushing changes to disk")
		self.Flush(ui, repo)
		return
	def Mail(self, ui, repo):
		"""Post the 'please review' message for this CL and mark it mailed."""
		pmsg = "Hello " + JoinComma(self.reviewer)
		if self.cc:
			pmsg += " (cc: %s)" % (', '.join(self.cc),)
		pmsg += ",\n"
		pmsg += "\n"
		repourl = ui.expandpath("default")
		if not self.mailed:
			pmsg += "I'd like you to review this change to\n" + repourl + "\n"
		else:
			pmsg += "Please take another look.\n"
		typecheck(pmsg, str)
		PostMessage(ui, self.name, pmsg, subject=self.Subject())
		self.mailed = True
		self.Flush(ui, repo)
def GoodCLName(name):
	# Report whether name is a well-formed CL name: one or more decimal digits.
	typecheck(name, str)
	return re.match("^[0-9]+$", name)
def ParseCL(text, name):
	"""Parse the on-disk/editor CL text format into a CL object.

	The format is a sequence of named sections (Author, Description,
	Files, URL, Reviewer, CC, Mailed, Private); a section's value is
	either on the header line after the colon or on indented
	continuation lines.  Lines starting with '#' are comments.

	Returns (cl, 0, '') on success or (None, lineno, errmsg) on a
	parse error.
	"""
	typecheck(text, str)
	typecheck(name, str)
	sname = None
	lineno = 0
	sections = {
		'Author': '',
		'Description': '',
		'Files': '',
		'URL': '',
		'Reviewer': '',
		'CC': '',
		'Mailed': '',
		'Private': '',
	}
	for line in text.split('\n'):
		lineno += 1
		line = line.rstrip()
		if line != '' and line[0] == '#':
			continue
		if line == '' or line[0] == ' ' or line[0] == '\t':
			# Blank or indented line: continuation of the current section.
			if sname == None and line != '':
				return None, lineno, 'text outside section'
			if sname != None:
				sections[sname] += line + '\n'
			continue
		# Unindented line must be a "Section: [value]" header.
		p = line.find(':')
		if p >= 0:
			s, val = line[:p].strip(), line[p+1:].strip()
			if s in sections:
				sname = s
				if val != '':
					sections[sname] += val + '\n'
				continue
		return None, lineno, 'malformed section header'
	for k in sections:
		sections[k] = StripCommon(sections[k]).rstrip()
	cl = CL(name)
	if sections['Author']:
		cl.copied_from = sections['Author']
	cl.desc = sections['Description']
	for line in sections['Files'].split('\n'):
		# Strip trailing '#' comments from each file line.
		i = line.find('#')
		if i >= 0:
			line = line[0:i].rstrip()
		line = line.strip()
		if line == '':
			continue
		cl.files.append(line)
	cl.reviewer = SplitCommaSpace(sections['Reviewer'])
	cl.cc = SplitCommaSpace(sections['CC'])
	cl.url = sections['URL']
	if sections['Mailed'] != 'False':
		# Odd default, but avoids spurious mailings when
		# reading old CLs that do not have a Mailed: line.
		# CLs created with this update will always have
		# Mailed: False on disk.
		cl.mailed = True
	if sections['Private'] in ('True', 'true', 'Yes', 'yes'):
		cl.private = True
	if cl.desc == '<enter description here>':
		# Template placeholder was left untouched; treat as empty.
		cl.desc = ''
	return cl, 0, ''
def SplitCommaSpace(s):
	"""Split a comma-separated string into a list of items.

	Surrounding whitespace is trimmed first; an empty (or all-space)
	input yields the empty list.
	"""
	typecheck(s, str)
	trimmed = s.strip()
	if not trimmed:
		return []
	return re.split(", *", trimmed)
def CutDomain(s):
	"""Return the local part of an email address (everything before '@').

	Strings without an '@' are returned unchanged.
	"""
	typecheck(s, str)
	at = s.find('@')
	if at < 0:
		return s
	return s[:at]
def JoinComma(l):
	"""Join a list of strings with ", ", type-checking each element first."""
	for item in l:
		typecheck(item, str)
	return ", ".join(l)
def ExceptionDetail():
	"""Format the current in-flight exception as 'TypeName: message'.

	The message part is omitted when the exception carries no text.
	"""
	name = str(sys.exc_info()[0])
	# str() of an exception class renders as "<type 'X'>" (old-style
	# classes) or "<class 'X'>"; peel off the wrapper to leave just X.
	if name.startswith("<type '") and name.endswith("'>"):
		name = name[7:-2]
	elif name.startswith("<class '") and name.endswith("'>"):
		name = name[8:-2]
	detail = str(sys.exc_info()[1])
	if len(detail) > 0:
		name += ": " + detail
	return name
def IsLocalCL(ui, repo, name):
	# A CL is local when its name is well-formed and a cl.<name> metadata
	# file exists in the repository's .hg/codereview directory.
	return GoodCLName(name) and os.access(CodeReviewDir(ui, repo) + "/cl." + name, 0)
# Load CL from disk and/or the web.
def LoadCL(ui, repo, name, web=True):
	"""Load CL metadata from disk and/or the code review server.

	Reads the local cl.<name> file if present (marking the CL local);
	when web is true, also fetches and merges the server's JSON issue
	data (reviewers, cc, description, privacy, LGTM messages).

	Returns (cl, '') on success or (None, errmsg) on failure.
	"""
	typecheck(name, str)
	set_status("loading CL " + name)
	if not GoodCLName(name):
		return None, "invalid CL name"
	dir = CodeReviewDir(ui, repo)
	path = dir + "cl." + name
	if os.access(path, 0):
		ff = open(path)
		text = ff.read()
		ff.close()
		cl, lineno, err = ParseCL(text, name)
		if err != "":
			return None, "malformed CL data: "+err
		cl.local = True
	else:
		cl = CL(name)
	if web:
		set_status("getting issue metadata from web")
		d = JSONGet(ui, "/api/" + name + "?messages=true")
		set_status(None)
		if d is None:
			return None, "cannot load CL %s from server" % (name,)
		# Sanity-check the response before trusting it.
		if 'owner_email' not in d or 'issue' not in d or str(d['issue']) != name:
			return None, "malformed response loading CL data from code review server"
		cl.dict = d
		cl.reviewer = d.get('reviewers', [])
		cl.cc = d.get('cc', [])
		if cl.local and cl.copied_from and cl.desc:
			# local copy of CL written by someone else
			# and we saved a description. use that one,
			# so that committers can edit the description
			# before doing hg submit.
			pass
		else:
			cl.desc = d.get('description', "")
		cl.url = server_url_base + name
		cl.web = True
		cl.private = d.get('private', False) != False
		cl.lgtm = []
		for m in d.get('messages', []):
			if m.get('approval', False) == True:
				# Record (sender-without-domain, first line of message).
				who = re.sub('@.*', '', m.get('sender', ''))
				text = re.sub("\n(.|\n)*", '', m.get('text', ''))
				cl.lgtm.append((who, text))
	set_status("loaded CL " + name)
	return cl, ''
class LoadCLThread(threading.Thread):
	"""Worker thread that loads one CL; the result lands in self.cl.

	Used by LoadAllCL to fetch many CLs in parallel.
	"""
	def __init__(self, ui, repo, dir, f, web):
		threading.Thread.__init__(self)
		self.ui = ui
		self.repo = repo
		self.dir = dir
		self.f = f  # file name of the form "cl.<number>"
		self.web = web
		self.cl = None  # filled in by run() on success
	def run(self):
		# f[3:] strips the "cl." prefix to recover the CL number.
		cl, err = LoadCL(self.ui, self.repo, self.f[3:], web=self.web)
		if err != '':
			self.ui.warn("loading "+self.dir+self.f+": " + err + "\n")
			return
		self.cl = cl
# Load all the CLs from this repository.
def LoadAllCL(ui, repo, web=True):
	"""Load every CL stored in this repository, in parallel.

	Returns a dict mapping CL name -> CL object.  When web is true the
	server is queried too; the first fetch is done synchronously so
	that any authentication prompt happens once, not in parallel.
	"""
	dir = CodeReviewDir(ui, repo)
	m = {}
	files = [f for f in os.listdir(dir) if f.startswith('cl.')]
	if not files:
		return m
	active = []
	first = True
	for f in files:
		t = LoadCLThread(ui, repo, dir, f, web)
		t.start()
		if web and first:
			# first request: wait in case it needs to authenticate
			# otherwise we get lots of user/password prompts
			# running in parallel.
			t.join()
			if t.cl:
				m[t.cl.name] = t.cl
			first = False
		else:
			active.append(t)
	# Collect results from the remaining threads.
	for t in active:
		t.join()
		if t.cl:
			m[t.cl.name] = t.cl
	return m
# Find repository root. On error, ui.warn and return None
def RepoDir(ui, repo):
	"""Return the repository root as a local-filesystem path.

	Warns via ui and returns None when the repository is not
	file-backed.  A trailing slash, if any, is removed.
	"""
	url = repo.url()
	if not url.startswith('file:'):
		ui.warn("repository %s is not in local file system\n" % (url,))
		return None
	url = url[len('file:'):]
	if url.endswith('/'):
		url = url[:-1]
	typecheck(url, str)
	return url
# Find (or make) code review directory. On error, ui.warn and return None
def CodeReviewDir(ui, repo):
	"""Find (or create) the .hg/codereview metadata directory.

	Returns the directory path (with a trailing slash), or None after
	warning when the repository root is unavailable or mkdir fails.
	"""
	dir = RepoDir(ui, repo)
	if dir == None:
		return None
	dir += '/.hg/codereview/'
	if not os.path.isdir(dir):
		try:
			# 0700: metadata may reference private CLs; owner-only access.
			os.mkdir(dir, 0700)
		except:
			ui.warn('cannot mkdir %s: %s\n' % (dir, ExceptionDetail()))
			return None
	typecheck(dir, str)
	return dir
# Turn leading tabs into spaces, so that the common white space
# prefix doesn't get confused when people's editors write out
# some lines with spaces, some with tabs. Only a heuristic
# (some editors don't use 8 spaces either) but a useful one.
def TabsToSpaces(line):
	"""Expand each leading tab into 8 spaces; leave the rest of the line alone."""
	rest = line.lstrip('\t')
	ntabs = len(line) - len(rest)
	return ' ' * (8 * ntabs) + rest
# Strip maximal common leading white space prefix from text
def StripCommon(text):
	"""Strip the maximal common leading whitespace prefix from text.

	Leading tabs are first expanded (TabsToSpaces) so tab- and
	space-indented lines compare sensibly.  Leading blank lines are
	dropped and trailing blank lines collapsed to a single newline.
	"""
	typecheck(text, str)
	ws = None
	# First pass: compute the longest whitespace prefix shared by all
	# non-blank lines.
	for line in text.split('\n'):
		line = line.rstrip()
		if line == '':
			continue
		line = TabsToSpaces(line)
		white = line[:len(line)-len(line.lstrip())]
		if ws == None:
			ws = white
		else:
			common = ''
			for i in range(min(len(white), len(ws))+1):
				if white[0:i] == ws[0:i]:
					common = white[0:i]
			ws = common
		if ws == '':
			# No common prefix possible; stop early.
			break
	if ws == None:
		# Text had no non-blank lines; nothing to strip.
		return text
	# Second pass: remove the prefix from every line.
	t = ''
	for line in text.split('\n'):
		line = line.rstrip()
		line = TabsToSpaces(line)
		if line.startswith(ws):
			line = line[len(ws):]
		if line == '' and t == '':
			# Skip leading blank lines.
			continue
		t += line + '\n'
	# Collapse trailing blank lines to one newline.
	while len(t) >= 2 and t[-2:] == '\n\n':
		t = t[:-1]
	typecheck(t, str)
	return t
# Indent text with indent.
def Indent(text, indent):
	"""Prefix every line of text with indent; each line gains a trailing newline."""
	typecheck(text, str)
	typecheck(indent, str)
	out = ''.join(indent + line + '\n' for line in text.split('\n'))
	typecheck(out, str)
	return out
# Return the first line of l
def line1(text):
	"""Return the first line of text (everything before the first newline)."""
	typecheck(text, str)
	return text.split('\n', 1)[0]
# Boilerplate placed at the top of the editable CL text.
_change_prolog = """# Change list.
# Lines beginning with # are ignored.
# Multi-line values should be indented.
"""

# Regexp a conventional first description line must match:
# "pkg: summary", tag/release/weekly lines, "fix build", or "undo CL".
desc_re = '^(.+: |(tag )?(release|weekly)\.|fix build|undo CL)'

# Help text shown when the description does not match desc_re.
desc_msg = '''Your CL description appears not to use the standard form.
The first line of your change description is conventionally a
one-line summary of the change, prefixed by the primary affected package,
and is used as the subject for code review mail; the rest of the description
elaborates.
Examples:
encoding/rot13: new package
math: add IsInf, IsNaN
net: fix cname in LookupHost
unicode: update to Unicode 5.0.2
'''
def promptyesno(ui, msg):
	# Ask a yes/no question; choice index 0 ("yes") is the default.
	return ui.promptchoice(msg, ["&yes", "&no"], 0) == 0
def promptremove(ui, repo, f):
	# Offer to 'hg remove' a file that is listed but deleted on disk.
	if promptyesno(ui, "hg remove %s (y/n)?" % (f,)):
		if hg_commands.remove(ui, repo, 'path:'+f) != 0:
			ui.warn("error removing %s" % (f,))
def promptadd(ui, repo, f):
	# Offer to 'hg add' a file that exists on disk but is unknown to hg.
	if promptyesno(ui, "hg add %s (y/n)?" % (f,)):
		if hg_commands.add(ui, repo, 'path:'+f) != 0:
			ui.warn("error adding %s" % (f,))
def EditCL(ui, repo, cl):
	"""Open the CL in the user's editor, validate it, and update cl in place.

	Loops until the edited text parses cleanly, the description passes
	the convention checks, and the file list has been reconciled with
	the working directory state.  Returns "" on success or an error
	string when the user declines to re-edit.
	"""
	set_status(None)	# do not show status
	s = cl.EditorText()
	while True:
		s = ui.edit(s, ui.username())
		# We can't trust Mercurial + Python not to die before making the change,
		# so, by popular demand, just scribble the most recent CL edit into
		# $(hg root)/last-change so that if Mercurial does die, people
		# can look there for their work.
		try:
			f = open(repo.root+"/last-change", "w")
			f.write(s)
			f.close()
		except:
			pass
		clx, line, err = ParseCL(s, cl.name)
		if err != '':
			if not promptyesno(ui, "error parsing change list: line %d: %s\nre-edit (y/n)?" % (line, err)):
				return "change list not modified"
			continue
		# Check description.
		if clx.desc == '':
			if promptyesno(ui, "change list should have a description\nre-edit (y/n)?"):
				continue
		elif re.search('<enter reason for undo>', clx.desc):
			if promptyesno(ui, "change list description omits reason for undo\nre-edit (y/n)?"):
				continue
		elif not re.match(desc_re, clx.desc.split('\n')[0]):
			if promptyesno(ui, desc_msg + "re-edit (y/n)?"):
				continue
		# Check file list for files that need to be hg added or hg removed
		# or simply aren't understood.
		pats = ['path:'+f for f in clx.files]
		changed = hg_matchPattern(ui, repo, *pats, modified=True, added=True, removed=True)
		deleted = hg_matchPattern(ui, repo, *pats, deleted=True)
		unknown = hg_matchPattern(ui, repo, *pats, unknown=True)
		ignored = hg_matchPattern(ui, repo, *pats, ignored=True)
		clean = hg_matchPattern(ui, repo, *pats, clean=True)
		files = []
		for f in clx.files:
			if f in changed:
				files.append(f)
				continue
			if f in deleted:
				promptremove(ui, repo, f)
				files.append(f)
				continue
			if f in unknown:
				promptadd(ui, repo, f)
				files.append(f)
				continue
			if f in ignored:
				ui.warn("error: %s is excluded by .hgignore; omitting\n" % (f,))
				continue
			if f in clean:
				ui.warn("warning: %s is listed in the CL but unchanged\n" % (f,))
				files.append(f)
				continue
			# Not known to hg at all; keep plain files (with a warning),
			# drop directories and nonexistent paths.
			p = repo.root + '/' + f
			if os.path.isfile(p):
				ui.warn("warning: %s is a file but not known to hg\n" % (f,))
				files.append(f)
				continue
			if os.path.isdir(p):
				ui.warn("error: %s is a directory, not a file; omitting\n" % (f,))
				continue
			ui.warn("error: %s does not exist; omitting\n" % (f,))
		clx.files = files
		# Validation passed: copy the edited fields back into cl.
		cl.desc = clx.desc
		cl.reviewer = clx.reviewer
		cl.cc = clx.cc
		cl.files = clx.files
		cl.private = clx.private
		break
	return ""
# For use by submit, etc. (NOT by change)
# Get change list number or list of files from command line.
# If files are given, make a new change list.
def CommandLineCL(ui, repo, pats, opts, defaultcc=None):
	"""Resolve the command line into a CL object.

	If pats names an existing CL number, load it; otherwise create a
	new CL claiming the changed files that match pats.  Reviewer/cc
	options and defaultcc are merged in, and a brand-new CL gets its
	description from -m or from an editor session.

	Returns (cl, "") on success or (None, errmsg) on failure.
	"""
	if len(pats) > 0 and GoodCLName(pats[0]):
		if len(pats) != 1:
			return None, "cannot specify change number and file names"
		if opts.get('message'):
			return None, "cannot use -m with existing CL"
		cl, err = LoadCL(ui, repo, pats[0], web=True)
		if err != "":
			return None, err
	else:
		cl = CL("new")
		cl.local = True
		cl.files = ChangedFiles(ui, repo, pats, taken=Taken(ui, repo))
		if not cl.files:
			return None, "no files changed"
	if opts.get('reviewer'):
		cl.reviewer = Add(cl.reviewer, SplitCommaSpace(opts.get('reviewer')))
	if opts.get('cc'):
		cl.cc = Add(cl.cc, SplitCommaSpace(opts.get('cc')))
	if defaultcc:
		cl.cc = Add(cl.cc, defaultcc)
	if cl.name == "new":
		if opts.get('message'):
			cl.desc = opts.get('message')
		else:
			err = EditCL(ui, repo, cl)
			if err != '':
				return None, err
	return cl, ""
#######################################################################
# Change list file management
# Return list of changed files in repository that match pats.
# The patterns came from the command line, so we warn
# if they have no effect or cannot be understood.
def ChangedFiles(ui, repo, pats, taken=None):
	"""Return the sorted list of changed repository files matching pats.

	The patterns came from the command line, so each is run separately
	first to warn about patterns with no effect, to offer hg add for
	unknown files, and to offer hg remove for missing ones.  Files in
	taken (a map of file -> CL) are warned about and excluded.
	"""
	taken = taken or {}
	# Run each pattern separately so that we can warn about
	# patterns that didn't do anything useful.
	for p in pats:
		for f in hg_matchPattern(ui, repo, p, unknown=True):
			promptadd(ui, repo, f)
		for f in hg_matchPattern(ui, repo, p, removed=True):
			promptremove(ui, repo, f)
		files = hg_matchPattern(ui, repo, p, modified=True, added=True, removed=True)
		for f in files:
			if f in taken:
				ui.warn("warning: %s already in CL %s\n" % (f, taken[f].name))
		if not files:
			ui.warn("warning: %s did not match any modified files\n" % (p,))
	# Again, all at once (eliminates duplicates)
	l = hg_matchPattern(ui, repo, *pats, modified=True, added=True, removed=True)
	l.sort()
	if taken:
		l = Sub(l, taken.keys())
	return l
# Return list of changed files in repository that match pats and still exist.
def ChangedExistingFiles(ui, repo, pats, opts):
	"""Return the sorted list of changed files matching pats that still exist.

	Only modified and added files are considered, so removed files are
	excluded by construction.  (opts is accepted for interface
	compatibility but unused.)
	"""
	return sorted(hg_matchPattern(ui, repo, *pats, modified=True, added=True))
# Return list of files claimed by existing CLs
def Taken(ui, repo):
	"""Return a dict mapping each file claimed by an existing CL to that CL."""
	claimed = {}
	for cl in LoadAllCL(ui, repo, web=False).values():
		for filename in cl.files:
			claimed[filename] = cl
	return claimed
# Return list of changed files that are not claimed by other CLs
def DefaultFiles(ui, repo, pats):
	# Changed files matching pats that are not already claimed by another CL.
	return ChangedFiles(ui, repo, pats, taken=Taken(ui, repo))
#######################################################################
# File format checking.
def CheckFormat(ui, repo, files, just_warn=False):
	# Run the file-format checks: gofmt for Go files and tab-indentation
	# for C/assembly files.  just_warn downgrades failures to warnings.
	set_status("running gofmt")
	CheckGofmt(ui, repo, files, just_warn)
	CheckTabfmt(ui, repo, files, just_warn)
# Check that gofmt run on the list of files does not change them
def CheckGofmt(ui, repo, files, just_warn):
	"""Check that gofmt run on the given files would not change them.

	Filters the list down to the files gofmt applies to, runs
	'gofmt -l', and either warns (just_warn) or aborts when gofmt
	reports errors or lists files needing reformatting.
	"""
	files = gofmt_required(files)
	if not files:
		return
	cwd = os.getcwd()
	files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
	files = [f for f in files if os.access(f, 0)]
	if not files:
		return
	try:
		cmd = subprocess.Popen(["gofmt", "-l"] + files, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=sys.platform != "win32")
	except:
		raise hg_util.Abort("gofmt: " + ExceptionDetail())
	# communicate() closes stdin and drains stdout and stderr
	# concurrently, avoiding the pipe-buffer deadlock that sequential
	# stdout.read()/stderr.read() calls can hit when one pipe fills.
	data, errors = cmd.communicate()
	set_status("done with gofmt")
	if len(errors) > 0:
		ui.warn("gofmt errors:\n" + errors.rstrip() + "\n")
		return
	if len(data) > 0:
		# gofmt -l prints the names of files that need reformatting.
		msg = "gofmt needs to format these files (run hg gofmt):\n" + Indent(data, "\t").rstrip()
		if just_warn:
			ui.warn("warning: " + msg + "\n")
		else:
			raise hg_util.Abort(msg)
	return
# Check that *.[chys] files indent using tabs.
def CheckTabfmt(ui, repo, files, just_warn):
	"""Check that *.[chys] files under src/ indent using tabs.

	A file is flagged as soon as one of its lines starts with four
	spaces, except four-space-indented Plan 9 labels.  Depending on
	just_warn, offenders produce a warning or an abort.
	"""
	files = [f for f in files if f.startswith('src/') and re.search(r"\.[chys]$", f) and not re.search(r"\.tab\.[ch]$", f)]
	if not files:
		return
	cwd = os.getcwd()
	files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
	files = [f for f in files if os.access(f, 0)]
	badfiles = []
	for f in files:
		try:
			fp = open(f, 'r')
			try:
				for line in fp:
					# Four leading spaces is enough to complain about,
					# except that some Plan 9 code uses four spaces as the label indent,
					# so allow that.
					if line.startswith('    ') and not re.match('    [A-Za-z0-9_]+:', line):
						badfiles.append(f)
						break
			finally:
				# Close explicitly; relying on GC leaked the handle.
				fp.close()
		except:
			# ignore cannot open file, etc.
			pass
	if len(badfiles) > 0:
		msg = "these files use spaces for indentation (use tabs instead):\n\t" + "\n\t".join(badfiles)
		if just_warn:
			ui.warn("warning: " + msg + "\n")
		else:
			raise hg_util.Abort(msg)
	return
#######################################################################
# CONTRIBUTORS file parsing
# Cache of the parsed CONTRIBUTORS file; filled on the first
# successful ReadContributors call.
contributorsCache = None
# Optional URL to fetch CONTRIBUTORS from instead of the repository copy.
contributorsURL = None
def ReadContributors(ui, repo):
	"""Parse the CONTRIBUTORS file into a dict.

	Maps each lowercased email address (primary and alternates) to a
	(name, primary-email) tuple.  Reads from contributorsURL when set,
	otherwise from <repo root>/CONTRIBUTORS.  The result is cached in
	contributorsCache.

	On failure, warns and returns an empty dict (not None, so callers
	can still do membership tests); the failure is not cached, so a
	later call retries.
	"""
	global contributorsCache
	if contributorsCache is not None:
		return contributorsCache
	try:
		if contributorsURL is not None:
			opening = contributorsURL
			f = urllib2.urlopen(contributorsURL)
		else:
			opening = repo.root + '/CONTRIBUTORS'
			f = open(repo.root + '/CONTRIBUTORS', 'r')
	except:
		ui.write("warning: cannot open %s: %s\n" % (opening, ExceptionDetail()))
		return {}
	contributors = {}
	try:
		for line in f:
			# CONTRIBUTORS is a list of lines like:
			#	Person <email>
			#	Person <email> <alt-email>
			# The first email address is the one used in commit logs.
			if line.startswith('#'):
				continue
			m = re.match(r"([^<>]+\S)\s+(<[^<>\s]+>)((\s+<[^<>\s]+>)*)\s*$", line)
			if m:
				name = m.group(1)
				email = m.group(2)[1:-1]
				contributors[email.lower()] = (name, email)
				for extra in m.group(3).split():
					contributors[extra[1:-1].lower()] = (name, email)
	finally:
		# Close the file/URL handle; the original leaked it.
		f.close()
	contributorsCache = contributors
	return contributors
def CheckContributor(ui, repo, user=None):
	"""Return the CONTRIBUTORS entry ("Name <email>") for user, aborting if absent."""
	set_status("checking CONTRIBUTORS file")
	user, userline = FindContributor(ui, repo, user, warn=False)
	if not userline:
		raise hg_util.Abort("cannot find %s in CONTRIBUTORS" % (user,))
	return userline
def FindContributor(ui, repo, user=None, warn=True):
	"""Look up user (default: the configured ui username) in CONTRIBUTORS.

	Accepts either a bare email address or "Name <email>".  Returns
	(primary-email, "Name <email>") when found, or (user, None) when
	not found (warning first unless warn is False).
	"""
	if not user:
		user = ui.config("ui", "username")
		if not user:
			raise hg_util.Abort("[ui] username is not configured in .hgrc")
	user = user.lower()
	# Extract the address from a "Name <email>" form if present.
	m = re.match(r".*<(.*)>", user)
	if m:
		user = m.group(1)
	contributors = ReadContributors(ui, repo)
	if user not in contributors:
		if warn:
			ui.warn("warning: cannot find %s in CONTRIBUTORS\n" % (user,))
		return user, None
	user, email = contributors[user]
	return email, "%s <%s>" % (user, email)
#######################################################################
# Mercurial helper functions.
# Read http://mercurial.selenic.com/wiki/MercurialApi before writing any of these.
# We use the ui.pushbuffer/ui.popbuffer + hg_commands.xxx tricks for all interaction
# with Mercurial. It has proved the most stable as they make changes.
# Version string of the Mercurial we are running under.
hgversion = hg_util.version()

# We require Mercurial 1.9 and suggest Mercurial 2.0.
# The details of the scmutil package changed then,
# so allowing earlier versions would require extra band-aids below.
# Ubuntu 11.10 ships with Mercurial 1.9.1 as the default version.
hg_required = "1.9"
hg_suggested = "2.0"

old_message = """
The code review extension requires Mercurial """+hg_required+""" or newer.
You are using Mercurial """+hgversion+""".
To install a new Mercurial, use
sudo easy_install mercurial=="""+hg_suggested+"""
or visit http://mercurial.selenic.com/downloads/.
"""

linux_message = """
You may need to clear your current Mercurial installation by running:
sudo apt-get remove mercurial mercurial-common
sudo rm -rf /etc/mercurial
"""

# NOTE(review): this is a plain string comparison of version numbers;
# it works for the 1.x/2.x versions in play but is not a true numeric
# version compare.
if hgversion < hg_required:
	msg = old_message
	if os.access("/etc/mercurial", 0):
		msg += linux_message
	raise hg_util.Abort(msg)
from mercurial.hg import clean as hg_clean
from mercurial import cmdutil as hg_cmdutil
from mercurial import error as hg_error
from mercurial import match as hg_match
from mercurial import node as hg_node
class uiwrap(object):
	"""Capture the output of Mercurial commands.

	Construction pushes an output buffer and silences the ui
	(quiet on, verbose off); output() restores the previous settings
	and returns the captured text.
	"""
	def __init__(self, ui):
		self.ui = ui
		ui.pushbuffer()
		self.oldQuiet = ui.quiet
		ui.quiet = True
		self.oldVerbose = ui.verbose
		ui.verbose = False
	def output(self):
		ui = self.ui
		ui.quiet = self.oldQuiet
		ui.verbose = self.oldVerbose
		return ui.popbuffer()
def to_slash(path):
	"""Normalize a path to forward slashes on Windows; a no-op elsewhere."""
	if sys.platform != "win32":
		return path
	return path.replace('\\', '/')
def hg_matchPattern(ui, repo, *pats, **opts):
	"""Run 'hg status' with the given patterns/options and return the
	matched file names, relative to the repository root.

	opts selects the status classes (modified=True, unknown=True, ...).
	"""
	w = uiwrap(ui)
	hg_commands.status(ui, repo, *pats, **opts)
	text = w.output()
	ret = []
	prefix = to_slash(os.path.realpath(repo.root))+'/'
	for line in text.split('\n'):
		# Each status line is "<letter> <path>".
		f = line.split()
		if len(f) > 1:
			if len(pats) > 0:
				# Given patterns, Mercurial shows relative to cwd
				p = to_slash(os.path.realpath(f[1]))
				if not p.startswith(prefix):
					print >>sys.stderr, "File %s not in repo root %s.\n" % (p, prefix)
				else:
					ret.append(p[len(prefix):])
			else:
				# Without patterns, Mercurial shows relative to root (what we want)
				ret.append(to_slash(f[1]))
	return ret
def hg_heads(ui, repo):
	# Capture and return the output of 'hg heads'.
	w = uiwrap(ui)
	hg_commands.heads(ui, repo)
	return w.output()
# Boilerplate Mercurial output lines that should be hidden from the
# user when echoing pull/push output (see isNoise).
noise = [
	"",
	"resolving manifests",
	"searching for changes",
	"couldn't find merge tool hgmerge",
	"adding changesets",
	"adding manifests",
	"adding file changes",
	"all local heads known remotely",
]
def isNoise(line):
	"""Report whether line is boilerplate Mercurial output worth hiding."""
	return str(line) in noise
def hg_incoming(ui, repo):
	# Capture 'hg incoming' output; exit status 1 means "no changes"
	# and is not treated as an error.
	w = uiwrap(ui)
	ret = hg_commands.incoming(ui, repo, force=False, bundle="")
	if ret and ret != 1:
		raise hg_util.Abort(ret)
	return w.output()
def hg_log(ui, repo, **opts):
	"""Run 'hg log' with captured output and return the text.

	Mercurial's log command requires the date/keyword/rev/user options
	to be present; fill in empty defaults for any that are missing.
	"""
	for k in ['date', 'keyword', 'rev', 'user']:
		# setdefault replaces the deprecated opts.has_key(k) check.
		opts.setdefault(k, "")
	w = uiwrap(ui)
	ret = hg_commands.log(ui, repo, **opts)
	if ret:
		raise hg_util.Abort(ret)
	return w.output()
def hg_outgoing(ui, repo, **opts):
	# Capture 'hg outgoing' output; exit status 1 means "no changes"
	# and is not treated as an error.
	w = uiwrap(ui)
	ret = hg_commands.outgoing(ui, repo, **opts)
	if ret and ret != 1:
		raise hg_util.Abort(ret)
	return w.output()
def hg_pull(ui, repo, **opts):
	"""Run 'hg pull' verbosely, rewriting its output into a terse
	mv/+/- file listing and suppressing boilerplate noise lines."""
	w = uiwrap(ui)
	ui.quiet = False
	ui.verbose = True  # for file list
	err = hg_commands.pull(ui, repo, **opts)
	for line in w.output().split('\n'):
		if isNoise(line):
			continue
		# Order matters: the "getting X to Y" (rename) form must be
		# checked before the plain "getting X" form.
		if line.startswith('moving '):
			line = 'mv ' + line[len('moving '):]
		if line.startswith('getting ') and line.find(' to ') >= 0:
			line = 'mv ' + line[len('getting '):]
		if line.startswith('getting '):
			line = '+ ' + line[len('getting '):]
		if line.startswith('removing '):
			line = '- ' + line[len('removing '):]
		ui.write(line + '\n')
	return err
def hg_push(ui, repo, **opts):
	# Run 'hg push' verbosely, echoing everything except boilerplate noise.
	w = uiwrap(ui)
	ui.quiet = False
	ui.verbose = True
	err = hg_commands.push(ui, repo, **opts)
	for line in w.output().split('\n'):
		if not isNoise(line):
			ui.write(line + '\n')
	return err
def hg_commit(ui, repo, *pats, **opts):
	# Thin wrapper so all commits funnel through one place.
	return hg_commands.commit(ui, repo, *pats, **opts)
#######################################################################
# Mercurial precommit hook to disable commit except through this interface.
# Set to True only by this extension's own commit paths; the precommit
# hook below rejects any commit attempted while it is False.
commit_okay = False

def precommithook(ui, repo, **opts):
	# Mercurial precommit hook: returning False permits the commit.
	if commit_okay:
		return False # False means okay.
	ui.write("\ncodereview extension enabled; use mail, upload, or submit instead of commit\n\n")
	return True
#######################################################################
# @clnumber file pattern support
# We replace scmutil.match with the MatchAt wrapper to add the @clnumber pattern.
# State captured by InstallMatch for use inside MatchAt.
match_repo = None
match_ui = None
# Original scmutil.match function, which MatchAt wraps.
match_orig = None
def InstallMatch(ui, repo):
	"""Replace scmutil.match with MatchAt so file patterns may name CLs (@123456)."""
	global match_repo
	global match_ui
	global match_orig
	match_ui = ui
	match_repo = repo
	from mercurial import scmutil
	match_orig = scmutil.match
	scmutil.match = MatchAt
def MatchAt(ctx, pats=None, opts=None, globbed=False, default='relpath'):
	"""scmutil.match replacement adding the @clnumber pattern.

	Each '@name' pattern is expanded into explicit 'path:' patterns for
	the files of that CL ('@default' means all unclaimed changed
	files); everything else is passed through to the original matcher.
	"""
	taken = []
	files = []
	pats = pats or []
	opts = opts or {}
	for p in pats:
		if p.startswith('@'):
			taken.append(p)
			clname = p[1:]
			if clname == "default":
				files = DefaultFiles(match_ui, match_repo, [])
			else:
				if not GoodCLName(clname):
					raise hg_util.Abort("invalid CL name " + clname)
				cl, err = LoadCL(match_repo.ui, match_repo, clname, web=False)
				if err != '':
					raise hg_util.Abort("loading CL " + clname + ": " + err)
				if not cl.files:
					raise hg_util.Abort("no files in CL " + clname)
				files = Add(files, cl.files)
	# Replace the consumed @patterns with explicit path: patterns.
	pats = Sub(pats, taken) + ['path:'+f for f in files]
	# work-around for http://selenic.com/hg/rev/785bbc8634f8
	if not hasattr(ctx, 'match'):
		ctx = ctx[None]
	return match_orig(ctx, pats=pats, opts=opts, globbed=globbed, default=default)
#######################################################################
# Commands added by code review extension.
# As of Mercurial 2.1 the commands are all required to return integer
# exit codes, whereas earlier versions allowed returning arbitrary strings
# to be printed as errors. We wrap the old functions to make sure we
# always return integer exit codes now. Otherwise Mercurial dies
# with a TypeError traceback (unsupported operand type(s) for &: 'str' and 'int').
# Introduce a Python decorator to convert old functions to the new
# stricter convention.
def hgcommand(f):
	"""Decorator adapting old-style command functions to Mercurial >= 2.1.

	Older command functions returned None or an error string; newer
	Mercurial requires integer exit codes.  The wrapper passes
	integers through, maps falsy results to 0, and turns an error
	string into an Abort.
	"""
	def wrapped(ui, repo, *pats, **opts):
		err = f(ui, repo, *pats, **opts)
		if type(err) is int:
			return err
		if not err:
			return 0
		raise hg_util.Abort(err)
	return wrapped
#######################################################################
# hg change
@hgcommand
def change(ui, repo, *pats, **opts):
	"""create, edit or delete a change list
	Create, edit or delete a change list.
	A change list is a group of files to be reviewed and submitted together,
	plus a textual description of the change.
	Change lists are referred to by simple alphanumeric names.
	Changes must be reviewed before they can be submitted.
	In the absence of options, the change command opens the
	change list for editing in the default editor.
	Deleting a change with the -d or -D flag does not affect
	the contents of the files listed in that change. To revert
	the files listed in a change, use
	hg revert @123456
	before running hg change -d 123456.
	"""
	if codereview_disabled:
		return codereview_disabled
	# dirty maps CL objects that need flushing back to disk.
	dirty = {}
	if len(pats) > 0 and GoodCLName(pats[0]):
		# Operate on an existing CL named on the command line.
		name = pats[0]
		if len(pats) != 1:
			return "cannot specify CL name and file patterns"
		pats = pats[1:]
		cl, err = LoadCL(ui, repo, name, web=True)
		if err != '':
			return err
		if not cl.local and (opts["stdin"] or not opts["stdout"]):
			return "cannot change non-local CL " + name
	else:
		# Create a new CL from the changed files matching pats.
		name = "new"
		cl = CL("new")
		if repo[None].branch() != "default":
			return "cannot create CL outside default branch"
		dirty[cl] = True
		files = ChangedFiles(ui, repo, pats, taken=Taken(ui, repo))
	if opts["delete"] or opts["deletelocal"]:
		# Deletion: -d abandons the CL on the server too, -D only locally.
		if opts["delete"] and opts["deletelocal"]:
			return "cannot use -d and -D together"
		flag = "-d"
		if opts["deletelocal"]:
			flag = "-D"
		if name == "new":
			return "cannot use "+flag+" with file patterns"
		if opts["stdin"] or opts["stdout"]:
			return "cannot use "+flag+" with -i or -o"
		if not cl.local:
			return "cannot change non-local CL " + name
		if opts["delete"]:
			if cl.copied_from:
				return "original author must delete CL; hg change -D will remove locally"
			PostMessage(ui, cl.name, "*** Abandoned ***", send_mail=cl.mailed)
			EditDesc(cl.name, closed=True, private=cl.private)
		cl.Delete(ui, repo)
		return
	if opts["stdin"]:
		# -i: read a replacement CL description from standard input.
		s = sys.stdin.read()
		clx, line, err = ParseCL(s, name)
		if err != '':
			return "error parsing change list: line %d: %s" % (line, err)
		if clx.desc is not None:
			cl.desc = clx.desc;
			dirty[cl] = True
		if clx.reviewer is not None:
			cl.reviewer = clx.reviewer
			dirty[cl] = True
		if clx.cc is not None:
			cl.cc = clx.cc
			dirty[cl] = True
		if clx.files is not None:
			cl.files = clx.files
			dirty[cl] = True
		if clx.private != cl.private:
			cl.private = clx.private
			dirty[cl] = True
	if not opts["stdin"] and not opts["stdout"]:
		# Interactive path: open the CL in the editor.
		if name == "new":
			cl.files = files
		err = EditCL(ui, repo, cl)
		if err != "":
			return err
		dirty[cl] = True
	# Flush any modified CLs; a brand-new CL is also uploaded to get a number.
	for d, _ in dirty.items():
		name = d.name
		d.Flush(ui, repo)
		if name == "new":
			d.Upload(ui, repo, quiet=True)
	if opts["stdout"]:
		ui.write(cl.EditorText())
	elif opts["pending"]:
		ui.write(cl.PendingText())
	elif name == "new":
		if ui.quiet:
			ui.write(cl.name)
		else:
			ui.write("CL created: " + cl.url + "\n")
	return
#######################################################################
# hg code-login (broken?)
@hgcommand
def code_login(ui, repo, **opts):
	"""log in to code review server
	Logs in to the code review server, saving a cookie in
	a file in your home directory.
	"""
	if codereview_disabled:
		return codereview_disabled
	# A request with no payload forces the authentication round trip,
	# which writes the cookie file as a side effect.
	MySend(None)
#######################################################################
# hg clpatch / undo / release-apply / download
# All concerned with applying or unapplying patches to the repository.
@hgcommand
def clpatch(ui, repo, clname, **opts):
	"""import a patch from the code review server
	Imports a patch from the code review server into the local client.
	If the local client has already modified any of the files that the
	patch modifies, this command will refuse to apply the patch.
	Submitting an imported patch will keep the original author's
	name as the Author: line but add your own name to a Committer: line.
	"""
	# Only meaningful on the default branch; the heavy lifting is shared
	# with undo/release-apply in clpatch_or_undo.
	if repo[None].branch() != "default":
		return "cannot run hg clpatch outside default branch"
	return clpatch_or_undo(ui, repo, clname, opts, mode="clpatch")
@hgcommand
def undo(ui, repo, clname, **opts):
	"""undo the effect of a CL
	Creates a new CL that undoes an earlier CL.
	After creating the CL, opens the CL text for editing so that
	you can add the reason for the undo to the description.
	"""
	# Shares its implementation with clpatch/release-apply.
	if repo[None].branch() != "default":
		return "cannot run hg undo outside default branch"
	return clpatch_or_undo(ui, repo, clname, opts, mode="undo")
@hgcommand
def release_apply(ui, repo, clname, **opts):
	"""apply a CL to the release branch
	Creates a new CL copying a previously committed change
	from the main branch to the release branch.
	The current client must either be clean or already be in
	the release branch.
	The release branch must be created by starting with a
	clean client, disabling the code review plugin, and running:
	hg update weekly.YYYY-MM-DD
	hg branch release-branch.rNN
	hg commit -m 'create release-branch.rNN'
	hg push --new-branch
	Then re-enable the code review plugin.
	People can test the release branch by running
	hg update release-branch.rNN
	in a clean client. To return to the normal tree,
	hg update default
	Move changes since the weekly into the release branch
	using hg release-apply followed by the usual code review
	process and hg submit.
	When it comes time to tag the release, record the
	final long-form tag of the release-branch.rNN
	in the *default* branch's .hgtags file. That is, run
	hg update default
	and then edit .hgtags as you would for a weekly.
	"""
	c = repo[None]
	if not releaseBranch:
		return "no active release branches"
	if c.branch() != releaseBranch:
		# Switch to the release branch, but only from a clean client.
		if c.modified() or c.added() or c.removed():
			raise hg_util.Abort("uncommitted local changes - cannot switch branches")
		err = hg_clean(repo, releaseBranch)
		if err:
			return err
	try:
		err = clpatch_or_undo(ui, repo, clname, opts, mode="backport")
		if err:
			raise hg_util.Abort(err)
	except Exception, e:
		# On any failure, return the client to the default branch
		# before propagating the error.
		hg_clean(repo, "default")
		raise e
	return None
def rev2clname(rev):
	"""Extract the CL number from a revision description.

	The last line in the description that is a codereview URL is the
	real one; earlier ones might be part of the user-written text.
	Returns "" when no such URL is present.
	"""
	matches = re.findall('(?m)^http://codereview.appspot.com/([0-9]+)$', rev.description())
	if matches:
		return matches[-1]
	return ""
# Description template for an "hg undo" CL; filled in with
# (CL number, short revision hash).
undoHeader = """undo CL %s / %s
<enter reason for undo>
««« original CL description
"""

undoFooter = """
»»»
"""

# Description template for an "hg release-apply" (backport) CL; filled
# in with (release branch, first description line, CL number, short hash).
backportHeader = """[%s] %s
««« CL %s / %s
"""

backportFooter = """
»»»
"""
# Implementation of clpatch/undo.
# Implementation of clpatch/undo.
def clpatch_or_undo(ui, repo, clname, opts, mode):
	"""Shared implementation of clpatch, undo, and release-apply.

	mode is "clpatch", "undo", or "backport".  For undo/backport the
	patch is computed from the local repository history; for clpatch
	it is downloaded from the code review server.  The patch is
	line-number-ported if the client is at a different version, then
	applied with the external hgpatch tool, and a fresh CL is created.

	Returns an error string on failure, or None/"" on success.
	"""
	if codereview_disabled:
		return codereview_disabled
	if mode == "undo" or mode == "backport":
		# Find revision in Mercurial repository.
		# Assume CL number is 7+ decimal digits.
		# Otherwise is either change log sequence number (fewer decimal digits),
		# hexadecimal hash, or tag name.
		# Mercurial will fall over long before the change log
		# sequence numbers get to be 7 digits long.
		if re.match('^[0-9]{7,}$', clname):
			found = False
			for r in hg_log(ui, repo, keyword="codereview.appspot.com/"+clname, limit=100, template="{node}\n").split():
				rev = repo[r]
				# Last line with a code review URL is the actual review URL.
				# Earlier ones might be part of the CL description.
				n = rev2clname(rev)
				if n == clname:
					found = True
					break
			if not found:
				return "cannot find CL %s in local repository" % clname
		else:
			rev = repo[clname]
			if not rev:
				return "unknown revision %s" % clname
			clname = rev2clname(rev)
			if clname == "":
				return "cannot find CL name in revision description"
		# Create fresh CL and start with patch that would reverse the change.
		vers = hg_node.short(rev.node())
		cl = CL("new")
		desc = str(rev.description())
		if mode == "undo":
			cl.desc = (undoHeader % (clname, vers)) + desc + undoFooter
		else:
			# NOTE(review): uses undoFooter here rather than
			# backportFooter; the two strings have identical content,
			# so behavior is unaffected.
			cl.desc = (backportHeader % (releaseBranch, line1(desc), clname, vers)) + desc + undoFooter
		v1 = vers
		v0 = hg_node.short(rev.parents()[0].node())
		if mode == "undo":
			# Reverse diff: new revision back to its parent.
			arg = v1 + ":" + v0
		else:
			# Forward diff: parent to the revision being backported.
			vers = v0
			arg = v0 + ":" + v1
		patch = RunShell(["hg", "diff", "--git", "-r", arg])
	else: # clpatch
		cl, vers, patch, err = DownloadCL(ui, repo, clname)
		if err != "":
			return err
		if patch == emptydiff:
			return "codereview issue %s has no diff" % clname
	# find current hg version (hg identify)
	ctx = repo[None]
	parents = ctx.parents()
	id = '+'.join([hg_node.short(p.node()) for p in parents])
	# if version does not match the patch version,
	# try to update the patch line numbers.
	if vers != "" and id != vers:
		# "vers in repo" gives the wrong answer
		# on some versions of Mercurial. Instead, do the actual
		# lookup and catch the exception.
		try:
			repo[vers].description()
		except:
			return "local repository is out of date; sync to get %s" % (vers)
		patch1, err = portPatch(repo, patch, vers, id)
		if err != "":
			if not opts["ignore_hgpatch_failure"]:
				return "codereview issue %s is out of date: %s (%s->%s)" % (clname, err, vers, id)
		else:
			patch = patch1
	argv = ["hgpatch"]
	if opts["no_incoming"] or mode == "backport":
		argv += ["--checksync=false"]
	try:
		cmd = subprocess.Popen(argv, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None, close_fds=sys.platform != "win32")
	except:
		return "hgpatch: " + ExceptionDetail() + "\nInstall hgpatch with:\n$ go get code.google.com/p/go.codereview/cmd/hgpatch\n"
	out, err = cmd.communicate(patch)
	if cmd.returncode != 0 and not opts["ignore_hgpatch_failure"]:
		return "hgpatch failed"
	cl.local = True
	# hgpatch prints the names of the files it touched.
	cl.files = out.strip().split()
	if not cl.files and not opts["ignore_hgpatch_failure"]:
		return "codereview issue %s has no changed files" % clname
	files = ChangedFiles(ui, repo, [])
	extra = Sub(cl.files, files)
	if extra:
		ui.warn("warning: these files were listed in the patch but not changed:\n\t" + "\n\t".join(extra) + "\n")
	cl.Flush(ui, repo)
	if mode == "undo":
		# Let the user add the reason for the undo to the description.
		err = EditCL(ui, repo, cl)
		if err != "":
			return "CL created, but error editing: " + err
		cl.Flush(ui, repo)
	else:
		ui.write(cl.PendingText() + "\n")
# portPatch rewrites patch from being a patch against
# oldver to being a patch against newver.
def portPatch(repo, patch, oldver, newver):
    """Adjust the @@ hunk headers in patch (a git-style diff made against
    oldver) so that the patch applies against newver.

    Returns (newpatch, err): on success err is "" and newpatch is the
    adjusted patch text; on failure newpatch is "" and err describes the
    problem.  (Bug fix: one failure path used to return None for the
    patch value; both now consistently return "".)
    """
    lines = patch.splitlines(True)  # True = keep \n
    delta = None
    for i in range(len(lines)):
        line = lines[i]
        if line.startswith('--- a/'):
            # Entering a new per-file section: compute that file's
            # line-number deltas between the two revisions.
            file = line[6:-1]
            delta = fileDeltas(repo, file, oldver, newver)
        if not delta or not line.startswith('@@ '):
            continue
        # @@ -x,y +z,w @@ means the patch chunk replaces
        # the original file's line numbers x up to x+y with the
        # line numbers z up to z+w in the new file.
        # Find the delta from x in the original to the same
        # line in the current version and add that delta to both
        # x and z.
        m = re.match('@@ -([0-9]+),([0-9]+) \+([0-9]+),([0-9]+) @@', line)
        if not m:
            return "", "error parsing patch line numbers"
        n1, len1, n2, len2 = int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4))
        d, err = lineDelta(delta, n1, len1)
        if err != "":
            return "", err
        n1 += d
        n2 += d
        lines[i] = "@@ -%d,%d +%d,%d @@\n" % (n1, len1, n2, len2)
    newpatch = ''.join(lines)
    return newpatch, ""
# fileDelta returns the line number deltas for the given file's
# changes from oldver to newver.
# The deltas are a list of (n, len, newdelta) triples that say
# lines [n, n+len) were modified, and after that range the
# line numbers are +newdelta from what they were before.
def fileDeltas(repo, file, oldver, newver):
    # Ask Mercurial for the file's diff between the two revisions and
    # turn each "@@ -a,b +c,d @@" hunk header into an (a, b, shift) triple.
    diff_output = RunShell(["hg", "diff", "--git", "-r", oldver + ":" + newver, "path:" + file], silent_ok=True)
    hunk_re = re.compile('@@ -([0-9]+),([0-9]+) \+([0-9]+),([0-9]+) @@')
    result = []
    for text_line in diff_output.splitlines():
        match = hunk_re.match(text_line)
        if match is None:
            continue
        a, b, c, d = [int(match.group(g)) for g in (1, 2, 3, 4)]
        # After the modified range [a, a+b) the line numbers shift by
        # (c+d) - (a+b).
        result.append((a, b, c + d - (a + b)))
    return result
# lineDelta finds the appropriate line number delta to apply to the lines [n, n+len).
# It returns an error if those lines were rewritten by the patch.
def lineDelta(deltas, n, len):
    """Return (delta, err) for the line range [n, n+len).

    deltas is the fileDeltas output: (start, length, shift) triples in
    increasing start order.  If [n, n+len) overlaps a modified range the
    patch conflicts with newer changes and an error is returned.
    """
    d = 0
    for (old, oldlen, newdelta) in deltas:
        if old >= n+len:
            # This hunk (and all later ones) starts after our range ends.
            break
        # Bug fix: the overlap test must use the hunk's own length
        # (oldlen), not the query length (len) — the old form both
        # missed real overlaps and reported phantom ones.
        if old+oldlen > n:
            return 0, "patch and recent changes conflict"
        d = newdelta
    return d, ""
@hgcommand
def download(ui, repo, clname, **opts):
    """download a change from the code review server

    Download prints a description of the given change list
    followed by its diff, downloaded from the code review server.
    """
    if codereview_disabled:
        return codereview_disabled

    cl, vers, patch, err = DownloadCL(ui, repo, clname)
    if err != "":
        return err
    # Show the CL header first, then the unified diff.
    for section in [cl.EditorText(), patch]:
        ui.write(section + "\n")
    return
#######################################################################
# hg file
@hgcommand
def file(ui, repo, clname, pat, *pats, **opts):
    """assign files to or remove files from a change list

    Assign files to or (with -d) remove files from a change list.
    The -d option only removes files from the change list.
    It does not edit them or remove them from the repository.
    """
    if codereview_disabled:
        return codereview_disabled

    pats = tuple([pat] + list(pats))
    if not GoodCLName(clname):
        return "invalid CL name " + clname

    # CLs (used as keys) whose file lists changed and need to be flushed.
    dirty = {}
    cl, err = LoadCL(ui, repo, clname, web=False)
    if err != '':
        return err
    if not cl.local:
        return "cannot change non-local CL " + clname

    files = ChangedFiles(ui, repo, pats)

    if opts["delete"]:
        oldfiles = Intersect(files, cl.files)
        if oldfiles:
            if not ui.quiet:
                # Echo the commands that would reverse this removal.
                ui.status("# Removing files from CL. To undo:\n")
                ui.status("# cd %s\n" % (repo.root))
                for f in oldfiles:
                    ui.status("# hg file %s %s\n" % (cl.name, f))
            cl.files = Sub(cl.files, oldfiles)
            cl.Flush(ui, repo)
        else:
            ui.status("no such files in CL")
        return

    if not files:
        return "no such modified files"

    files = Sub(files, cl.files)
    # taken maps each modified file to the CL that currently claims it.
    taken = Taken(ui, repo)
    warned = False
    for f in files:
        if f in taken:
            if not warned and not ui.quiet:
                ui.status("# Taking files from other CLs. To undo:\n")
                ui.status("# cd %s\n" % (repo.root))
                warned = True
            ocl = taken[f]
            if not ui.quiet:
                ui.status("# hg file %s %s\n" % (ocl.name, f))
            if ocl not in dirty:
                # Subtract all requested files from the other CL at once;
                # first sight of that CL is sufficient.
                ocl.files = Sub(ocl.files, files)
                dirty[ocl] = True
    cl.files = Add(cl.files, files)
    dirty[cl] = True
    for d, _ in dirty.items():
        d.Flush(ui, repo)
    return
#######################################################################
# hg gofmt
@hgcommand
def gofmt(ui, repo, *pats, **opts):
    """apply gofmt to modified files

    Applies gofmt to the modified files in the repository that match
    the given patterns.
    """
    if codereview_disabled:
        return codereview_disabled

    files = ChangedExistingFiles(ui, repo, pats, opts)
    files = gofmt_required(files)
    if not files:
        return "no modified go files"
    # gofmt runs from the current directory, so convert repo-relative
    # paths to cwd-relative ones.
    cwd = os.getcwd()
    files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
    try:
        cmd = ["gofmt", "-l"]
        if not opts["list"]:
            cmd += ["-w"]  # rewrite files in place unless --list
        if os.spawnvp(os.P_WAIT, "gofmt", cmd + files) != 0:
            raise hg_util.Abort("gofmt did not exit cleanly")
    except hg_error.Abort, e:
        raise
    except:
        # Typically: gofmt is not installed or not executable.
        raise hg_util.Abort("gofmt: " + ExceptionDetail())
    return
def gofmt_required(files):
    """Filter files down to the Go files gofmt should process.

    Everything under test/ is excluded (test data is often deliberately
    unformatted), except test/bench/, which holds real programs.
    """
    required = []
    for name in files:
        if not name.endswith('.go'):
            continue
        if name.startswith('test/') and not name.startswith('test/bench/'):
            continue
        required.append(name)
    return required
#######################################################################
# hg mail
@hgcommand
def mail(ui, repo, *pats, **opts):
    """mail a change for review

    Uploads a patch to the code review server and then sends mail
    to the reviewer and CC list asking for a review.
    """
    if codereview_disabled:
        return codereview_disabled

    cl, err = CommandLineCL(ui, repo, pats, opts, defaultcc=defaultcc)
    if err != "":
        return err
    cl.Upload(ui, repo, gofmt_just_warn=True)
    if not cl.reviewer:
        # If no reviewer is listed, assign the review to defaultcc.
        # This makes sure that it appears in the
        # codereview.appspot.com/user/defaultcc
        # page, so that it doesn't get dropped on the floor.
        if not defaultcc:
            return "no reviewers listed in CL"
        # Whoever becomes the reviewer should no longer be just CC'ed.
        cl.cc = Sub(cl.cc, defaultcc)
        cl.reviewer = defaultcc
        cl.Flush(ui, repo)

    if cl.files == []:
        return "no changed files, not sending mail"

    cl.Mail(ui, repo)
#######################################################################
# hg p / hg pq / hg ps / hg pending
@hgcommand
def ps(ui, repo, *pats, **opts):
    """alias for hg p --short
    """
    # Identical to running pending with the --short flag forced on.
    opts.update(short=True)
    return pending(ui, repo, *pats, **opts)
@hgcommand
def pq(ui, repo, *pats, **opts):
    """alias for hg p --quick
    """
    # Identical to running pending with the --quick flag forced on.
    opts.update(quick=True)
    return pending(ui, repo, *pats, **opts)
@hgcommand
def pending(ui, repo, *pats, **opts):
    """show pending changes

    Lists pending changes followed by a list of unassigned but modified files.
    """
    if codereview_disabled:
        return codereview_disabled

    quick = opts.get('quick', False)
    short = opts.get('short', False)
    # Consulting the codereview server is slow; skip it for -q / -s.
    m = LoadAllCL(ui, repo, web=not quick and not short)
    for name in sorted(m.keys()):
        cl = m[name]
        if short:
            ui.write(name + "\t" + line1(cl.desc) + "\n")
        else:
            ui.write(cl.PendingText(quick=quick) + "\n")

    if short:
        return
    # Finish with the modified files that no CL has claimed.
    files = DefaultFiles(ui, repo, [])
    if len(files) > 0:
        ui.write("Changed files not in any CL:\n" + "".join("\t" + f + "\n" for f in files))
#######################################################################
# hg submit
# Abort the whole command: committing from a stale client could create
# an unwanted new head on the remote repository.
def need_sync():
    raise hg_util.Abort("local repository out of date; must sync before submit")
@hgcommand
def submit(ui, repo, *pats, **opts):
    """submit change to remote repository

    Submits change to remote repository.
    Bails out if the local repository is not in sync with the remote one.
    """
    if codereview_disabled:
        return codereview_disabled

    # We already called this on startup but sometimes Mercurial forgets.
    set_mercurial_encoding_to_utf8()

    if not opts["no_incoming"] and hg_incoming(ui, repo):
        need_sync()

    cl, err = CommandLineCL(ui, repo, pats, opts, defaultcc=defaultcc)
    if err != "":
        return err

    user = None
    if cl.copied_from:
        user = cl.copied_from
    userline = CheckContributor(ui, repo, user)
    typecheck(userline, str)

    # Assemble the R=/TBR=/CC= trailer appended to the commit message.
    about = ""
    if cl.reviewer:
        about += "R=" + JoinComma([CutDomain(s) for s in cl.reviewer]) + "\n"
    if opts.get('tbr'):
        tbr = SplitCommaSpace(opts.get('tbr'))
        cl.reviewer = Add(cl.reviewer, tbr)
        about += "TBR=" + JoinComma([CutDomain(s) for s in tbr]) + "\n"
    if cl.cc:
        about += "CC=" + JoinComma([CutDomain(s) for s in cl.cc]) + "\n"

    if not cl.reviewer:
        return "no reviewers listed in CL"

    if not cl.local:
        return "cannot submit non-local CL"

    # upload, to sync current patch and also get change number if CL is new.
    if not cl.copied_from:
        cl.Upload(ui, repo, gofmt_just_warn=True)

    # check gofmt for real; allowed upload to warn in order to save CL.
    cl.Flush(ui, repo)
    CheckFormat(ui, repo, cl.files)

    about += "%s%s\n" % (server_url_base, cl.name)

    if cl.copied_from:
        about += "\nCommitter: " + CheckContributor(ui, repo, None) + "\n"
    typecheck(about, str)

    if not cl.mailed and not cl.copied_from: # in case this is TBR
        cl.Mail(ui, repo)

    # submit changes locally
    message = cl.desc.rstrip() + "\n\n" + about
    typecheck(message, str)

    set_status("pushing " + cl.name + " to remote server")

    if hg_outgoing(ui, repo):
        raise hg_util.Abort("local repository corrupt or out-of-phase with remote: found outgoing changes")

    old_heads = len(hg_heads(ui, repo).split())

    # The precommit hook rejects commits unless commit_okay is set.
    global commit_okay
    commit_okay = True
    ret = hg_commit(ui, repo, *['path:'+f for f in cl.files], message=message, user=userline)
    commit_okay = False
    if ret:
        return "nothing changed"
    node = repo["-1"].node()

    # push to remote; if it fails for any reason, roll back
    try:
        new_heads = len(hg_heads(ui, repo).split())
        if old_heads != new_heads and not (old_heads == 0 and new_heads == 1):
            # Created new head, so we weren't up to date.
            need_sync()

        # Push changes to remote. If it works, we're committed. If not, roll back.
        try:
            hg_push(ui, repo)
        except hg_error.Abort, e:
            if e.message.find("push creates new heads") >= 0:
                # Remote repository had changes we missed.
                need_sync()
            raise
    except:
        # Undo the local commit so the working state matches the server.
        real_rollback()
        raise

    # We're committed. Upload final patch, close review, add commit message.
    # Rewrite the plain changeset hash into a browsable URL when the
    # remote is hosted on Google Code.
    changeURL = hg_node.short(node)
    url = ui.expandpath("default")
    m = re.match("(^https?://([^@/]+@)?([^.]+)\.googlecode\.com/hg/?)" + "|" +
        "(^https?://([^@/]+@)?code\.google\.com/p/([^/.]+)(\.[^./]+)?/?)", url)
    if m:
        if m.group(1): # prj.googlecode.com/hg/ case
            changeURL = "http://code.google.com/p/%s/source/detail?r=%s" % (m.group(3), changeURL)
        elif m.group(4) and m.group(7): # code.google.com/p/prj.subrepo/ case
            changeURL = "http://code.google.com/p/%s/source/detail?r=%s&repo=%s" % (m.group(6), changeURL, m.group(7)[1:])
        elif m.group(4): # code.google.com/p/prj/ case
            changeURL = "http://code.google.com/p/%s/source/detail?r=%s" % (m.group(6), changeURL)
        else:
            print >>sys.stderr, "URL: ", url
    else:
        print >>sys.stderr, "URL: ", url
    pmsg = "*** Submitted as " + changeURL + " ***\n\n" + message

    # When posting, move reviewers to CC line,
    # so that the issue stops showing up in their "My Issues" page.
    PostMessage(ui, cl.name, pmsg, reviewers="", cc=JoinComma(cl.reviewer+cl.cc))

    if not cl.copied_from:
        EditDesc(cl.name, closed=True, private=cl.private)
    cl.Delete(ui, repo)

    # If we just submitted on a release branch with a clean working
    # directory, hop back to the default branch.
    c = repo[None]
    if c.branch() == releaseBranch and not c.modified() and not c.added() and not c.removed():
        ui.write("switching from %s to default branch.\n" % releaseBranch)
        err = hg_clean(repo, "default")
        if err:
            return err
    return None
#######################################################################
# hg sync
@hgcommand
def sync(ui, repo, **opts):
    """synchronize with remote repository

    Incorporates recent changes from the remote repository
    into the local repository.
    """
    if codereview_disabled:
        return codereview_disabled

    # --local skips the network pull and only reconciles local CL state.
    if not opts["local"]:
        pull_err = hg_pull(ui, repo, update=True)
        if pull_err:
            return pull_err
    sync_changes(ui, repo)
def sync_changes(ui, repo):
    """Reconcile local CL records after a pull: close CLs whose changes
    have landed upstream, and drop files no longer locally modified."""
    # Look through recent change log descriptions to find
    # potential references to http://.*/our-CL-number.
    # Double-check them by looking at the Rietveld log.
    for rev in hg_log(ui, repo, limit=100, template="{node}\n").split():
        desc = repo[rev].description().strip()
        for clname in re.findall('(?m)^http://(?:[^\n]+)/([0-9]+)$', desc):
            if IsLocalCL(ui, repo, clname) and IsRietveldSubmitted(ui, clname, repo[rev].hex()):
                ui.warn("CL %s submitted as %s; closing\n" % (clname, repo[rev]))
                cl, err = LoadCL(ui, repo, clname, web=False)
                if err != "":
                    ui.warn("loading CL %s: %s\n" % (clname, err))
                    continue
                if not cl.copied_from:
                    # Only close issues we own on the server.
                    EditDesc(cl.name, closed=True, private=cl.private)
                cl.Delete(ui, repo)

    # Remove files that are not modified from the CLs in which they appear.
    all = LoadAllCL(ui, repo, web=False)
    changed = ChangedFiles(ui, repo, [])
    for cl in all.values():
        extra = Sub(cl.files, changed)
        if extra:
            ui.warn("Removing unmodified files from CL %s:\n" % (cl.name,))
            for f in extra:
                ui.warn("\t%s\n" % (f,))
            cl.files = Sub(cl.files, extra)
            cl.Flush(ui, repo)
        if not cl.files:
            # Empty CLs are not deleted automatically; tell the user how.
            if not cl.copied_from:
                ui.warn("CL %s has no files; delete (abandon) with hg change -d %s\n" % (cl.name, cl.name))
            else:
                ui.warn("CL %s has no files; delete locally with hg change -D %s\n" % (cl.name, cl.name))
    return
#######################################################################
# hg upload
@hgcommand
def upload(ui, repo, name, **opts):
    """upload diffs to the code review server

    Uploads the current modifications for a given change to the server.
    """
    if codereview_disabled:
        return codereview_disabled

    repo.ui.quiet = True
    cl, err = LoadCL(ui, repo, name, web=True)
    if err != "":
        return err
    if not cl.local:
        return "cannot upload non-local change"
    cl.Upload(ui, repo)
    # Echo the issue URL (blank-line terminated) so the user can find it.
    sys.stdout.write("%s%s\n\n" % (server_url_base, cl.name))
    return
#######################################################################
# Table of commands, supplied to Mercurial for installation.
# Option set shared by the commands that create or mail CLs
# (mail, submit): reviewer/CC lists and the change description.
review_opts = [
    ('r', 'reviewer', '', 'add reviewer'),
    ('', 'cc', '', 'add cc'),
    ('', 'tbr', '', 'add future reviewer'),
    ('m', 'message', '', 'change description (for new change)'),
]

# Mercurial command table: name -> (function, option list, synopsis).
cmdtable = {
    # The ^ means to show this command in the help text that
    # is printed when running hg with no arguments.
    "^change": (
        change,
        [
            ('d', 'delete', None, 'delete existing change list'),
            ('D', 'deletelocal', None, 'delete locally, but do not change CL on server'),
            ('i', 'stdin', None, 'read change list from standard input'),
            ('o', 'stdout', None, 'print change list to standard output'),
            ('p', 'pending', None, 'print pending summary to standard output'),
        ],
        "[-d | -D] [-i] [-o] change# or FILE ..."
    ),
    "^clpatch": (
        clpatch,
        [
            ('', 'ignore_hgpatch_failure', None, 'create CL metadata even if hgpatch fails'),
            ('', 'no_incoming', None, 'disable check for incoming changes'),
        ],
        "change#"
    ),
    # Would prefer to call this codereview-login, but then
    # hg help codereview prints the help for this command
    # instead of the help for the extension.
    "code-login": (
        code_login,
        [],
        "",
    ),
    "^download": (
        download,
        [],
        "change#"
    ),
    "^file": (
        file,
        [
            ('d', 'delete', None, 'delete files from change list (but not repository)'),
        ],
        "[-d] change# FILE ..."
    ),
    "^gofmt": (
        gofmt,
        [
            ('l', 'list', None, 'list files that would change, but do not edit them'),
        ],
        "FILE ..."
    ),
    "^pending|p": (
        pending,
        [
            ('s', 'short', False, 'show short result form'),
            ('', 'quick', False, 'do not consult codereview server'),
        ],
        "[FILE ...]"
    ),
    "^ps": (
        ps,
        [],
        "[FILE ...]"
    ),
    "^pq": (
        pq,
        [],
        "[FILE ...]"
    ),
    "^mail": (
        mail,
        review_opts + [
        ] + hg_commands.walkopts,
        "[-r reviewer] [--cc cc] [change# | file ...]"
    ),
    "^release-apply": (
        release_apply,
        [
            ('', 'ignore_hgpatch_failure', None, 'create CL metadata even if hgpatch fails'),
            ('', 'no_incoming', None, 'disable check for incoming changes'),
        ],
        "change#"
    ),
    # TODO: release-start, release-tag, weekly-tag
    "^submit": (
        submit,
        review_opts + [
            ('', 'no_incoming', None, 'disable initial incoming check (for testing)'),
        ] + hg_commands.walkopts + hg_commands.commitopts + hg_commands.commitopts2,
        "[-r reviewer] [--cc cc] [change# | file ...]"
    ),
    "^sync": (
        sync,
        [
            ('', 'local', None, 'do not pull changes from remote repository')
        ],
        "[--local]",
    ),
    "^undo": (
        undo,
        [
            ('', 'ignore_hgpatch_failure', None, 'create CL metadata even if hgpatch fails'),
            ('', 'no_incoming', None, 'disable check for incoming changes'),
        ],
        "change#"
    ),
    "^upload": (
        upload,
        [],
        "change#"
    ),
}
#######################################################################
# Mercurial extension initialization
# Installed in place of repo.rollback by reposetup: rollback would
# silently discard a commit that may already be recorded on the server.
def norollback(*pats, **opts):
    """(disabled when using this extension)"""
    raise hg_util.Abort("codereview extension enabled; use undo instead of rollback")
# One-shot guard: Mercurial invokes the reposetup hook for the local
# repository and for every peer; only the first call should initialize.
codereview_init = False
def reposetup(ui, repo):
    """Mercurial extension hook: configure codereview for this repository.

    Reads codereview.cfg, validates the default path, installs the
    precommit hook, and disables rollback.  On problems it records a
    reason in codereview_disabled instead of raising, so the commands
    can report it lazily.
    """
    global codereview_disabled
    global defaultcc

    # reposetup gets called both for the local repository
    # and also for any repository we are pulling or pushing to.
    # Only initialize the first time.
    global codereview_init
    if codereview_init:
        return
    codereview_init = True

    # Read repository-specific options from lib/codereview/codereview.cfg or codereview.cfg.
    root = ''
    try:
        root = repo.root
    except:
        # Yes, repo might not have root; see issue 959.
        codereview_disabled = 'codereview disabled: repository has no root'
        return

    repo_config_path = ''
    p1 = root + '/lib/codereview/codereview.cfg'
    p2 = root + '/codereview.cfg'
    if os.access(p1, os.F_OK):
        repo_config_path = p1
    else:
        repo_config_path = p2
    try:
        f = open(repo_config_path)
        for line in f:
            if line.startswith('defaultcc:'):
                defaultcc = SplitCommaSpace(line[len('defaultcc:'):])
            if line.startswith('contributors:'):
                global contributorsURL
                contributorsURL = line[len('contributors:'):].strip()
    except:
        codereview_disabled = 'codereview disabled: cannot open ' + repo_config_path
        return

    remote = ui.config("paths", "default", "")
    if remote.find("://") < 0:
        raise hg_util.Abort("codereview: default path '%s' is not a URL" % (remote,))

    InstallMatch(ui, repo)
    RietveldSetup(ui, repo)

    # Disable the Mercurial commands that might change the repository.
    # Only commands in this extension are supposed to do that.
    ui.setconfig("hooks", "precommit.codereview", precommithook)

    # Rollback removes an existing commit. Don't do that either.
    global real_rollback
    real_rollback = repo.rollback
    repo.rollback = norollback
#######################################################################
# Wrappers around upload.py for interacting with Rietveld
from HTMLParser import HTMLParser
# HTML form parser
class FormParser(HTMLParser):
    """Collects the name/value pairs of <input> and <textarea> elements
    into self.map, for resubmitting Rietveld's HTML edit forms."""
    def __init__(self):
        self.map = {}        # field name -> field value
        self.curtag = None   # name of the <textarea> being read, if any
        self.curdata = None  # accumulated text of that <textarea>
        HTMLParser.__init__(self)
    def handle_starttag(self, tag, attrs):
        if tag == "input":
            attr = dict(attrs)
            name = attr.get('name')
            if name is not None:
                self.map[name] = attr.get('value', '')
        if tag == "textarea":
            attr = dict(attrs)
            name = attr.get('name')
            if name is not None:
                # Value arrives via handle_data until the closing tag.
                self.curtag = name
                self.curdata = ''
    def handle_endtag(self, tag):
        if tag == "textarea" and self.curtag is not None:
            self.map[self.curtag] = self.curdata
            self.curtag = None
            self.curdata = None
    def handle_charref(self, name):
        # Numeric entity such as &#65; -> the corresponding character.
        self.handle_data(unichr(int(name)))
    def handle_entityref(self, name):
        import htmlentitydefs
        # Named entity: expand if known, otherwise keep it verbatim.
        expansion = htmlentitydefs.entitydefs.get(name)
        if expansion is None:
            self.handle_data("&" + name + ";")
        else:
            self.handle_data(expansion)
    def handle_data(self, data):
        if self.curdata is not None:
            self.curdata += data
def JSONGet(ui, path):
    """Fetch path from the codereview server and decode it as JSON.

    Returns the decoded (and fix_json-normalized) object, or None after
    printing a warning if the fetch or the decode fails.
    """
    try:
        raw = MySend(path, force_auth=False)
        typecheck(raw, str)
        result = fix_json(json.loads(raw))
    except:
        # Deliberately broad: any server or parse problem is reported
        # as a warning and treated as "no data".
        ui.warn("JSONGet %s: %s\n" % (path, ExceptionDetail()))
        return None
    return result
# Clean up json parser output to match our expectations:
#   * all strings are UTF-8-encoded str, not unicode.
#   * missing fields are missing, not None,
#     so that d.get("foo", defaultvalue) works.
def fix_json(x):
    """Normalize json.loads output in place where possible and return it."""
    t = type(x)
    if t in (str, int, float, bool, type(None)):
        pass
    elif t is unicode:
        x = x.encode("utf-8")
    elif t is list:
        # Rewrite elements in place so callers holding the list see them.
        for idx, elem in enumerate(x):
            x[idx] = fix_json(elem)
    elif t is dict:
        dead_keys = [k for k in x if x[k] is None]
        for k in x:
            if x[k] is not None:
                x[k] = fix_json(x[k])
        # Drop None-valued entries after fixing the rest.
        for k in dead_keys:
            del x[k]
    else:
        raise hg_util.Abort("unknown type " + str(t) + " in fix_json")
    if type(x) is str:
        x = x.replace('\r\n', '\n')
    return x
def IsRietveldSubmitted(ui, clname, hex):
    """Report whether Rietveld issue clname carries a '*** Submitted as'
    message whose revision prefix matches the changeset hash hex."""
    issue = JSONGet(ui, "/api/" + clname + "?messages=true")
    if issue is None:
        return False
    submitted_re = re.compile('\*\*\* Submitted as [^*]*?([0-9a-f]+) \*\*\*')
    for msg in issue.get("messages", []):
        m = submitted_re.match(msg.get("text", ""))
        # Require at least 8 hex digits so short prefixes cannot
        # accidentally match an unrelated changeset.
        if m is not None and len(m.group(1)) >= 8 and hex.startswith(m.group(1)):
            return True
    return False
def IsRietveldMailed(cl):
    """Report whether review mail was already sent for this CL, judging
    by the canned request-review text in the issue's message log."""
    request_text = "I'd like you to review this change"
    return any(msg.get("text", "").find(request_text) >= 0
        for msg in cl.dict.get("messages", []))
def DownloadCL(ui, repo, clname):
    """Fetch CL metadata and its latest patch from the codereview server.

    Returns (cl, vers, patch, err).  vers is the hg revision recorded
    when the patch was uploaded ("" if unknown) and patch is the raw
    diff text.  On failure the first three are None and err is set.
    """
    set_status("downloading CL " + clname)
    cl, err = LoadCL(ui, repo, clname, web=True)
    if err != "":
        return None, None, None, "error loading CL %s: %s" % (clname, err)

    # Find most recent diff
    diffs = cl.dict.get("patchsets", [])
    if not diffs:
        return None, None, None, "CL has no patch sets"
    patchid = diffs[-1]

    patchset = JSONGet(ui, "/api/" + clname + "/" + str(patchid))
    if patchset is None:
        return None, None, None, "error loading CL patchset %s/%d" % (clname, patchid)
    if patchset.get("patchset", 0) != patchid:
        return None, None, None, "malformed patchset information"

    vers = ""
    # Uploads record "diff -r <rev> ..." in the patchset message;
    # recover <rev> so the patch can be ported to the current tip.
    msg = patchset.get("message", "").split()
    if len(msg) >= 3 and msg[0] == "diff" and msg[1] == "-r":
        vers = msg[2]
    diff = "/download/issue" + clname + "_" + str(patchid) + ".diff"
    diffdata = MySend(diff, force_auth=False)

    # Print warning if email is not in CONTRIBUTORS file.
    email = cl.dict.get("owner_email", "")
    if not email:
        return None, None, None, "cannot find owner for %s" % (clname)
    him = FindContributor(ui, repo, email)
    me = FindContributor(ui, repo, None)
    if him == me:
        cl.mailed = IsRietveldMailed(cl)
    else:
        # Someone else's CL: record provenance so submit credits them.
        cl.copied_from = email

    return cl, vers, diffdata, ""
def MySend(request_path, payload=None,
    content_type="application/octet-stream",
    timeout=None, force_auth=True,
    **kwargs):
    """Run MySend1 maybe twice, because Rietveld is unreliable."""
    try:
        return MySend1(request_path, payload, content_type, timeout, force_auth, **kwargs)
    except Exception, e:
        # Retry exactly once, and only for a server-side failure.
        if type(e) != urllib2.HTTPError or e.code != 500: # only retry on HTTP 500 error
            raise
        print >>sys.stderr, "Loading "+request_path+": "+ExceptionDetail()+"; trying again in 2 seconds."
        time.sleep(2)
        return MySend1(request_path, payload, content_type, timeout, force_auth, **kwargs)
# Like upload.py Send but only authenticates when the
# redirect is to www.google.com/accounts. This keeps
# unnecessary redirects from happening during testing.
def MySend1(request_path, payload=None,
    content_type="application/octet-stream",
    timeout=None, force_auth=True,
    **kwargs):
    """Sends an RPC and returns the response.

    Args:
    request_path: The path to send the request to, eg /api/appversion/create.
    payload: The body of the request, or None to send an empty request.
    content_type: The Content-Type header to use.
    timeout: timeout in seconds; default None i.e. no timeout.
    (Note: for large requests on OS X, the timeout doesn't work right.)
    kwargs: Any keyword arguments are converted into query string parameters.

    Returns:
    The response body, as a string.
    """
    # TODO: Don't require authentication. Let the server say
    # whether it is necessary.
    global rpc
    if rpc == None:
        rpc = GetRpcServer(upload_options)
    # Historical: this body was lifted from an RPC-server method, so it
    # keeps the name 'self' for the server object.
    self = rpc
    if not self.authenticated and force_auth:
        self._Authenticate()
    if request_path is None:
        return

    # Apply the timeout only for the duration of this request.
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        tries = 0
        while True:
            tries += 1
            args = dict(kwargs)
            url = "http://%s%s" % (self.host, request_path)
            if args:
                url += "?" + urllib.urlencode(args)
            req = self._CreateRequest(url=url, data=payload)
            req.add_header("Content-Type", content_type)
            try:
                f = self.opener.open(req)
                response = f.read()
                f.close()
                # Translate \r\n into \n, because Rietveld doesn't.
                response = response.replace('\r\n', '\n')
                # who knows what urllib will give us
                if type(response) == unicode:
                    response = response.encode("utf-8")
                typecheck(response, str)
                return response
            except urllib2.HTTPError, e:
                if tries > 3:
                    raise
                elif e.code == 401:
                    # Credentials expired; log in again and retry.
                    self._Authenticate()
                elif e.code == 302:
                    # Only a redirect to the Google login page means we
                    # must authenticate; any other redirect is ignored.
                    loc = e.info()["location"]
                    if not loc.startswith('https://www.google.com/a') or loc.find('/ServiceLogin') < 0:
                        return ''
                    self._Authenticate()
                else:
                    raise
    finally:
        socket.setdefaulttimeout(old_timeout)
def GetForm(url):
    """Fetch url and return its HTML form fields as a str->str map,
    with \r\n line endings canonicalized to \n."""
    parser = FormParser()
    parser.feed(ustr(MySend(url)))  # feed wants unicode
    parser.close()
    # Convert keys and values back to utf-8 str to restore sanity.
    fields = {}
    for name, value in parser.map.items():
        fields[name.encode("utf-8")] = value.replace("\r\n", "\n").encode("utf-8")
    return fields
def EditDesc(issue, subject=None, desc=None, reviewers=None, cc=None, closed=False, private=False):
    """Rewrite the metadata of a Rietveld issue via its /edit form.

    Only the arguments passed as non-None (or true, for the checkbox
    flags) are changed; everything else keeps its server-side value.
    """
    set_status("uploading change to description")
    form_fields = GetForm("/" + issue + "/edit")
    # Overlay the requested text-field updates on the fetched form.
    for field, value in [('subject', subject), ('description', desc),
            ('reviewers', reviewers), ('cc', cc)]:
        if value is not None:
            form_fields[field] = value
    # Checkbox fields are only transmitted when set.
    if closed:
        form_fields['closed'] = "checked"
    if private:
        form_fields['private'] = "checked"
    ctype, body = EncodeMultipartFormData(form_fields.items(), [])
    response = MySend("/" + issue + "/edit", body, content_type=ctype)
    if response != "":
        print >>sys.stderr, "Error editing description:\n" + "Sent form: \n", form_fields, "\n", response
        sys.exit(2)
def PostMessage(ui, issue, message, reviewers=None, cc=None, send_mail=True, subject=None):
set_status("uploading message")
form_fields = GetForm("/" + issue + "/publish")
if reviewers is not None:
form_fields['reviewers'] = reviewers
if cc is not None:
form_fields['cc'] = cc
if send_mail:
form_fields['send_mail'] = "checked"
else:
del form_fields['send_mail']
if subject is not None:
form_fields['subject'] = subject
form_fields['message'] = message
form_fields['message_only'] = '1' # Don't include draft comments
if reviewers is not None or cc is not None:
form_fields['message_only'] = '' # Must set '' in order to override cc/reviewer
ctype = "applications/x-www-form-urlencoded"
body = urllib.urlencode(form_fields)
response = MySend("/" + issue + "/publish", body, content_type=ctype)
if response != "":
print response
sys.exit(2)
# Empty attribute bag used to mimic the optparse options object that
# upload.py's GetRpcServer expects (populated in RietveldSetup).
class opt(object):
    pass
def RietveldSetup(ui, repo):
    """Initialize the upload.py globals (server, credentials, upload
    options) from the Mercurial config, and detect the newest
    release-branch.* branch."""
    global force_google_account
    global rpc
    global server
    global server_url_base
    global upload_options
    global verbosity

    if not ui.verbose:
        verbosity = 0

    # Config options.
    x = ui.config("codereview", "server")
    if x is not None:
        server = x

    # TODO(rsc): Take from ui.username?
    email = None
    x = ui.config("codereview", "email")
    if x is not None:
        email = x

    server_url_base = "http://" + server + "/"

    testing = ui.config("codereview", "testing")
    force_google_account = ui.configbool("codereview", "force_google_account", False)

    # Fake optparse-style options object consumed by upload.py helpers.
    upload_options = opt()
    upload_options.email = email
    upload_options.host = None
    upload_options.verbose = 0
    upload_options.description = None
    upload_options.description_file = None
    upload_options.reviewers = None
    upload_options.cc = None
    upload_options.message = None
    upload_options.issue = None
    upload_options.download_base = False
    upload_options.revision = None
    upload_options.send_mail = False
    upload_options.vcs = None
    upload_options.server = server
    upload_options.save_cookies = True

    if testing:
        upload_options.save_cookies = False
        upload_options.email = "test@example.com"

    rpc = None

    # The lexically largest release-branch.* tag is the current release.
    global releaseBranch
    tags = repo.branchtags().keys()
    if 'release-branch.r100' in tags:
        # NOTE(rsc): This tags.sort is going to get the wrong
        # answer when comparing release-branch.r99 with
        # release-branch.r100. If we do ten releases a year
        # that gives us 4 years before we have to worry about this.
        raise hg_util.Abort('tags.sort needs to be fixed for release-branch.r100')
    tags.sort()
    for t in tags:
        if t.startswith('release-branch.'):
            releaseBranch = t
#######################################################################
# http://codereview.appspot.com/static/upload.py, heavily edited.
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import cookielib
import getpass
import logging
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
# The md5 module was deprecated in Python 2.5.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import readline
except ImportError:
pass
# The logging verbosity:
#  0: Errors only.
#  1: Status messages.
#  2: Info logs.
#  3: Debug logs.
# (RietveldSetup lowers this to 0 unless hg runs with --verbose.)
verbosity = 1

# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024

# whitelist for non-binary filetypes which do not start with "text/"
# .mm (Objective-C) shows up as application/x-freemind on my Linux box.
TEXT_MIMETYPES = [
    'application/javascript',
    'application/x-javascript',
    'application/x-freemind'
]
def GetEmail(prompt):
    """Prompts the user for their email address and returns it.

    The last used email address is saved to a file and offered up as a suggestion
    to the user. If the user presses enter without typing in anything the last
    used email address is used. If the user enters a new address, it is saved
    for next time we prompt.
    """
    last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
    last_email = ""
    if os.path.exists(last_email_file_name):
        try:
            # 'with' guarantees the handle is closed even when the read
            # fails partway (the original leaked it on a readline error).
            with open(last_email_file_name, "r") as last_email_file:
                last_email = last_email_file.readline().strip("\n")
            prompt += " [%s]" % last_email
        except IOError:
            # Best effort only: a missing/unreadable cache file just
            # means no suggestion is shown.
            pass
    email = raw_input(prompt + ": ").strip()
    if email:
        try:
            with open(last_email_file_name, "w") as last_email_file:
                last_email_file.write(email)
        except IOError:
            # Failing to cache the address is not an error.
            pass
    else:
        email = last_email
    return email
def StatusUpdate(msg):
    """Print a status message to stdout.

    The message is shown only when 'verbosity' is greater than 0.

    Args:
    msg: The string to print.
    """
    if verbosity <= 0:
        return
    sys.stdout.write("%s\n" % (msg,))
def ErrorExit(msg):
    """Print an error message to stderr and exit."""
    sys.stderr.write("%s\n" % (msg,))
    sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
    """Raised to indicate there was an error authenticating with ClientLogin."""
    def __init__(self, url, code, msg, headers, args):
        # Pass fp=None; the interesting detail is the parsed args dict,
        # not a response body.
        urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
        self.args = args
        # ClientLogin reports the failure kind in the "Error" field.
        self.reason = args["Error"]
class AbstractRpcServer(object):
"""Provides a common interface for a simple RPC server."""
    def __init__(self, host, auth_function, host_override=None, extra_headers={}, save_cookies=False):
        """Creates a new HttpRpcServer.

        Args:
        host: The host to send requests to.
        auth_function: A function that takes no arguments and returns an
        (email, password) tuple when called. Will be called if authentication
        is required.
        host_override: The host header to send to the server (defaults to host).
        extra_headers: A dict of extra headers to append to every request.
        save_cookies: If True, save the authentication cookies to local disk.
        If False, use an in-memory cookiejar instead. Subclasses must
        implement this functionality. Defaults to False.
        """
        self.host = host
        self.host_override = host_override
        self.auth_function = auth_function
        self.authenticated = False
        # NOTE(review): the mutable {} default for extra_headers is shared
        # across instances; harmless here since it is never mutated.
        self.extra_headers = extra_headers
        self.save_cookies = save_cookies
        # Subclasses supply the opener (cookie handling differs).
        self.opener = self._GetOpener()
        if self.host_override:
            logging.info("Server: %s; Host: %s", self.host, self.host_override)
        else:
            logging.info("Server: %s", self.host)
    def _GetOpener(self):
        """Returns an OpenerDirector for making HTTP requests.

        Returns:
        A urllib2.OpenerDirector object.
        """
        # Abstract: concrete servers decide how cookies are stored.
        raise NotImplementedError()
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
req = urllib2.Request(url, data=data)
if self.host_override:
req.add_header("Host", self.host_override)
for key, value in self.extra_headers.iteritems():
req.add_header(key, value)
return req
def _GetAuthToken(self, email, password):
"""Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The user's password
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
"""
account_type = "GOOGLE"
if self.host.endswith(".google.com") and not force_google_account:
# Needed for use inside Google.
account_type = "HOSTED"
req = self._CreateRequest(
url="https://www.google.com/accounts/ClientLogin",
data=urllib.urlencode({
"Email": email,
"Passwd": password,
"service": "ah",
"source": "rietveld-codereview-upload",
"accountType": account_type,
}),
)
try:
response = self.opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=") for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg, e.headers, response_dict)
else:
raise
def _GetAuthCookie(self, auth_token):
"""Fetches authentication cookies for an authentication token.
Args:
auth_token: The authentication token returned by ClientLogin.
Raises:
HTTPError: If there was an error fetching the authentication cookies.
"""
# This is a dummy value to allow us to identify when we're successful.
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
req = self._CreateRequest("http://%s/_ah/login?%s" % (self.host, urllib.urlencode(args)))
try:
response = self.opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg, response.headers, response.fp)
self.authenticated = True
def _Authenticate(self):
"""Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response (or a 302) and
directs us to authenticate ourselves with ClientLogin.
"""
for i in range(3):
credentials = self.auth_function()
try:
auth_token = self._GetAuthToken(credentials[0], credentials[1])
except ClientLoginError, e:
if e.reason == "BadAuthentication":
print >>sys.stderr, "Invalid username or password."
continue
if e.reason == "CaptchaRequired":
print >>sys.stderr, (
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.")
break
if e.reason == "NotVerified":
print >>sys.stderr, "Account not verified."
break
if e.reason == "TermsNotAgreed":
print >>sys.stderr, "User has not agreed to TOS."
break
if e.reason == "AccountDeleted":
print >>sys.stderr, "The user account has been deleted."
break
if e.reason == "AccountDisabled":
print >>sys.stderr, "The user account has been disabled."
break
if e.reason == "ServiceDisabled":
print >>sys.stderr, "The user's access to the service has been disabled."
break
if e.reason == "ServiceUnavailable":
print >>sys.stderr, "The service is not available; try again later."
break
raise
self._GetAuthCookie(auth_token)
return
def Send(self, request_path, payload=None,
content_type="application/octet-stream",
timeout=None,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
if not self.authenticated:
self._Authenticate()
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "http://%s%s" % (self.host, request_path)
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
try:
f = self.opener.open(req)
response = f.read()
f.close()
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 401 or e.code == 302:
self._Authenticate()
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
"""Provides a simplified RPC-style interface for HTTP requests."""
def _Authenticate(self):
"""Save the cookie jar after authentication."""
super(HttpRpcServer, self)._Authenticate()
if self.save_cookies:
StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
self.cookie_jar.save()
def _GetOpener(self):
"""Returns an OpenerDirector that supports cookies and ignores redirects.
Returns:
A urllib2.OpenerDirector object.
"""
opener = urllib2.OpenerDirector()
opener.add_handler(urllib2.ProxyHandler())
opener.add_handler(urllib2.UnknownHandler())
opener.add_handler(urllib2.HTTPHandler())
opener.add_handler(urllib2.HTTPDefaultErrorHandler())
opener.add_handler(urllib2.HTTPSHandler())
opener.add_handler(urllib2.HTTPErrorProcessor())
if self.save_cookies:
self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies_" + server)
self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
if os.path.exists(self.cookie_file):
try:
self.cookie_jar.load()
self.authenticated = True
StatusUpdate("Loaded authentication cookies from %s" % self.cookie_file)
except (cookielib.LoadError, IOError):
# Failed to load cookies - just ignore them.
pass
else:
# Create an empty cookie file with mode 600
fd = os.open(self.cookie_file, os.O_CREAT, 0600)
os.close(fd)
# Always chmod the cookie file
os.chmod(self.cookie_file, 0600)
else:
# Don't save cookies across runs of update.py.
self.cookie_jar = cookielib.CookieJar()
opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
return opener
def GetRpcServer(options):
    """Returns an instance of an AbstractRpcServer.

    Args:
        options: Parsed command-line options; this reads .server, .host,
            .email and .save_cookies.

    Returns:
        A new AbstractRpcServer, on which RPC calls can be made.
    """
    rpc_server_class = HttpRpcServer

    def GetUserCredentials():
        """Prompts the user for a username and password."""
        # Disable status prints so they don't obscure the password prompt.
        global global_status
        st = global_status
        global_status = None
        email = options.email
        if email is None:
            email = GetEmail("Email (login for uploading to %s)" % options.server)
        password = getpass.getpass("Password for %s: " % email)
        # Put status back.
        global_status = st
        return (email, password)

    # If this is the dev_appserver, use fake authentication.
    host = (options.host or options.server).lower()
    if host == "localhost" or host.startswith("localhost:"):
        email = options.email
        if email is None:
            email = "test@example.com"
            # Pass the argument lazily so logging formats the message only
            # when it is actually emitted.
            logging.info("Using debug user %s. Override with --email", email)
        server = rpc_server_class(
            options.server,
            lambda: (email, "password"),
            host_override=options.host,
            extra_headers={"Cookie": 'dev_appserver_login="%s:False"' % email},
            save_cookies=options.save_cookies)
        # Don't try to talk to ClientLogin.
        server.authenticated = True
        return server
    return rpc_server_class(options.server, GetUserCredentials,
                            host_override=options.host, save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
    """Encode form fields for multipart/form-data.

    Args:
        fields: A sequence of (name, value) elements for regular form fields.
        files: A sequence of (name, filename, value) elements for data to be
            uploaded as files.

    Returns:
        (content_type, body) ready for httplib.HTTP instance.

    Source:
        http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
    """
    BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
    CRLF = '\r\n'
    lines = []
    for (field_name, field_value) in fields:
        typecheck(field_name, str)
        typecheck(field_value, str)
        lines.extend([
            '--' + BOUNDARY,
            'Content-Disposition: form-data; name="%s"' % field_name,
            '',
            field_value,
        ])
    for (field_name, file_name, file_value) in files:
        typecheck(field_name, str)
        typecheck(file_name, str)
        typecheck(file_value, str)
        lines.extend([
            '--' + BOUNDARY,
            'Content-Disposition: form-data; name="%s"; filename="%s"' % (field_name, file_name),
            'Content-Type: %s' % GetContentType(file_name),
            '',
            file_value,
        ])
    # Closing boundary plus a trailing CRLF.
    lines.extend(['--' + BOUNDARY + '--', ''])
    body = CRLF.join(lines)
    content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
    return content_type, body
def GetContentType(filename):
    """Helper to guess the content-type from the filename."""
    guessed, _ = mimetypes.guess_type(filename)
    if guessed:
        return guessed
    # Unknown extension: fall back to a generic binary type.
    return 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
# Module-level switch consumed by RunShellWithReturnCode below.
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
                           universal_newlines=True, env=os.environ):
    """Executes a command and returns the output from stdout and the return code.

    Args:
        command: Command to execute.
        print_output: If True, the output is printed to stdout.
            If False, both stdout and stderr are ignored.
        universal_newlines: Use universal_newlines flag (default: True).
        env: Environment for the child process (default: this process's).

    Returns:
        Tuple (output, return code)
    """
    logging.info("Running %s", command)
    p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         shell=use_shell, universal_newlines=universal_newlines, env=env)
    if print_output:
        # Stream stdout line by line while also collecting it for the caller.
        output_array = []
        while True:
            line = p.stdout.readline()
            if not line:
                break
            print line.strip("\n")
            output_array.append(line)
        output = "".join(output_array)
    else:
        output = p.stdout.read()
    # NOTE(review): stderr is only drained after stdout is exhausted and the
    # child has been waited on; a child that fills the stderr pipe buffer
    # before closing stdout could deadlock here -- presumably acceptable for
    # the short commands this is used with, but confirm.
    p.wait()
    errout = p.stderr.read()
    if print_output and errout:
        print >>sys.stderr, errout
    p.stdout.close()
    p.stderr.close()
    return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
             print_output=False, env=os.environ):
    """Run *command* and return its stdout; abort the program on failure.

    Exits via ErrorExit when the command returns a non-zero status, or when
    it produces no output and silent_ok is False.
    """
    output, exit_code = RunShellWithReturnCode(command, print_output, universal_newlines, env)
    if exit_code:
        ErrorExit("Got error status from %s:\n%s" % (command, output))
    if not (silent_ok or output):
        ErrorExit("No output from %s" % command)
    return output
class VersionControlSystem(object):
"""Abstract base class providing an interface to the VCS."""
def __init__(self, options):
"""Constructor.
Args:
options: Command line options.
"""
self.options = options
def GenerateDiff(self, args):
"""Return the current diff as a string.
Args:
args: Extra arguments to pass to the diff command.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def CheckForUnknownFiles(self):
"""Show an "are you sure?" prompt if there are unknown files."""
unknown_files = self.GetUnknownFiles()
if unknown_files:
print "The following files are not added to version control:"
for line in unknown_files:
print line
prompt = "Are you sure to continue?(y/N) "
answer = raw_input(prompt).strip()
if answer != "y":
ErrorExit("User aborted")
def GetBaseFile(self, filename):
"""Get the content of the upstream version of a file.
Returns:
A tuple (base_content, new_content, is_binary, status)
base_content: The contents of the base file.
new_content: For text files, this is empty. For binary files, this is
the contents of the new file, since the diff output won't contain
information to reconstruct the current file.
is_binary: True iff the file is binary.
status: The status of the file.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetBaseFiles(self, diff):
"""Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
"""
files = {}
for line in diff.splitlines(True):
if line.startswith('Index:') or line.startswith('Property changes on:'):
unused, filename = line.split(':', 1)
# On Windows if a file has property changes its filename uses '\'
# instead of '/'.
filename = to_slash(filename.strip())
files[filename] = self.GetBaseFile(filename)
return files
def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
files):
"""Uploads the base files (and if necessary, the current ones as well)."""
def UploadFile(filename, file_id, content, is_binary, status, is_base):
"""Uploads a file to the server."""
set_status("uploading " + filename)
file_too_large = False
if is_base:
type = "base"
else:
type = "current"
if len(content) > MAX_UPLOAD_SIZE:
print ("Not uploading the %s file for %s because it's too large." %
(type, filename))
file_too_large = True
content = ""
checksum = md5(content).hexdigest()
if options.verbose > 0 and not file_too_large:
print "Uploading %s file for %s" % (type, filename)
url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
form_fields = [
("filename", filename),
("status", status),
("checksum", checksum),
("is_binary", str(is_binary)),
("is_current", str(not is_base)),
]
if file_too_large:
form_fields.append(("file_too_large", "1"))
if options.email:
form_fields.append(("user", options.email))
ctype, body = EncodeMultipartFormData(form_fields, [("data", filename, content)])
response_body = rpc_server.Send(url, body, content_type=ctype)
if not response_body.startswith("OK"):
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
# Don't want to spawn too many threads, nor do we want to
# hit Rietveld too hard, or it will start serving 500 errors.
# When 8 works, it's no better than 4, and sometimes 8 is
# too many for Rietveld to handle.
MAX_PARALLEL_UPLOADS = 4
sema = threading.BoundedSemaphore(MAX_PARALLEL_UPLOADS)
upload_threads = []
finished_upload_threads = []
class UploadFileThread(threading.Thread):
def __init__(self, args):
threading.Thread.__init__(self)
self.args = args
def run(self):
UploadFile(*self.args)
finished_upload_threads.append(self)
sema.release()
def StartUploadFile(*args):
sema.acquire()
while len(finished_upload_threads) > 0:
t = finished_upload_threads.pop()
upload_threads.remove(t)
t.join()
t = UploadFileThread(args)
upload_threads.append(t)
t.start()
def WaitForUploads():
for t in upload_threads:
t.join()
patches = dict()
[patches.setdefault(v, k) for k, v in patch_list]
for filename in patches.keys():
base_content, new_content, is_binary, status = files[filename]
file_id_str = patches.get(filename)
if file_id_str.find("nobase") != -1:
base_content = None
file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
file_id = int(file_id_str)
if base_content != None:
StartUploadFile(filename, file_id, base_content, is_binary, status, True)
if new_content != None:
StartUploadFile(filename, file_id, new_content, is_binary, status, False)
WaitForUploads()
def IsImage(self, filename):
"""Returns true if the filename has an image extension."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False
return mimetype.startswith("image/")
def IsBinary(self, filename):
"""Returns true if the guessed mimetyped isnt't in text group."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False # e.g. README, "real" binaries usually have an extension
# special case for text files which don't start with text/
if mimetype in TEXT_MIMETYPES:
return False
return not mimetype.startswith("text/")
class FakeMercurialUI(object):
    """Minimal stand-in for a Mercurial ui object that captures output."""

    def __init__(self):
        self.quiet = True
        self.output = ''

    def write(self, *args, **opts):
        # Accumulate everything written into a single buffer.
        self.output = self.output + ' '.join(args)

    def copy(self):
        # Mercurial copies the ui object internally; sharing this single
        # instance keeps all output in one buffer.
        return self

    def status(self, *args, **opts):
        # Status chatter is deliberately discarded.
        pass

    def readconfig(self, *args, **opts):
        # Configuration reads are no-ops for this fake.
        pass

    def expandpath(self, *args, **opts):
        # Delegate to the real (module-global) ui.
        return global_ui.expandpath(*args, **opts)

    def configitems(self, *args, **opts):
        return global_ui.configitems(*args, **opts)

    def config(self, *args, **opts):
        return global_ui.config(*args, **opts)
use_hg_shell = False # set to True to shell out to hg always; slower
class MercurialVCS(VersionControlSystem):
    """Implementation of the VersionControlSystem interface for Mercurial."""

    def __init__(self, options, ui, repo):
        """Args:
            options: Command line options.
            ui: The (real) Mercurial ui object.
            repo: The Mercurial localrepo object.
        """
        super(MercurialVCS, self).__init__(options)
        self.ui = ui
        self.repo = repo
        # Cached output of 'hg status -C'; filled lazily by get_hg_status.
        self.status = None
        # Absolute path to repository (we can be in a subdir)
        self.repo_dir = os.path.normpath(repo.root)
        # Compute the subdir
        cwd = os.path.normpath(os.getcwd())
        assert cwd.startswith(self.repo_dir)
        self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
        if self.options.revision:
            self.base_rev = self.options.revision
        else:
            # Prefer the parent of the MQ patch queue when one exists.
            mqparent, err = RunShellWithReturnCode(['hg', 'log', '--rev', 'qparent', '--template={node}'])
            if not err and mqparent != "":
                self.base_rev = mqparent
            else:
                out = RunShell(["hg", "parents", "-q"], silent_ok=True).strip()
                if not out:
                    # No revisions; use 0 to mean a repository with nothing.
                    out = "0:0"
                self.base_rev = out.split(':')[1].strip()

    def _GetRelPath(self, filename):
        """Get relative path of a file according to the current directory,
        given its logical path in the repo."""
        assert filename.startswith(self.subdir), (filename, self.subdir)
        return filename[len(self.subdir):].lstrip(r"\/")

    def GenerateDiff(self, extra_args):
        """Return an svn-style diff built from 'hg diff --git' output."""
        # If no file specified, restrict to the current subdir
        extra_args = extra_args or ["."]
        cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
        data = RunShell(cmd, silent_ok=True)
        svndiff = []
        filecount = 0
        for line in data.splitlines():
            # Raw string for the regex: '\S' is not a string escape, but raw
            # form is the correct idiom and avoids future deprecation warnings.
            m = re.match(r"diff --git a/(\S+) b/(\S+)", line)
            if m:
                # Modify line to make it look like as it comes from svn diff.
                # With this modification no changes on the server side are required
                # to make upload.py work with Mercurial repos.
                # NOTE: for proper handling of moved/copied files, we have to use
                # the second filename.
                filename = m.group(2)
                svndiff.append("Index: %s" % filename)
                svndiff.append("=" * 67)
                filecount += 1
                logging.info(line)
            else:
                svndiff.append(line)
        if not filecount:
            ErrorExit("No valid patches found in output from hg diff")
        return "\n".join(svndiff) + "\n"

    def GetUnknownFiles(self):
        """Return a list of files unknown to the VCS."""
        # (An unused 'args = []' local was removed here.)
        status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
                          silent_ok=True)
        unknown_files = []
        for line in status.splitlines():
            st, fn = line.split(" ", 1)
            if st == "?":
                unknown_files.append(fn)
        return unknown_files

    def get_hg_status(self, rev, path):
        """Return the 'hg status -C' line(s) for *path* (two lines for copies)."""
        # We'd like to use 'hg status -C path', but that is buggy
        # (see http://mercurial.selenic.com/bts/issue3023).
        # Instead, run 'hg status -C' without a path
        # and skim the output for the path we want.
        if self.status is None:
            if use_hg_shell:
                out = RunShell(["hg", "status", "-C", "--rev", rev])
            else:
                fui = FakeMercurialUI()
                ret = hg_commands.status(fui, self.repo, *[], **{'rev': [rev], 'copies': True})
                if ret:
                    raise hg_util.Abort(ret)
                out = fui.output
            self.status = out.splitlines()
        for i in range(len(self.status)):
            # line is
            #   A path
            #   M path
            # etc
            line = to_slash(self.status[i])
            if line[2:] == path:
                # A copy/rename source follows on the next line, indented by
                # two spaces instead of a status letter.
                if i+1 < len(self.status) and self.status[i+1][:2] == '  ':
                    return self.status[i:i+2]
                return self.status[i:i+1]
        raise hg_util.Abort("no status for " + path)

    def GetBaseFile(self, filename):
        """Implement VersionControlSystem.GetBaseFile for Mercurial."""
        set_status("inspecting " + filename)
        # "hg status" and "hg cat" both take a path relative to the current subdir
        # rather than to the repo root, but "hg diff" has given us the full path
        # to the repo root.
        base_content = ""
        new_content = None
        is_binary = False
        oldrelpath = relpath = self._GetRelPath(filename)
        out = self.get_hg_status(self.base_rev, relpath)
        status, what = out[0].split(' ', 1)
        if len(out) > 1 and status == "A" and what == relpath:
            # Added as a copy/rename: diff against the copy source instead.
            oldrelpath = out[1].strip()
            status = "M"
        if ":" in self.base_rev:
            base_rev = self.base_rev.split(":", 1)[0]
        else:
            base_rev = self.base_rev
        if status != "A":
            if use_hg_shell:
                base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath], silent_ok=True)
            else:
                base_content = str(self.repo[base_rev][oldrelpath].data())
            is_binary = "\0" in base_content  # Mercurial's heuristic
        if status != "R":
            # Read the working copy, closing the handle promptly
            # (previously the open() handle was leaked).
            fp = open(relpath, "rb")
            try:
                new_content = fp.read()
            finally:
                fp.close()
            is_binary = is_binary or "\0" in new_content
        if is_binary and base_content and use_hg_shell:
            # Fetch again without converting newlines
            base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
                                    silent_ok=True, universal_newlines=False)
        if not is_binary or not self.IsImage(relpath):
            new_content = None
        return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
    """Splits a patch into separate pieces for each file.

    Args:
        data: A string containing the output of svn diff.

    Returns:
        A list of 2-tuple (filename, text) where text is the svn diff output
        pertaining to filename.
    """
    patches = []
    filename = None
    diff = []
    for line in data.splitlines(True):
        new_filename = None
        if line.startswith('Index:'):
            unused, new_filename = line.split(':', 1)
            new_filename = new_filename.strip()
        elif line.startswith('Property changes on:'):
            unused, temp_filename = line.split(':', 1)
            # When a file is modified, paths use '/' between directories, however
            # when a property is modified '\' is used on Windows.  Make them the same
            # otherwise the file shows up twice.
            temp_filename = to_slash(temp_filename.strip())
            if temp_filename != filename:
                # File has property changes but no modifications, create a new diff.
                new_filename = temp_filename
        if new_filename:
            if filename and diff:
                patches.append((filename, ''.join(diff)))
            filename = new_filename
            diff = [line]
            continue
        # 'diff' starts as [] and is only ever rebound to a new list, so the
        # previous "if diff is not None" guard was dead code and was removed.
        diff.append(line)
    if filename and diff:
        patches.append((filename, ''.join(diff)))
    return patches
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
"""Uploads a separate patch for each file in the diff output.
Returns a list of [patch_key, filename] for each file.
"""
patches = SplitPatch(data)
rv = []
for patch in patches:
set_status("uploading patch for " + patch[0])
if len(patch[1]) > MAX_UPLOAD_SIZE:
print ("Not uploading the patch for " + patch[0] +
" because the file is too large.")
continue
form_fields = [("filename", patch[0])]
if not options.download_base:
form_fields.append(("content_upload", "1"))
files = [("data", "data.diff", patch[1])]
ctype, body = EncodeMultipartFormData(form_fields, files)
url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
print "Uploading patch for " + patch[0]
response_body = rpc_server.Send(url, body, content_type=ctype)
lines = response_body.splitlines()
if not lines or lines[0] != "OK":
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
rv.append([lines[1], patch[0]])
return rv
| Python |
###########################################################################
# Copyright 2010 Jim Pulokas
#
# This file is part of focusfun.
#
# focusfun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# focusfun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with focusfun. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
import v4l2
from fcntl import ioctl
import os
class Focuser(object):
    """Wrapper around a camera's V4L2 absolute-focus control.

    Accepts an already-open file descriptor, an open (Python 2) file
    object, or a device path such as '/dev/video0'.
    """
    def __init__(self, device):
        if isinstance(device, int):
            # Already an open file descriptor; use it as-is.
            self.fd = device
        elif isinstance(device, file):
            # An open file object; borrow its descriptor.
            self.fd = device.fileno()
        elif isinstance(device, str):
            # A device path; open it ourselves read/write.
            self.fd = os.open(device, os.O_RDWR)
        else:
            raise ValueError('must be initialized with file name, file object, or file descriptor, not: %s' % (type(device),))
        # Reusable control struct, pre-set to the absolute-focus control id.
        self.ctrl = v4l2.v4l2_control()
        self.ctrl.id = v4l2.V4L2_CID_FOCUS_ABSOLUTE
    def get(self):
        # Ask the driver to fill in the current focus value.
        ioctl(self.fd, v4l2.VIDIOC_G_CTRL, self.ctrl)
        return self.ctrl.value
    def set(self, value):
        # Push a new focus value to the driver.
        self.ctrl.value = value
        ioctl(self.fd, v4l2.VIDIOC_S_CTRL, self.ctrl)
| Python |
###########################################################################
# Copyright 2010 Jim Pulokas
#
# This file is part of focusfun.
#
# focusfun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# focusfun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with focusfun. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
'''
a Grabber object can grab images (currently 640x480 gray) from a
V4L2 video device via a GStreamer pipeline
'''
import gst
import numpack
# N900 uses v4l2camsrc, others may use v4l2src
#src_element = 'v4l2src'
src_element = 'v4l2camsrc'
class Grabber(object):
    """Grabs frames (currently 640x480, 8-bit gray) from a V4L2 device
    using a GStreamer pipeline terminated in an appsink."""
    def __init__(self, device):
        # Pipeline: camera source -> colorspace conversion to 8-bit gray ->
        # appsink, from which buffers can be pulled in Python.
        launchstr = '''
            %s name=v4l2src device="%s" !
            ffmpegcolorspace name=converter !
            video/x-raw-gray,bpp=8,width=640,height=480 !
            appsink name=appsink max-buffers=2 sync=False
        ''' % (src_element, device,)
        self.player = gst.parse_launch(launchstr)
        self.v4l2src = self.player.get_by_name('v4l2src')
        self.converter = self.player.get_by_name('converter')
        self.appsink = self.player.get_by_name('appsink')
        # Pad whose negotiated caps describe the actual frame geometry.
        self.finalsrc = self.converter.get_static_pad('src')
        self.player.set_state(gst.STATE_PLAYING)
        # Underlying V4L2 file descriptor (usable e.g. by Focuser).
        self.v4l2_fd = self.v4l2src.get_property('device-fd')
    def grab_buffer(self):
        # Pull one raw frame buffer from the appsink.
        buffer = self.appsink.emit('pull-buffer')
        return buffer
    def grab(self):
        # Grab one frame and return it as a (height, width) array.
        buffer = self.grab_buffer()
        caps = self.finalsrc.get_negotiated_caps()
        structure = caps[0]
        width = structure['width']
        height = structure['height']
        shape = (height, width)
        a = numpack.buffer_to_array(buffer, shape)
        return a
    def grab_to_file(self, filename):
        # Grab one frame and save it to *filename* via PIL.
        im = self.grab()
        pil_image = numpack.array_to_image(im)
        pil_image.save(filename)
def test():
    """Smoke test: grab a single frame from /dev/video0 and print its
    shape and first few pixel values."""
    g = Grabber('/dev/video0')
    a = g.grab()
    print 'Shape:', a.shape
    print 'First few elements:', a[0][:10]
if __name__ == '__main__':
    test()
| Python |
#!/usr/bin/env python
###########################################################################
# Copyright 2010 Jim Pulokas
#
# This file is part of focusfun.
#
# focusfun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# focusfun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with focusfun. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
import Image
import glob
import numpack
def range_diff(prefix):
files = glob.glob('%s-???-???.png' % (prefix,))
files.sort()
file0 = files[0]
im0 = Image.open(file0)
im0 = numpack.image_to_array(im0)
im0 = numpack.to_float(im0)
for i,file in enumerate(files[1:]):
file1 = file
im1 = Image.open(file1)
im1 = numpack.image_to_array(im1)
im1 = numpack.to_float(im1)
diff = numpack.num.absolute(im1 - im0)
diffim = numpack.array_to_image(diff)
diffim.save('%s-diff-%03d.png' % (prefix, i))
diff = numpack.num.sum(numpack.num.ravel(diff))
print diff, file0, file1
file0 = file1
im0 = im1
if __name__ == '__main__':
    # CLI entry point: the single argument is the image filename prefix.
    import sys
    prefix = sys.argv[1]
    range_diff(prefix)
| Python |
#!/usr/bin/env python
###########################################################################
# Copyright 2010 Jim Pulokas
#
# This file is part of focusfun.
#
# focusfun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# focusfun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with focusfun. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
#
# This module is based on another one that I was writing to create a general
# purpose v4l2 class. I never really finished that, so here I just hacked
# it to print out some info about focusing capabilities.
#
import os
import v4l2
from fcntl import ioctl
# Field names of interest on the structs returned by the V4L2 query ioctls.
capability_fields = ['bus_info', 'capabilities', 'card', 'driver', 'version']
queryctrl_fields = ['default', 'flags', 'id', 'maximum', 'minimum', 'name', 'step', 'type']
# Short flag names for v4l2_queryctrl; expanded to full constant names below.
queryctrl_flags_short = ['DISABLED', 'GRABBED', 'READ_ONLY', 'UPDATE', 'INACTIVE', 'SLIDER']
queryctrl_flags = ['V4L2_CTRL_FLAG_' + flag for flag in queryctrl_flags_short]
# Bidirectional maps between V4L2 control-id constant names and their values,
# built by scanning the v4l2 module's namespace once at import time.
name_cid = {}
cid_name = {}
for key,value in v4l2.__dict__.items():
    if key.startswith('V4L2_CID_'):
        name_cid[key] = value
        cid_name[value] = key
def flags_to_list(flags):
    """Return the V4L2 queryctrl flag constant names whose bits are set in *flags*."""
    return [name for name in queryctrl_flags if getattr(v4l2, name) & flags]
class MenuType(object):
    """Maps between the choice names and indexes of a V4L2 menu control."""

    def __init__(self, dev, qctrl):
        # Enumerate each menu entry via VIDIOC_QUERYMENU and record the
        # name<->index mapping in both directions.
        self.max = qctrl.maximum
        self.choices = {}
        self.ichoices = {}
        index = 0
        while index <= self.max:
            query = v4l2.v4l2_querymenu()
            query.id = qctrl.id
            query.index = index
            ioctl(dev, v4l2.VIDIOC_QUERYMENU, query)
            self.choices[query.name] = index
            self.ichoices[index] = query.name
            index += 1

    def __call__(self, arg):
        """Accept a choice name or an in-range index; return the index."""
        if isinstance(arg, int) and 0 <= arg <= self.max:
            return arg
        if not isinstance(arg, int) and arg in self.choices:
            return self.choices[arg]
        raise ValueError('expected name or index of a menu choice')

    def __str__(self):
        ordered = [self.ichoices[i] for i in range(self.max+1)]
        return 'Menu%s' % (ordered,)
class ButtonType(object):
    """Marker type representing a V4L2 button control; carries no state."""
    pass
class Device(object):
    '''Wrapper around a V4L2 device node for querying capabilities and
    enumerating/reading/writing controls via ioctl.'''

    def __init__(self, filename):
        # O_RDONLY is sufficient for the query/control ioctls used here.
        self.dev = os.open(filename, os.O_RDONLY)
        self.controls = self.queryctrl_all()

    def querycap(self):
        '''Return the raw v4l2_capability struct for this device.'''
        cp = v4l2.v4l2_capability()
        ioctl(self.dev, v4l2.VIDIOC_QUERYCAP, cp)
        return cp

    def print_cap(self):
        '''Pretty-print the device capabilities; the ``capabilities``
        bitmask is expanded into the matching V4L2_CAP_* names.'''
        cp = self.querycap()
        for attr in capability_fields:
            if attr == 'capabilities':
                caps = getattr(cp, attr)
                prefix = 'V4L2_CAP_'
                capslist = [v4l2attr[len(prefix):]
                            for v4l2attr, value in v4l2.__dict__.items()
                            if v4l2attr.startswith(prefix) and caps & value]
                print('%s: %s' % (attr, capslist))
            else:
                print('%s: %s' % (attr, getattr(cp, attr)))

    def print_control(self, ctrl):
        '''Print one control dict (as produced by queryctrl_to_dict).'''
        print('')
        print('Control: "%s"' % (ctrl['name'],))
        for field in queryctrl_fields:
            print('    %s: %s' % (field, ctrl[field]))
        print('    API: %s' % (ctrl['API'],))

    def print_controls(self):
        # NOTE: intentionally limited to focus-related controls; this module
        # was hacked down from a general-purpose tool (see file header).
        for ctrl in self.controls:
            if 'focus' in ctrl['name'].lower():
                self.print_control(ctrl)

    def queryctrl(self, id):
        '''Query a single control by id; returns the raw v4l2_queryctrl.'''
        ct = v4l2.v4l2_queryctrl()
        ct.id = id
        ioctl(self.dev, v4l2.VIDIOC_QUERYCTRL, ct)
        return ct

    def queryctrl_all(self):
        '''Enumerate every control the driver exposes, as a list of dicts.

        Uses the V4L2_CTRL_FLAG_NEXT_CTRL enumeration protocol; returns []
        when the driver does not support it.
        '''
        qctrl = v4l2.v4l2_queryctrl()
        qctrl.id = v4l2.V4L2_CTRL_FLAG_NEXT_CTRL
        try:
            ioctl(self.dev, v4l2.VIDIOC_QUERYCTRL, qctrl)
        except Exception:
            # Narrowed from a bare ``except:`` so Ctrl-C still propagates.
            print('Device does not support V4L2_CTRL_FLAG_NEXT. Cannot query all controls.')
            return []
        controls = []
        while True:
            controls.append(self.queryctrl_to_dict(qctrl))
            qctrl.id |= v4l2.V4L2_CTRL_FLAG_NEXT_CTRL
            try:
                ioctl(self.dev, v4l2.VIDIOC_QUERYCTRL, qctrl)
            except Exception:
                # The driver signals end-of-enumeration with an ioctl error.
                break
        return controls

    def type_to_pytype(self, qctrl):
        '''Map a V4L2 control type to a python type / helper instance.'''
        t = qctrl.type
        if t == v4l2.V4L2_CTRL_TYPE_INTEGER:
            return int
        if t == v4l2.V4L2_CTRL_TYPE_BOOLEAN:
            return bool
        if t == v4l2.V4L2_CTRL_TYPE_MENU:
            return MenuType(self.dev, qctrl)
        if t == v4l2.V4L2_CTRL_TYPE_BUTTON:
            return ButtonType()
        raise ValueError('what to do with control type?: %s' % (t,))

    def queryctrl_to_dict(self, qctrl):
        '''Convert a raw v4l2_queryctrl struct into a plain dict.'''
        d = {}
        ## fields copied verbatim
        for field in ('default', 'id', 'minimum', 'maximum', 'name', 'step'):
            d[field] = getattr(qctrl, field)
        ## fields that need to be pythonified
        d['flags'] = flags_to_list(qctrl.flags)
        d['type'] = self.type_to_pytype(qctrl)
        # The V4L2_CID_* constant name, or None for private/unknown controls.
        d['API'] = cid_name.get(qctrl.id)
        return d

    def g_ctrl(self, id):
        '''Read and return the current value of control *id*.'''
        val = v4l2.v4l2_control()
        val.id = id
        ioctl(self.dev, v4l2.VIDIOC_G_CTRL, val)
        return val.value

    def s_ctrl(self, id, value):
        '''Set control *id* to *value*.'''
        val = v4l2.v4l2_control()
        val.id = id
        val.value = value
        ioctl(self.dev, v4l2.VIDIOC_S_CTRL, val)

    def get_inputs(self):
        '''Enumerate the device's video inputs via VIDIOC_ENUMINPUT.'''
        inputs = []
        index = 0
        inp = v4l2.v4l2_input()
        inp.index = index
        while True:
            try:
                # Return value was previously captured into an unused local.
                ioctl(self.dev, v4l2.VIDIOC_ENUMINPUT, inp)
            except Exception:
                # End of enumeration (narrowed from a bare ``except:``).
                break
            inputs.append(inp)
            index += 1
            inp = v4l2.v4l2_input()
            inp.index = index
        return inputs
# CLI: print capabilities and focus-related controls for a device node.
if __name__ == '__main__':
    import sys
    # The unused ``import time`` was removed.
    try:
        devname = sys.argv[1]
    except IndexError:
        # Narrowed from a bare ``except:``; fall back to the default device.
        devname = '/dev/video0'
    dev = Device(devname)
    dev.print_cap()
    dev.print_controls()
| Python |
#!/usr/bin/env python
###########################################################################
# Copyright 2010 Jim Pulokas
#
# This file is part of focusfun.
#
# focusfun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# focusfun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with focusfun. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
import time
import v4l2focuser
class Watcher(v4l2focuser.Focuser):
    '''
    Initialize it just like Focuser, then call either watch_changes or
    watch_stable to start an infinite loop.
    '''
    def watch_changes(self):
        '''
        Check focus value continuously.
        Report beginning and end of every focus change.
        '''
        ## Each record is [timestamp, focus value, not-yet-printed flag].
        f_old = [time.time(), None, False]
        while True:
            f_new = [time.time(), self.get(), True]
            if f_new[1] != f_old[1]:
                # Value changed: print the last unreported sample (end of the
                # previous plateau) and the new one, each exactly once.
                for f in (f_old, f_new):
                    if f[2]:
                        print f[0], f[1]
                        f[2] = False
            f_old = f_new
    def watch_stable(self, dt):
        '''
        Check focus values continuously.
        Report only when values stays stable for dt or more seconds.
        '''
        # t00: start of the run (for relative timestamps);
        # t0: start of the current plateau of identical readings.
        t00 = t0 = time.time()
        f0 = self.get()
        stable = False
        while True:
            t1 = time.time()
            f1 = self.get()
            # if unchanged
            if f0 == f1:
                # report once, the first time the plateau exceeds dt seconds
                if not stable and t1-t0 > dt:
                    stable = True
                    print '%.2f,%d' % (t1-t00, f1)
            # changed: restart the plateau timer
            else:
                t0 = t1
                f0 = f1
                stable = False
# CLI: watcher.py {changes|stable} [device]
if __name__ == '__main__':
    import sys
    method = sys.argv[1]
    try:
        dev = sys.argv[2]
    except IndexError:
        dev = '/dev/video0'
    # Bug fix: the parsed device path was previously ignored and
    # '/dev/video0' was always used.
    w = Watcher(dev)
    if method == 'changes':
        w.watch_changes()
    elif method == 'stable':
        w.watch_stable(0.01)
| Python |
#!/usr/bin/env python
###########################################################################
# Copyright 2010 Jim Pulokas
#
# This file is part of focusfun.
#
# focusfun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# focusfun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with focusfun. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
import gstgrabber
import v4l2focuser
def grab_sequence(prefix, fseries):
grabber = gstgrabber.Grabber('/dev/video0')
focuser = v4l2focuser.Focuser(grabber.v4l2_fd)
for i,f in enumerate(fseries):
print i,f
focuser.set(f)
filename = '%s-%03d-%03d.png' % (prefix, i,f,)
grabber.grab_to_file(filename)
print 'saved', filename
# CLI: grabseq.py PREFIX START STOP STEP
if __name__ == '__main__':
    import sys
    prefix = sys.argv[1]
    fseries = range(int(sys.argv[2]), int(sys.argv[3]), int(sys.argv[4]))
    grab_sequence(prefix, fseries)
| Python |
#!/usr/bin/env python
###########################################################################
# Copyright 2010 Jim Pulokas
#
# This file is part of focusfun.
#
# focusfun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# focusfun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with focusfun. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
import time
import v4l2focuser
class Watcher(v4l2focuser.Focuser):
    '''
    Initialize it just like Focuser, then call either watch_changes or
    watch_stable to start an infinite loop.
    '''
    def watch_changes(self):
        '''
        Check focus value continuously.
        Report beginning and end of every focus change.
        '''
        ## Each record is [timestamp, focus value, not-yet-printed flag].
        f_old = [time.time(), None, False]
        while True:
            f_new = [time.time(), self.get(), True]
            if f_new[1] != f_old[1]:
                # Value changed: print the last unreported sample (end of the
                # previous plateau) and the new one, each exactly once.
                for f in (f_old, f_new):
                    if f[2]:
                        print f[0], f[1]
                        f[2] = False
            f_old = f_new
    def watch_stable(self, dt):
        '''
        Check focus values continuously.
        Report only when values stays stable for dt or more seconds.
        '''
        # t00: start of the run (for relative timestamps);
        # t0: start of the current plateau of identical readings.
        t00 = t0 = time.time()
        f0 = self.get()
        stable = False
        while True:
            t1 = time.time()
            f1 = self.get()
            # if unchanged
            if f0 == f1:
                # report once, the first time the plateau exceeds dt seconds
                if not stable and t1-t0 > dt:
                    stable = True
                    print '%.2f,%d' % (t1-t00, f1)
            # changed: restart the plateau timer
            else:
                t0 = t1
                f0 = f1
                stable = False
# CLI: watcher.py {changes|stable} [device]
if __name__ == '__main__':
    import sys
    method = sys.argv[1]
    try:
        dev = sys.argv[2]
    except IndexError:
        dev = '/dev/video0'
    # Bug fix: the parsed device path was previously ignored and
    # '/dev/video0' was always used.
    w = Watcher(dev)
    if method == 'changes':
        w.watch_changes()
    elif method == 'stable':
        w.watch_stable(0.01)
| Python |
#!/usr/bin/env python
###########################################################################
# Copyright 2010 Jim Pulokas
#
# This file is part of focusfun.
#
# focusfun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# focusfun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with focusfun. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
import Image
import glob
import numpack
def range_diff(prefix):
files = glob.glob('%s-???-???.png' % (prefix,))
files.sort()
file0 = files[0]
im0 = Image.open(file0)
im0 = numpack.image_to_array(im0)
im0 = numpack.to_float(im0)
for i,file in enumerate(files[1:]):
file1 = file
im1 = Image.open(file1)
im1 = numpack.image_to_array(im1)
im1 = numpack.to_float(im1)
diff = numpack.num.absolute(im1 - im0)
diffim = numpack.array_to_image(diff)
diffim.save('%s-diff-%03d.png' % (prefix, i))
diff = numpack.num.sum(numpack.num.ravel(diff))
print diff, file0, file1
file0 = file1
im0 = im1
# CLI: rangediff.py PREFIX
if __name__ == '__main__':
    import sys
    prefix = sys.argv[1]
    range_diff(prefix)
| Python |
#!/usr/bin/env python
###########################################################################
# Copyright 2010 Jim Pulokas
#
# This file is part of focusfun.
#
# focusfun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# focusfun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with focusfun. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
#
# This module is based on another one that I was writing to create a general
# purpose v4l2 class. I never really finished that, so here I just hacked
# it to print out some info about focusing capabilities.
#
import os
import v4l2
from fcntl import ioctl
# Field names used when pretty-printing a v4l2_capability struct.
capability_fields = ['bus_info', 'capabilities', 'card', 'driver', 'version']
# Field names printed for each queried control.
queryctrl_fields = ['default', 'flags', 'id', 'maximum', 'minimum', 'name', 'step', 'type']
# Known control flags: short names and the full V4L2 constant names.
queryctrl_flags_short = ['DISABLED', 'GRABBED', 'READ_ONLY', 'UPDATE', 'INACTIVE', 'SLIDER']
queryctrl_flags = ['V4L2_CTRL_FLAG_' + flag for flag in queryctrl_flags_short]
# Bidirectional maps between V4L2 control-id constant names (V4L2_CID_*)
# and their integer values, harvested from the v4l2 module's namespace.
name_cid = {}
cid_name = {}
for key,value in v4l2.__dict__.items():
    if key.startswith('V4L2_CID_'):
        name_cid[key] = value
        cid_name[value] = key
def flags_to_list(flags):
    '''Return the V4L2_CTRL_FLAG_* names whose bits are set in *flags*.'''
    return [name for name in queryctrl_flags if getattr(v4l2, name) & flags]
class MenuType(object):
    '''Maps a V4L2 menu control's choices between names and indexes.

    Built by issuing VIDIOC_QUERYMENU for every index from 0 up to the
    control's maximum.
    '''
    def __init__(self, dev, qctrl):
        self.max = qctrl.maximum
        self.choices = {}   # choice name -> index
        self.ichoices = {}  # index -> choice name
        for i in range(self.max+1):
            q = v4l2.v4l2_querymenu()
            q.id = qctrl.id
            q.index = i
            ioctl(dev, v4l2.VIDIOC_QUERYMENU, q)
            self.choices[q.name] = i
            self.ichoices[i] = q.name
    def __call__(self, arg):
        'takes name or index, returns index'
        if isinstance(arg, int):
            # Integers are accepted only when in range.
            if 0 <= arg <= self.max:
                return arg
        elif arg in self.choices:
            return self.choices[arg]
        raise ValueError('expected name or index of a menu choice')
    def __str__(self):
        names = [self.ichoices[i] for i in range(self.max+1)]
        return 'Menu%s' % (names,)
class ButtonType(object):
    '''Marker for V4L2 button controls (trigger an action; carry no value).'''
    pass
class Device(object):
    '''Wrapper around a V4L2 device node for querying capabilities and
    enumerating/reading/writing controls via ioctl.'''

    def __init__(self, filename):
        # O_RDONLY is sufficient for the query/control ioctls used here.
        self.dev = os.open(filename, os.O_RDONLY)
        self.controls = self.queryctrl_all()

    def querycap(self):
        '''Return the raw v4l2_capability struct for this device.'''
        cp = v4l2.v4l2_capability()
        ioctl(self.dev, v4l2.VIDIOC_QUERYCAP, cp)
        return cp

    def print_cap(self):
        '''Pretty-print the device capabilities; the ``capabilities``
        bitmask is expanded into the matching V4L2_CAP_* names.'''
        cp = self.querycap()
        for attr in capability_fields:
            if attr == 'capabilities':
                caps = getattr(cp, attr)
                prefix = 'V4L2_CAP_'
                capslist = [v4l2attr[len(prefix):]
                            for v4l2attr, value in v4l2.__dict__.items()
                            if v4l2attr.startswith(prefix) and caps & value]
                print('%s: %s' % (attr, capslist))
            else:
                print('%s: %s' % (attr, getattr(cp, attr)))

    def print_control(self, ctrl):
        '''Print one control dict (as produced by queryctrl_to_dict).'''
        print('')
        print('Control: "%s"' % (ctrl['name'],))
        for field in queryctrl_fields:
            print('    %s: %s' % (field, ctrl[field]))
        print('    API: %s' % (ctrl['API'],))

    def print_controls(self):
        # NOTE: intentionally limited to focus-related controls; this module
        # was hacked down from a general-purpose tool (see file header).
        for ctrl in self.controls:
            if 'focus' in ctrl['name'].lower():
                self.print_control(ctrl)

    def queryctrl(self, id):
        '''Query a single control by id; returns the raw v4l2_queryctrl.'''
        ct = v4l2.v4l2_queryctrl()
        ct.id = id
        ioctl(self.dev, v4l2.VIDIOC_QUERYCTRL, ct)
        return ct

    def queryctrl_all(self):
        '''Enumerate every control the driver exposes, as a list of dicts.

        Uses the V4L2_CTRL_FLAG_NEXT_CTRL enumeration protocol; returns []
        when the driver does not support it.
        '''
        qctrl = v4l2.v4l2_queryctrl()
        qctrl.id = v4l2.V4L2_CTRL_FLAG_NEXT_CTRL
        try:
            ioctl(self.dev, v4l2.VIDIOC_QUERYCTRL, qctrl)
        except Exception:
            # Narrowed from a bare ``except:`` so Ctrl-C still propagates.
            print('Device does not support V4L2_CTRL_FLAG_NEXT. Cannot query all controls.')
            return []
        controls = []
        while True:
            controls.append(self.queryctrl_to_dict(qctrl))
            qctrl.id |= v4l2.V4L2_CTRL_FLAG_NEXT_CTRL
            try:
                ioctl(self.dev, v4l2.VIDIOC_QUERYCTRL, qctrl)
            except Exception:
                # The driver signals end-of-enumeration with an ioctl error.
                break
        return controls

    def type_to_pytype(self, qctrl):
        '''Map a V4L2 control type to a python type / helper instance.'''
        t = qctrl.type
        if t == v4l2.V4L2_CTRL_TYPE_INTEGER:
            return int
        if t == v4l2.V4L2_CTRL_TYPE_BOOLEAN:
            return bool
        if t == v4l2.V4L2_CTRL_TYPE_MENU:
            return MenuType(self.dev, qctrl)
        if t == v4l2.V4L2_CTRL_TYPE_BUTTON:
            return ButtonType()
        raise ValueError('what to do with control type?: %s' % (t,))

    def queryctrl_to_dict(self, qctrl):
        '''Convert a raw v4l2_queryctrl struct into a plain dict.'''
        d = {}
        ## fields copied verbatim
        for field in ('default', 'id', 'minimum', 'maximum', 'name', 'step'):
            d[field] = getattr(qctrl, field)
        ## fields that need to be pythonified
        d['flags'] = flags_to_list(qctrl.flags)
        d['type'] = self.type_to_pytype(qctrl)
        # The V4L2_CID_* constant name, or None for private/unknown controls.
        d['API'] = cid_name.get(qctrl.id)
        return d

    def g_ctrl(self, id):
        '''Read and return the current value of control *id*.'''
        val = v4l2.v4l2_control()
        val.id = id
        ioctl(self.dev, v4l2.VIDIOC_G_CTRL, val)
        return val.value

    def s_ctrl(self, id, value):
        '''Set control *id* to *value*.'''
        val = v4l2.v4l2_control()
        val.id = id
        val.value = value
        ioctl(self.dev, v4l2.VIDIOC_S_CTRL, val)

    def get_inputs(self):
        '''Enumerate the device's video inputs via VIDIOC_ENUMINPUT.'''
        inputs = []
        index = 0
        inp = v4l2.v4l2_input()
        inp.index = index
        while True:
            try:
                # Return value was previously captured into an unused local.
                ioctl(self.dev, v4l2.VIDIOC_ENUMINPUT, inp)
            except Exception:
                # End of enumeration (narrowed from a bare ``except:``).
                break
            inputs.append(inp)
            index += 1
            inp = v4l2.v4l2_input()
            inp.index = index
        return inputs
# CLI: print capabilities and focus-related controls for a device node.
if __name__ == '__main__':
    import sys
    # The unused ``import time`` was removed.
    try:
        devname = sys.argv[1]
    except IndexError:
        # Narrowed from a bare ``except:``; fall back to the default device.
        devname = '/dev/video0'
    dev = Device(devname)
    dev.print_cap()
    dev.print_controls()
| Python |
#!/usr/bin/env python
###########################################################################
# Copyright 2010 Jim Pulokas
#
# This file is part of focusfun.
#
# focusfun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# focusfun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with focusfun. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
import gstgrabber
import v4l2focuser
def grab_sequence(prefix, fseries):
grabber = gstgrabber.Grabber('/dev/video0')
focuser = v4l2focuser.Focuser(grabber.v4l2_fd)
for i,f in enumerate(fseries):
print i,f
focuser.set(f)
filename = '%s-%03d-%03d.png' % (prefix, i,f,)
grabber.grab_to_file(filename)
print 'saved', filename
# CLI: grabseq.py PREFIX START STOP STEP
if __name__ == '__main__':
    import sys
    prefix = sys.argv[1]
    fseries = range(int(sys.argv[2]), int(sys.argv[3]), int(sys.argv[4]))
    grab_sequence(prefix, fseries)
| Python |
###########################################################################
# Copyright 2010 Jim Pulokas
#
# This file is part of focusfun.
#
# focusfun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# focusfun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with focusfun. If not, see <http://www.gnu.org/licenses/>.
###########################################################################
# Select the numeric backend; both branches expose the same small surface
# used below (fromstring, astype, absolute, sum, ravel) through ``num``.
numpack = 'Numeric'
if numpack == 'numpy':
    import numpy as num
    uint8 = num.uint8
    float32 = num.float32
    num_buffer_type = num.dtype(uint8)
elif numpack == 'Numeric':
    import Numeric as num
    uint8 = num.UInt8
    float32 = num.Float32
    num_buffer_type = uint8
import Image
def array_to_image(a):
    '''Convert a 2-D array of byte values to an 8-bit grayscale PIL image.'''
    # PIL sizes are (width, height); array shapes are (rows, cols).
    size = a.shape[1], a.shape[0]
    a = a.astype(uint8)
    im = Image.frombuffer('L', size, a, 'raw', 'L', 0, 1)
    return im
def image_to_array(im):
    '''Convert a PIL image to a 2-D (rows, cols) array of grayscale bytes.'''
    im = im.convert('L')
    s = im.tostring()
    shape = im.size[1], im.size[0]
    # buffer_to_array already applies ``shape``; the original re-assigned
    # a.shape with the identical value afterwards (redundant, removed).
    return buffer_to_array(s, shape)
def buffer_to_array(buffer, shape):
    '''Wrap a raw byte buffer as a numeric array with the given shape.'''
    a = num.fromstring(buffer, num_buffer_type)
    a.shape = shape
    return a
def to_float(a):
    '''Return a float32 copy of *a* (avoids uint8 wrap-around in arithmetic).'''
    return a.astype(float32)
| Python |
#!/usr/bin/env python
#
# Copyright 2006, 2007 Google Inc. All Rights Reserved.
# Author: danderson@google.com (David Anderson)
#
# Script for uploading files to a Google Code project.
#
# This is intended to be both a useful script for people who want to
# streamline project uploads and a reference implementation for
# uploading files to Google Code projects.
#
# To upload a file to Google Code, you need to provide a path to the
# file on your local machine, a small summary of what the file is, a
# project name, and a valid account that is a member or owner of that
# project. You can optionally provide a list of labels that apply to
# the file. The file will be uploaded under the same name that it has
# in your local filesystem (that is, the "basename" or last path
# component). Run the script with '--help' to get the exact syntax
# and available options.
#
# Note that the upload script requests that you enter your
# googlecode.com password. This is NOT your Gmail account password!
# This is the password you use on googlecode.com for committing to
# Subversion and uploading files. You can find your password by going
# to http://code.google.com/hosting/settings when logged in with your
# Gmail account. If you have already committed to your project's
# Subversion repository, the script will automatically retrieve your
# credentials from there (unless disabled, see the output of '--help'
# for details).
#
# If you are looking at this script as a reference for implementing
# your own Google Code file uploader, then you should take a look at
# the upload() function, which is the meat of the uploader. You
# basically need to build a multipart/form-data POST request with the
# right fields and send it to https://PROJECT.googlecode.com/files .
# Authenticate the request using HTTP Basic authentication, as is
# shown below.
#
# Licensed under the terms of the Apache Software License 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Questions, comments, feature requests and patches are most welcome.
# Please direct all of these to the Google Code users group:
# http://groups.google.com/group/google-code-hosting
"""Google Code file uploader script.
"""
__author__ = 'danderson@google.com (David Anderson)'
import httplib
import os.path
import optparse
import getpass
import base64
import sys
def upload(file, project_name, user_name, password, summary, labels=None):
  """Upload a file to a Google Code project's file server.
  Args:
    file: The local path to the file.
    project_name: The name of your project on Google Code.
    user_name: Your Google account name.
    password: The googlecode.com password for your account.
              Note that this is NOT your global Google Account password!
    summary: A small description for the file.
    labels: an optional list of label strings with which to tag the file.
  Returns: a tuple:
    http_status: 201 if the upload succeeded, something else if an
                 error occured.
    http_reason: The human-readable string associated with http_status
    file_url: If the upload succeeded, the URL of the file on Google
              Code, None otherwise.
  """
  # The login is the user part of user@gmail.com. If the login provided
  # is in the full user@domain form, strip it down.
  if user_name.endswith('@gmail.com'):
    user_name = user_name[:user_name.index('@gmail.com')]

  form_fields = [('summary', summary)]
  if labels is not None:
    form_fields.extend([('label', l.strip()) for l in labels])

  content_type, body = encode_upload_request(form_fields, file)

  upload_host = '%s.googlecode.com' % project_name
  upload_uri = '/files'
  auth_token = base64.b64encode('%s:%s'% (user_name, password))
  headers = {
    'Authorization': 'Basic %s' % auth_token,
    'User-Agent': 'Googlecode.com uploader v0.9.4',
    'Content-Type': content_type,
    }

  server = httplib.HTTPSConnection(upload_host)
  # Fix: close the connection even when request()/getresponse() raises
  # (previously an exception leaked the socket).
  try:
    server.request('POST', upload_uri, body, headers)
    resp = server.getresponse()
    status, reason = resp.status, resp.reason
    if status == 201:
      location = resp.getheader('Location', None)
    else:
      location = None
  finally:
    server.close()

  return status, reason, location
def encode_upload_request(fields, file_path):
  """Encode the given fields and file into a multipart form body.

  fields is a sequence of (name, value) pairs. file is the path of
  the file to upload. The file will be uploaded to Google Code with
  the same file name.

  Returns: (content_type, body) ready for httplib.HTTP instance
  """
  BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla'
  CRLF = '\r\n'
  part_marker = '--' + BOUNDARY
  body = []
  # Metadata parts come first, one per (name, value) pair.
  for name, value in fields:
    body.append(part_marker)
    body.append('Content-Disposition: form-data; name="%s"' % name)
    body.append('')
    body.append(value)
  # Then the file part itself.
  file_name = os.path.basename(file_path)
  f = open(file_path, 'rb')
  file_content = f.read()
  f.close()
  body.append(part_marker)
  body.append('Content-Disposition: form-data; name="filename"; filename="%s"'
              % file_name)
  # The upload server determines the mime-type, no need to set it.
  body.append('Content-Type: application/octet-stream')
  body.append('')
  body.append(file_content)
  # Closing boundary, plus a trailing CRLF.
  body.append(part_marker + '--')
  body.append('')
  return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(body)
def upload_find_auth(file_path, project_name, summary, labels=None,
                     user_name=None, password=None, tries=3):
  """Find credentials and upload a file to a Google Code project's file server.

  file_path, project_name, summary, and labels are passed as-is to upload.

  Args:
    file_path: The local path to the file.
    project_name: The name of your project on Google Code.
    summary: A small description for the file.
    labels: an optional list of label strings with which to tag the file.
    user_name: Your Google account name; prompted for interactively when None.
    password: Your googlecode.com password; prompted for when None.
    tries: How many attempts to make.

  Returns: the (status, reason, url) tuple from the last upload() attempt.
  """
  while tries > 0:
    if user_name is None:
      # Read username if not specified or loaded from svn config, or on
      # subsequent tries.
      sys.stdout.write('Please enter your googlecode.com username: ')
      sys.stdout.flush()
      user_name = sys.stdin.readline().rstrip()
    if password is None:
      # Read password if not loaded from svn config, or on subsequent tries.
      print 'Please enter your googlecode.com password.'
      print '** Note that this is NOT your Gmail account password! **'
      print 'It is the password you use to access Subversion repositories,'
      print 'and can be found here: http://code.google.com/hosting/settings'
      password = getpass.getpass()
    status, reason, url = upload(file_path, project_name, user_name, password,
                                 summary, labels)
    # Returns 403 Forbidden instead of 401 Unauthorized for bad
    # credentials as of 2007-07-17.
    if status in [httplib.FORBIDDEN, httplib.UNAUTHORIZED]:
      # Reset for another try with freshly prompted credentials.
      user_name = password = None
      tries = tries - 1
    else:
      # We're done.
      break
  return status, reason, url
def main():
  """Parse options, locate credentials, and upload the file.

  Returns 0 on success, 1 on failure (used as the process exit code).
  """
  parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY '
                                 '-p PROJECT [options] FILE')
  parser.add_option('-s', '--summary', dest='summary',
                    help='Short description of the file')
  parser.add_option('-p', '--project', dest='project',
                    help='Google Code project name')
  parser.add_option('-u', '--user', dest='user',
                    help='Your Google Code username')
  parser.add_option('-w', '--password', dest='password',
                    help='Your Google Code password')
  parser.add_option('-l', '--labels', dest='labels',
                    help='An optional list of comma-separated labels to attach '
                    'to the file')

  options, args = parser.parse_args()
  # Validate arguments before doing any interactive or network work.
  if not options.summary:
    parser.error('File summary is missing.')
  elif not options.project:
    parser.error('Project name is missing.')
  elif len(args) < 1:
    parser.error('File to upload not provided.')
  elif len(args) > 1:
    parser.error('Only one file may be specified.')

  file_path = args[0]

  if options.labels:
    labels = options.labels.split(',')
  else:
    labels = None

  status, reason, url = upload_find_auth(file_path, options.project,
                                         options.summary, labels,
                                         options.user, options.password)
  if url:
    print 'The file was uploaded successfully.'
    print 'URL: %s' % url
    return 0
  else:
    print 'An error occurred. Your file was not uploaded.'
    print 'Google Code upload server said: %s (%s)' % (reason, status)
    return 1
# Script entry point: exit with main()'s status code.
if __name__ == '__main__':
  sys.exit(main())
| Python |
#!/usr/bin/env python
# coding: utf-8
from util import db,isint,isbigint,isvarchar,isdate,istext,issmallint
from datetime import datetime
# When True, the web.py db helpers return the SQL instead of executing it.
is_debug=False
####### users ######
def register(i):
    '''Insert a new user row; returns the new row id.

    Expects a storage-like *i* with id, email and password attributes.
    '''
    isvarchar(i.email,1,50)
    isvarchar(i.password,1,100)
    # NOTE(review): the password is stored exactly as given (no hashing
    # visible here) -- confirm hashing happens before this call.
    return db.insert('users',
        id=i.id,email=i.email,password=i.password,registerdate=datetime.now(),status=1,
        _test=is_debug)
def login(email, password):
    '''Return the user row for *email* when *password* matches, else False.'''
    rows = db.select('users', where='email=$email', vars=locals(), _test=is_debug)
    if not rows:
        return False
    user = rows[0]
    # NOTE(review): passwords are compared in plain text here -- confirm
    # whether hashing is applied upstream.
    if user.password != password:
        return False
    return user
def auth(uid,secretkey):
    '''Return the user rows matching (uid, secretkey); falsy when none.'''
    users = db.select('users',where="id=$uid and secretkey=$secretkey",vars=locals(), _test=is_debug)
    return users
def changepwd(i):
    '''Persist a full user row (id, email, password, dates, status).'''
    isbigint(i.id)
    isvarchar(i.email,1,50)
    isvarchar(i.password,1,100)
    # Bug fix: the original called the undefined name ``isdatetime`` which
    # raised NameError at runtime; ``isdate`` is the validator this module
    # actually imports from util.
    isdate(i.registerdate)
    isdate(i.lastlogindate)
    issmallint(i.status)
    id = i.id
    db.update('users',where='id=$id',
        id=i.id,email=i.email,password=i.password,registerdate=i.registerdate,lastlogindate=i.lastlogindate,status=i.status,
        vars=locals(),_test=is_debug)
def setstatus(id,status):
    '''Set the status flag of user *id*.'''
    db.update('users',where='id=$id', status = status, vars=locals(),_test=is_debug)
| Python |
#!/usr/bin/env python
# coding: utf-8
from util import db,isint,isbigint,isvarchar,isdate,istext,issmallint
from datetime import datetime
# When True, the web.py db helpers return the SQL instead of executing it.
is_debug=False
####### notes ######
def create(uid, gid, content):
    '''Insert a new note for user *uid* in group *gid*; returns the row id.'''
    isbigint(uid)
    isbigint(gid)
    # Bug fix: the original validated ``gid`` a second time (istext(gid));
    # the text validator belongs on the note body.
    istext(content)
    # The summary column holds the first 150 characters of the body.
    summary = content[:150]
    return db.insert('notes',
        groupid=gid,userid=uid,notes=content,summary= summary,createdate=datetime.now(),status=1,
        _test=is_debug)
def update(i):
    '''Update an existing note's group, body and derived summary.'''
    isbigint(i.id)
    isbigint(i.groupid)
    istext(i.notes)
    # Keep the summary in sync with the body (first 150 characters).
    i.summary = i.notes[:150]
    id = i.id
    db.update('notes',where='id=$id',
        groupid=i.groupid,notes=i.notes,summary=i.summary,
        vars=locals(),_test=is_debug)
def setstatus(id,status):
    '''Set the status flag of note *id* (1 = active, 0 = trashed).'''
    isbigint(id)
    issmallint(status)
    db.update('notes',where='id=$id', status = status, vars=locals(),_test=is_debug)
def setdel(id):
    '''Soft-delete note *id* (status 0); purged later by delete().'''
    isbigint(id)
    setstatus(id,0)
def delete():
    '''Permanently purge all soft-deleted (status=0) notes.'''
    db.delete('notes',where='status=0',_test=is_debug)
def read(id, uid):
    '''Fetch the note *id* owned by user *uid*; False when absent.'''
    isbigint(id)
    isbigint(uid)
    rows = db.select('notes', where='id=$id and userid=$uid', vars=locals(), _test=is_debug)
    if rows:
        return rows[0]
    return False
def save(i):
    '''Update note i.id when it exists for its owner, else create it.

    Bug fixes: the original called read(i.id) (read takes id AND uid) and
    create(i) (create takes uid, gid, content); both raised TypeError.
    Assumes *i* carries userid/groupid/notes as update() does -- TODO
    confirm against callers.
    '''
    if read(i.id, i.userid):
        update(i)
    else:
        create(i.userid, i.groupid, i.notes)
def all(uid):
    '''All active notes for *uid* (id+summary), ordered by openorder.'''
    return list(db.select('notes', what='id,summary',
        where='userid=$uid and status=1', order="openorder",vars=locals(), _test=is_debug))
def group(gid):
    '''Active notes (id+summary) in group *gid*, ordered by openorder.'''
    return list(db.select('notes', what='id,summary',
        where='groupid=$gid and status=1', order="openorder",vars=locals(), _test=is_debug))
def opened(uid):
    '''Notes for *uid* with openorder > 0 (currently opened), in order.'''
    return list(db.select('notes', what='id,summary',
        where='userid=$uid and openorder>0', order="openorder",vars=locals(), _test=is_debug))
def deleted(uid):
    '''Soft-deleted (status=0) notes for *uid* (id+summary).'''
    return list(db.select('notes', what='id,summary',
        where='userid=$uid and status=0', order="openorder",vars=locals(), _test=is_debug))
def filter(**fields):
    '''Note listing (id+summary) filtered by exact column matches.

    Security fix: values are now bound as query parameters ($key/vars)
    instead of being interpolated into the SQL string.  Field *names*
    still reach the SQL text, so callers must only pass trusted column
    names (they are python identifiers, not user input, in this codebase).
    '''
    filters = ' and '.join('%s=$%s' % (key, key) for key in fields)
    return list(db.select('notes', what='id,summary',
        where=filters, order="lastmodifieddate desc", vars=fields, _test=is_debug))
| Python |
#!/usr/bin/env python
# coding: utf-8
from util import db,isint,isbigint,isvarchar,isdate,istext,issmallint
from datetime import datetime
# When True, the web.py db helpers return the SQL instead of executing it.
is_debug=False
####### groups ######
def create(i):
    '''Insert a new group owned by i.userid; returns the new row id.'''
    isint(i.userid)
    isvarchar(i.name,1,50)
    return db.insert('groups',
        userid=i.userid,name=i.name,createdate=datetime.now(),
        _test=is_debug)
def update(i):
    '''Rename group i.id to i.name.'''
    isint(i.id)
    isvarchar(i.name,1,50)
    id = i.id
    db.update('groups',where='id=$id',name=i.name,
        vars=locals(),_test=is_debug)
def setstatus(id,status):
    '''Set the status flag of group *id* (0 = deleted, see delete()).'''
    db.update('groups',where='id=$id', status = status, vars=locals(),_test=is_debug)
def delete(id):
    '''Soft-delete group *id* by setting its status to 0.'''
    setstatus(id,0)
def read(id, uid):
    '''Fetch the group *id* owned by user *uid*; False when absent.'''
    rows = db.select('groups', where='id=$id and userid=$uid', vars=locals(), _test=is_debug)
    if rows:
        return rows[0]
    return False
def save(i):
    '''Create the group when new (id < 0 or unknown), else rename it.

    Bug fix: read() takes (id, uid); the original called read(i.id) and
    raised TypeError.  Assumes *i* carries userid (as create() requires)
    -- TODO confirm against callers.
    '''
    if i.id < 0:
        return create(i)
    if read(i.id, i.userid):
        update(i)
    else:
        create(i)
def filter(**fields):
    '''List active groups (id, name, notescount) matching exact column values.

    Security fix: values are now bound as query parameters ($key/vars)
    rather than interpolated into the SQL text; field *names* must still
    be trusted column names.
    '''
    clauses = ['%s=$%s' % (key, key) for key in fields]
    clauses.append('status=1')
    return list(db.select('groups', what='id,name,notescount',
        where=' and '.join(clauses), order = 'id', vars=fields, _test=is_debug))
| Python |
#!/usr/bin/env python
# coding: utf-8
import web
# Shared web.py database handle for the whole application.
# NOTE(review): credentials are hard-coded -- move them to configuration.
db = web.database(dbn='mysql', db='fnotes', user='root', pw='sa')
def isint(i):
    '''Validate *i* as an INT column value.  Stub: currently a no-op.'''
    pass
def isbigint(i):
    '''Validate *i* as a BIGINT column value.  Stub: currently a no-op.'''
    pass
def isvarchar(i,min,max):
    '''Validate *i* as a VARCHAR of length min..max.  Stub: currently a no-op.'''
    pass
def isdate(i):
    '''Validate *i* as a DATE/DATETIME column value.  Stub: currently a no-op.'''
    pass
def istext(i):
    '''Validate *i* as a TEXT column value.  Stub: currently a no-op.'''
    pass
def issmallint(i):
    '''Validate *i* as a SMALLINT column value.  Stub: currently a no-op.'''
    pass
| Python |
#!/usr/bin/env python
# coding: utf-8
import util
import groups
import notes
import user
| Python |
#!/usr/bin/env python
# coding: utf-8
import web
import api
# URL routing: the JSON API is mounted at /api, pages map to classes in
# the ``controls`` module.
pre='controls.'
urls=(
"/api", api.app_api,
'/',pre+'Index',
# NOTE(review): this routes to controls.iPad, but the handler class in
# controls.py is named ``Pad`` -- confirm which name is correct.
'/ipad',pre +'iPad',
'/login',pre+'Login',
'/logout',pre +'Logout'
)
app = web.application(urls,globals())
# Create the disk-backed session once and share it via web.config so
# debug-mode module reloads reuse the same session object.
if web.config.get('_session') is None:
    session = web.session.Session(app, web.session.DiskStore('sessions'), {'islogin':False,'usr':None})
    web.template.Template.globals['context'] = session
    web.config._session = session
if __name__ =='__main__':
    app.run()
| Python |
#!/usr/bin/env python
# coding: utf-8
import web
import da
# Template renderer for the HTML pages under templates/.
render = web.template.render('templates')
def getuserid():
    '''Return the logged-in user's id from the session, or False.'''
    if web.config._session.get('usr') is None:
        return False
    return web.config._session.usr.id
class Index:
    def GET(self):
        '''Render the notes dashboard, or the login page when anonymous.'''
        uid = getuserid()
        if not uid:
            return render.login()
        model = web.storage()
        model.groups = da.groups.filter(userid=uid) # the user's note groups
        model.notes = da.notes.all(uid) # all active notes
        model.onotes = da.notes.opened(uid); # currently opened notes
        model.dnotes = da.notes.deleted(uid) # trashed notes
        return render.index(model)
class Pad:
    # NOTE(review): main.py routes '/ipad' to ``controls.iPad``; this class
    # is named ``Pad``, so that route would fail -- confirm intended name.
    def GET(self):
        '''Render the iPad-specific template.'''
        return render.pad()
class Phone:
    # No route to this handler is visible in main.py -- presumably a
    # phone-specific page; verify it is still wired up.
    def GET(self):
        '''Render the phone-specific template.'''
        return render.phone()
class Login:
    def GET(self):
        '''Show the login form.'''
        return render.login()
    def POST(self):
        '''Validate credentials, establish the session, redirect to /.'''
        i = web.input()
        usr = da.user.login(i.email,i.password)
        if not usr:
            raise web.badrequest() # TODO: re-render the login page with an error instead?
        session = web.config._session
        session.islogin = True
        session.usr = usr
        # Cookies let the browser authenticate against the REST API too
        # (TTL 360000 seconds, ~100 hours).
        web.setcookie('userid', usr.id, 360000)
        web.setcookie('secretkey', usr.secretkey, 360000)
        raise web.seeother('/')
class Logout:
    def GET(self):
        '''Destroy the session and return to the home (login) page.'''
        web.config._session.kill()
        raise web.seeother('/')
| Python |
#!/usr/bin/env python
# coding: utf-8
import web
import datetime,json,re,base64,string
import da
# REST routes for the JSON API sub-application (mounted under /api).
urls = (
    '/groups', 'Groups',
    '/group/(\d+)', 'Group',
    '/notes/opened', 'OpenedNotes',
    '/notes/deleted', 'DeletedNotes',
    '/notes/(\d+)', 'Notes',
    '/note/(\d+)', 'Note',
)
app_api = web.application(urls, locals())
def httpmethod_hook():
    '''REST, because some browsers ajax does not support PUT,DELETE method. '''
    # Clients may tunnel PUT/DELETE through POST via a ``__method`` field.
    web.ctx.method = web.input().get("__method", web.ctx.method)
# Paths reachable without authentication.
unauth_urls=('/login','/register','/getpwd')
def auth_hook():
    '''Authenticate every request except urls: /login, /register, /getpwd.'''
    path = web.ctx.path
    if any(url in path for url in unauth_urls):
        return
    # Stash the authenticated user id for the request handlers.
    web.ctx.currentuserid = authenticate()
def json_hook():
    """Unload hook: mark every API response as JSON.

    Open question from the original author: should all responses be forced
    to JSON, or only GET responses?
    """
    web.header('Content-Type', 'application/json')
    # Open question: how to capture the handler result and emit it as JSON here?
# Register hooks: authentication and method-override before each request,
# the JSON content-type header after.
app_api.add_processor(web.loadhook(auth_hook))
app_api.add_processor(web.loadhook(httpmethod_hook)) # original author asked "has order ?" -- hook ordering unverified
app_api.add_processor(web.unloadhook(json_hook))
################### common function ###########################
class ExtendedEncoder(json.JSONEncoder):
    '''JSON encoder that serializes datetime objects as "YYYY-MM-DD HH:MM:SS".

    Based on http://stackoverflow.com/questions/6182967/how-to-format-a-mysql-query-into-json-using-webpy
    '''
    def default(self, o):
        if isinstance(o, datetime.datetime):
            return o.strftime("%Y-%m-%d %H:%M:%S")
        # BUG FIX: the original did `return json.JSONEncoder(self, o)`, which
        # *constructs* a new encoder (passing self/o as skipkeys/ensure_ascii)
        # instead of delegating to the base default(), so unsupported types
        # were silently serialized wrong instead of raising TypeError.
        return json.JSONEncoder.default(self, o)
def authenticate():
    '''HTTP Basic auth with "userid:secretkey" credentials.

    Returns the authenticated user id, or raises 401 with a
    WWW-Authenticate challenge when credentials are missing or invalid.
    Based on http://webpy.org/cookbook/userauthbasic
    '''
    uid = False
    auth = web.ctx.env.get('HTTP_AUTHORIZATION')
    authreq = False
    if auth is None:
        authreq = True
    else:
        # Strip the "Basic " prefix, then decode "uid:secretkey".
        # NOTE(review): base64.decodestring is Python-2-only (removed in 3.9).
        auth = re.sub('^Basic','',auth)
        uid,secretkey=base64.decodestring(auth).split(':')
        if not da.user.auth(uid,secretkey):
            authreq = True
    if authreq:
        web.header('WWW-Authenticate','Basic realm="Unauthorized!"')
        raise web.unauthorized()
    return uid
def getgroup(gid, uid):
    """Load group ``gid`` owned by user ``uid``; abort with 404 when absent."""
    group = da.groups.read(gid, uid)
    if group:
        return group
    raise web.notfound()
def getnote(nid, uid):
    """Load note ``nid`` owned by user ``uid``; abort with 404 when absent."""
    note = da.notes.read(nid, uid)
    if note:
        return note
    raise web.notfound()
######### rest api, need authenticate #######################
class Login:
    """POST /login -> JSON {"id": ..., "secretkey": ...} for valid credentials, else 401."""
    def POST(self):
        i = web.input()
        usr = da.user.login(i.email, i.password)
        if not usr:
            raise web.unauthorized()
        # BUG FIX: the original built the dict as {id:usr.id, secretkey:usr.secretkey},
        # which used the builtin `id` function as a dict key and raised NameError
        # on the undefined name `secretkey`.  The JSON client needs string keys.
        usr = {'id': usr.id, 'secretkey': usr.secretkey}
        web.header('Content-Type', 'application/json')
        return json.dumps(usr)
class Groups:
    def GET(self):
        # All groups owned by the authenticated user, as a JSON array.
        groups = da.groups.filter(userid=web.ctx.currentuserid)
        return json.dumps(groups)
class Group:
    # CRUD for a single group; getgroup() enforces ownership (404 otherwise).
    def GET(self,id):
        group = getgroup(id,web.ctx.currentuserid)
        return json.dumps(group,cls=ExtendedEncoder)
    def POST(self,id):
        # Create a new group owned by the current user (the url id is unused here).
        i = web.input()
        i.userid = web.ctx.currentuserid
        gid = da.groups.create(i)
        return gid # NOTE(review): returned raw, not JSON -- confirm what the client expects
    def PUT(self,id):
        group = getgroup(id,web.ctx.currentuserid) # ownership check only; original notes the full read is wasteful -- consider caching
        i = web.input()
        i.id = id
        da.groups.update(i)
        return id
    def DELETE(self,id):
        group = getgroup(id,web.ctx.currentuserid)  # ownership check
        da.groups.delete(id)
class Notes:
    def GET(self,gid):
        uid = web.ctx.currentuserid
        # gid > 0 selects one group's notes; other values mean "all notes".
        # Original TODO: merge OpenedNotes/DeletedNotes into this handler
        # via string ids.
        if string.atoi(gid) > 0:  # NOTE(review): string.atoi is Python-2-only
            group = getgroup(gid,uid)  # ownership check
            notes = da.notes.group(gid)
        else:
            notes = da.notes.all(uid) # all notes of the current user
        return json.dumps(notes) # ExtendedEncoder deliberately omitted here (original comment) -- confirm rows contain no datetimes
class OpenedNotes:
    def GET(self):
        # Publicly opened notes of the current user.
        notes = da.notes.opened(web.ctx.currentuserid)
        return json.dumps(notes)
    def POST(self):
        ## StringIO.StringIO(web.data())
        # Unimplemented: the raw body is read but never used.
        d = web.data()
        pass
class DeletedNotes:
    def GET(self):
        # Notes in the current user's trash.
        notes = da.notes.deleted(userid=web.ctx.currentuserid)
        return json.dumps(notes)
class Note:
    # CRUD for a single note; getnote()/getgroup() enforce ownership (404 otherwise).
    def GET(self,id):
        note = getnote(id,web.ctx.currentuserid)
        return json.dumps(note,cls=ExtendedEncoder)
    def POST(self,id):
        uid = web.ctx.currentuserid
        i = web.input()
        gid = string.atoi(i.groupid)  # NOTE(review): string.atoi is Python-2-only
        if gid > 0:
            group = getgroup(gid,uid)  # ownership check on the target group
        else:
            pass ## original TODO: how to get the default group id?
        nid = da.notes.create(uid,gid,i.notes)
        return nid;
    def PUT(self,id):
        uid = web.ctx.currentuserid
        note = getnote(id,uid) ## is note owner
        i = web.input()
        # Only validate the group when the note is being moved.
        if note.groupid != i.groupid:
            gid = string.atoi(i.groupid)
            if gid > 0:
                group = getgroup(gid,uid) ## is group owner
            else:
                pass ## original TODO: how to get the default group id?
        i.id = id
        da.notes.update(i)
        return id
    def DELETE(self,id): ## delete from normal or trash?
        note = getnote(id,web.ctx.currentuserid)
        # NOTE(review): both branches call setdel(); for status != 1 a hard
        # delete was probably intended (original marked it "##?") -- confirm.
        if note.status == 1:
            da.notes.setdel(id)
        else:
            da.notes.setdel(id) ##?
        return
| Python |
#!/usr/bin/env python
import codecs
import re
import jinja2
import markdown
def process_slides():
    """Compile slides.md into ../../presentation-output.html.

    Slides are separated by a line containing only "---".  Each slide may
    start with a metadata paragraph of "key: value" lines; the remainder is
    rendered from Markdown and spliced into the base.html Jinja template.
    """
    with codecs.open('../../presentation-output.html', 'w', encoding='utf8') as outfile:
        md = codecs.open('slides.md', encoding='utf8').read()
        md_slides = md.split('\n---\n')
        print 'Compiled %s slides.' % len(md_slides)
        slides = []
        # Process each slide separately.
        for md_slide in md_slides:
            slide = {}
            sections = md_slide.split('\n\n')
            # Extract metadata at the beginning of the slide (look for key: value
            # pairs).
            metadata_section = sections[0]
            metadata = parse_metadata(metadata_section)
            slide.update(metadata)
            # Skip the first paragraph only if it actually carried metadata.
            remainder_index = metadata and 1 or 0
            # Get the content from the rest of the slide.
            content_section = '\n\n'.join(sections[remainder_index:])
            html = markdown.markdown(content_section)
            slide['content'] = postprocess_html(html, metadata)
            slides.append(slide)
        # The template picks up `slides` (and everything else) from locals().
        template = jinja2.Template(open('base.html').read())
        outfile.write(template.render(locals()))
def parse_metadata(section):
    """Given the first part of a slide, returns metadata associated with it.

    Each "key: value" line becomes an entry (split on the first colon,
    both sides stripped); lines without a colon are ignored.
    """
    metadata = {}
    for line in section.split('\n'):
        key, sep, val = line.partition(':')
        if sep:
            metadata[key.strip()] = val.strip()
    return metadata
def postprocess_html(html, metadata):
    """Returns processed HTML to fit into the slide template format.

    When the slide's metadata sets build_lists to "true", lists get the
    "build" class so they appear item by item.
    """
    if metadata.get('build_lists') == 'true':
        html = html.replace('<ul>', '<ul class="build">').replace('<ol>', '<ol class="build">')
    return html
# Script entry point.
if __name__ == '__main__':
    process_slides()
| Python |
# Django settings for YourSchool project.
import os
import ldap
from django_auth_ldap.config import LDAPSearch, GroupOfNamesType
#our project root folder
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
# Try LDAP login first, then fall back to Django's own user database.
AUTHENTICATION_BACKENDS = (
    'django_auth_ldap.backend.LDAPBackend',
    'django.contrib.auth.backends.ModelBackend',
)
AUTH_LDAP_SERVER_URI = "ldap://hirdc.hir.is:389/"
# SECURITY(review): the bind DN/password are hard-coded in source control --
# move them to environment variables or a local, untracked settings file.
# (Also "Servcices" in the DN looks like a typo -- confirm against the directory.)
AUTH_LDAP_BIND_DN = "CN=wepo, OU=WEPO, OU=External, OU=Servcices, DC=hir, DC=is"
AUTH_LDAP_BIND_PASSWORD = "OJoo7kia"
AUTH_LDAP_USER_SEARCH = LDAPSearch("OU=People,DC=hir,DC=is", ldap.SCOPE_SUBTREE, "(sAMAccountName=%(user)s)")
LOGIN_URL = '/accounts/login'
LOGIN_REDIRECT_URL = '/exam/index'
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '/usr/pycharmprojects/YourSchool/db/db.sqlite', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_PATH, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_PATH, 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '$_pn_@d7g$m*5n$&n3^oi#tpocga#v+2kn_@c(+$%dltbjrm8l'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'YourSchool.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_PATH, 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'YourSchool.Exams',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'stream_to_console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler'
},
},
'loggers': {
'django_auth_ldap': {
'handlers': ['stream_to_console'],
'level': 'DEBUG',
'propagate': True,
},
}
}
| Python |
from django.db import models
class ExamUser(models.Model):
    # Mirror of an authenticated user inside the exam app (created on first login).
    user_name = models.CharField(max_length=64,null=False,unique=True)
    full_name = models.CharField(max_length=64,null=True)
    is_teacher=models.BooleanField(default=False)
    def __unicode__(self):
        return self.user_name
class Exam(models.Model):
    # A teacher-authored exam; `enabled` soft-hides it without deleting data.
    title = models.CharField(max_length=50,null=False,unique=True)
    description = models.TextField(null=True)
    datecreated = models.DateTimeField(auto_now_add=True)
    deadline = models.DateTimeField(null=False)
    author = models.ForeignKey(ExamUser,null=False)
    enabled=models.BooleanField(default=True)
    def __unicode__(self):
        return self.title
class ExamQuestion(models.Model):
    # One multiple-choice question belonging to an Exam.
    question = models.CharField(max_length=120,null=False)
    exam_id=models.ForeignKey(Exam,null=False)
    weight = models.IntegerField(null=True)  # NOTE(review): never read by the grading code visible here
    def __unicode__(self):
        return self.question
class ExamOption(models.Model):
    # One selectable answer for a question; is_correct marks the right one(s).
    answer_option=models.CharField(max_length=120,null=False)
    is_correct=models.BooleanField(default=False)
    exam_question_id=models.ForeignKey(ExamQuestion,null=False)
class StudentExam(models.Model):
    # Records that a student has submitted an exam (one row per student+exam).
    student=models.ForeignKey(ExamUser,null=False)
    exam_id =models.ForeignKey(Exam,null=False)
    exam_grade=models.IntegerField(null=False)
    date_submitted= models.DateTimeField(auto_now_add=True)
    def __unicode__(self):
        # NOTE(review): returns an ExamUser instance, not a unicode string -- confirm intended.
        return self.student
class StudentAnswer(models.Model):
    # The option a student picked for one question.
    student=models.ForeignKey(ExamUser,null=False)
    question_id=models.ForeignKey(ExamQuestion,null=False)
    option_id=models.ForeignKey(ExamOption,null=False)
    #def __unicode__(self):
    #    return self.student
| Python |
from django.http import Http404
__author__ = 'magnusg08'
from Exams.models import ExamUser,ExamQuestion,Exam,ExamOption,StudentAnswer,StudentExam
#adding to ExamUsers logged in users, used in views.py index function
def check_loginuser(loguser):
    """Ensure an ExamUser row exists for `loguser`, creating one on first login."""
    try:
        obj = ExamUser.objects.get(user_name=loguser)
    except ExamUser.DoesNotExist:
        obj = ExamUser(user_name=loguser)
        obj.save()
# Statistical functions
# Teacher should probably have access to all stat functions
# lists closed exams, how many participated, how many passed, how many failed, average grade for each exam
def teacher_stat(user):
    """Unimplemented: per-exam participation/pass/fail/average stats for teachers."""
    pass
#lists closed exams , students grade in each, average grade in each, + teacher_stat for that exam
def student_stat(user):
    """Unimplemented: the student's grade and averages for closed exams."""
    pass
# shows requested exam with students answers , correct answers and students grade
def student_result(user):
    # Debugging stub: prints all exams and the exams not yet taken by `user`.
    print Exam.objects.all()
    print Exam.objects.all().exclude(studentexam__student=user)
def already_taken_exam(studentid,examid):
    # NOTE(review): always returns None -- on a hit `se` is discarded and on a
    # miss it only prints, so callers cannot distinguish the cases.  Confirm intent.
    try:
        se = StudentExam.objects.get(exam_id = examid,student = studentid)
    except StudentExam.DoesNotExist:
        print "not taken exam"
def calculate_grade(studentid,examid):
    """Grade (0-10, Python 2 integer arithmetic) of `studentid` on exam `examid`."""
    try:
        q=ExamQuestion.objects.filter(exam_id=examid).count() # number of questions in examid
        # s = the student's correct answers on this exam
        s=ExamOption.objects.filter(exam_question_id__exam_id=examid).filter(is_correct=1).filter(studentanswer__student=studentid)
        grade=s.count()*10/q
    except ZeroDivisionError:
        # An exam with no questions counts as grade 0.
        grade=0
    return grade
def calculate_avgrade(examid):
    """Average grade (0-10, Python 2 integer arithmetic) over all students
    who took exam `examid`; 0 when nobody has taken it yet."""
    tot_grade=0
    e=StudentExam.objects.filter(exam_id=examid)
    n=e.count()
    # BUG FIX: the original divided tot_grade by n unguarded, raising
    # ZeroDivisionError for an exam nobody had taken yet.
    if n == 0:
        return 0
    q=ExamQuestion.objects.filter(exam_id=examid).count()
    for student in e:
        try:
            # The student's correct answers on this exam.
            s=ExamOption.objects.filter(exam_question_id__exam_id=examid).filter(is_correct=1).filter(studentanswer__student=student.student)
            grade=s.count()*10/q
        except ZeroDivisionError:
            # An exam with no questions counts as grade 0.
            grade=0
        tot_grade += grade
    av_grade=tot_grade/n
    return av_grade
def calculate_order(studentid,examid):
    """Rank of the student's grade within exam `examid`.

    Returns (order, n): the 1-based position from the top and the number of
    participants.  Equal grades share the same rank.
    """
    order=1
    studentsgrade=calculate_grade(studentid,examid)
    e=StudentExam.objects.filter(exam_id=examid)
    n=e.count()
    q=ExamQuestion.objects.filter(exam_id=examid).count()
    for student in e:
        try:
            # Each participant's correct answers, graded the same way as calculate_grade.
            s=ExamOption.objects.filter(exam_question_id__exam_id=examid).filter(is_correct=1).filter(studentanswer__student=student.student)
            grade=s.count()*10/q
        except ZeroDivisionError:
            grade=0
        if grade>studentsgrade:
            order += 1
    return order,n
| Python |
from django.db import models
from django.forms.models import ModelForm
from Exams.customfunctions import calculate_grade
from Exams.models import Exam, ExamOption, ExamQuestion, ExamUser, StudentExam, StudentAnswer
from pprint import pprint
class OptionsForm(ModelForm):
    # NOTE(review): uses models.CharField (not forms.CharField) and has no Meta;
    # this class is not referenced by the save helpers below -- confirm it is used.
    option = models.CharField()
class QuestionForm(ModelForm):
    # NOTE(review): model fields on a form class, no Meta, and `options = []`
    # is class-level (shared) state -- confirm this class is actually used.
    question = models.CharField(max_length=120)
    options = []
    correctAnswer = models.CharField(max_length=20)
class ExamForm():
    # NOTE(review): plain class (not a ModelForm) built from models.* fields;
    # `questions = []` is shared class-level state.  Imported by views.py --
    # confirm whether it is actually instantiated anywhere.
    id = models.IntegerField()
    title = models.CharField(max_length=120)
    description = models.TextField()
    deadline = models.CharField(120)  # NOTE(review): positional 120 is verbose_name, not max_length
    enabled=models.BooleanField()
    questions = []
#serialize and save to db
def SaveExamDataToDb(postedExam, username ):
    """Persist a posted exam dict (title/description/deadline/questions) for `username`.

    Returns True on success, False when the payload lacks 'questions' or a
    question lacks 'options'.
    NOTE(review): a malformed question aborts mid-way after the exam and some
    questions were already saved -- no transaction/rollback.  Confirm acceptable.
    """
    #todo: validation
    ex = Exam() #create new Exam object to populate and save
    if not 'questions' in postedExam:
        return False
    #pprint(postedExam['questions'])
    ex.title = postedExam['title']
    ex.description = postedExam['description']
    ex.deadline = postedExam['deadline']
    ex.author = ExamUser.objects.get(user_name = username)
    ex.enabled = True
    ex.save() #save so we get an id
    exId = ex.id
    #get questions list object
    questions = postedExam['questions']
    #loop through all questions
    for q in questions:
        #create a new question for each iteration
        if not 'options' in q:
            return False
        exQuestion = ExamQuestion()
        exQuestion.exam_id = Exam.objects.get(pk=exId)
        exQuestion.question = q['question']
        exQuestion.save()
        qId = exQuestion.id #get the new id
        for o in q['options']:
            qOption = ExamOption()
            qOption.exam_question_id = ExamQuestion.objects.get(pk=qId)
            qOption.answer_option = o['option']
            qOption.is_correct = bool( o['correct'])
            qOption.save()
    # return true for success
    return True
#a helper function that saves posted data to db when a student takes an exam
#input postedAnswers a Json object containing the post data
#username containing the username exam taker...
#returns True if successfull and crashes otherwise !!
def saveStudentAnswers(postedAnswers, username):
    """Persist a student's submitted exam answers.

    `postedAnswers` is the decoded JSON body ({'eid': ..., 'answers': [...]});
    `username` identifies the exam taker.  Returns True on success.
    NOTE(review): when a StudentExam row already exists the answers are still
    appended below, which would duplicate them on a re-submit -- confirm intent.
    """
    pprint(username)
    pprint(postedAnswers)
    student = ExamUser.objects.get(user_name = username)
    eId = postedAnswers['eid']
    pprint( "Exam id: " + str(eId) + " user id: " + str(student.id))
    try:
        se = StudentExam.objects.get(exam_id =eId,student = student.id)
    except StudentExam.DoesNotExist:
        # First submission: create the StudentExam record (grade filled later).
        sEx = StudentExam()
        sEx.exam_id = Exam.objects.get(pk=eId)
        sEx.student = ExamUser.objects.get(user_name = username)
        sEx.exam_grade = 0 #we have not calculated the grade yet
        sEx.save()
        studentExamId = sEx.id #now we have the new id of the newly created record
    #then loop through the answers
    answers = postedAnswers['answers']
    for a in answers:
        sAns = StudentAnswer()
        sAns.student = ExamUser.objects.get(user_name = username)
        sAns.option_id = ExamOption.objects.get(pk= a['answered_option'])
        sAns.question_id = ExamQuestion.objects.get(pk=a['question_id'])
        sAns.save() #save each exam answer to a question
    return True
| Python |
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    # Django project-template boilerplate sanity test.
    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        self.assertEqual(1 + 1, 2)
| Python |
from django.conf.urls.defaults import patterns, url
# Exam app routes (old-style string view references; ids are numeric).
urlpatterns = patterns('',
    (r'^index', 'YourSchool.Exams.views.index'),
    (r'^statistics/(?P<param_exam_id>\d+)/$', 'YourSchool.Exams.views.statistics'),
    (r'^create', 'YourSchool.Exams.views.create'),
    (r'^show/(?P<param_exam_id>\d+)/$', 'YourSchool.Exams.views.show'),
    (r'^take/(?P<param_exam_id>\d+)/$', 'YourSchool.Exams.views.take'),
)
| Python |
# Create your views here.
from datetime import datetime
from django.http import HttpResponseServerError
from Exams.forms import ExamForm, SaveExamDataToDb, saveStudentAnswers
from Exams.models import Exam, ExamUser, ExamQuestion, ExamOption, StudentExam
from Exams.models import ExamUser
from Exams.customfunctions import check_loginuser, calculate_grade, calculate_avgrade, calculate_order
from django.shortcuts import *
from django.contrib.auth.decorators import login_required
from pprint import pprint
from django.utils import simplejson
from django.views.decorators.csrf import csrf_exempt
@login_required
def index(request):
    """Landing page: open / taken / closed exams for the logged-in student."""
    check_loginuser(request.user.username)  # create the ExamUser row on first visit
    student = ExamUser.objects.get(user_name = request.user.username)
    # Open: deadline still ahead and not yet taken by this student.
    exams_open = Exam.objects.all().filter(deadline__gte=datetime.now()).exclude(studentexam__student=student.id).order_by('-deadline')
    # BUG FIX: QuerySet.exclude() returns a *new* queryset; the original called
    # it without assigning the result, so disabled exams were never hidden.
    exams_open = exams_open.exclude(enabled=False)
    exams_taken = Exam.objects.all().filter(studentexam__student=student.id).order_by('-deadline')
    exams_closed = Exam.objects.all().filter(deadline__lte=datetime.now()).order_by('-deadline')
    exams = {"exams_open": exams_open, "exams_taken": exams_taken, "exams_closed": exams_closed}
    objExams = {"exams": exams}
    return render_to_response('exam/index.html', objExams, context_instance=RequestContext(request))
#
#only loggedin users can log out
@login_required
def logout(request):
    # Renders the logged-out template only; the actual session teardown is
    # handled by django.contrib.auth.views.logout wired up in the project urls.
    return render_to_response('registration/logged_out.html', context_instance=RequestContext(request))
@csrf_exempt
@login_required
def take(request, param_exam_id):
    """POST: store the student's submitted answers (JSON body); GET: render the exam form."""
    if request.method == "POST":
        result = saveStudentAnswers(simplejson.loads(request.raw_post_data), request.user.username)
        return HttpResponse(simplejson.dumps({"success":result}), mimetype="application/json")
    else:
        usr = ExamUser.objects.all().get(user_name=request.user.username)
        exam = Exam.objects.get(pk = param_exam_id)
        questions = ExamQuestion.objects.all().filter(exam_id = exam.id)
        questionCount = questions.count()
        questionsObj = []
        # Bundle each question with its answer options for the template.
        for question in questions:
            options = ExamOption.objects.all().filter(exam_question_id = question.id)
            optionCount = options.count()
            questionsObj.append ( {"question" : question, "options":options, "qid":question.id, "optioncount":optionCount})
        formObj = {"exam" : exam, "questions": questionsObj, "questioncount":questionCount }
        return render_to_response('exam/take.html', formObj, context_instance=RequestContext(request))
#try:
#questions = Exam.objects.all().filter()
#questions = ExamQuestion.objects.get(exam_id == param_exam_id)
#for q in questions:
# options = ExamOption.objects.get(exam_question_id = q.id )
# return HttpResponse(json.dumps(response_data), mimetype="application/json")
#except Exam.DoesNotExist:
# raise Http404
@login_required
def show(request, param_exam_id):
    """Display one exam, or 404 when the id is unknown."""
    exam = get_object_or_404(Exam, pk=param_exam_id)
    return render_to_response('exam/show.html', {"exam" : exam}, context_instance=RequestContext(request))
@login_required
def statistics(request, param_exam_id):
    """Show the student's grade, the exam average and the student's rank."""
    try:
        # NOTE(review): only Exam.DoesNotExist is caught; a missing ExamUser
        # row would propagate as a 500 -- confirm check_loginuser always ran first.
        student = ExamUser.objects.get(user_name = request.user.username)
        grade = calculate_grade(student.id,param_exam_id )
        avg = calculate_avgrade(param_exam_id)
        (order, student_n) = calculate_order(student.id, param_exam_id)
        exam = Exam.objects.get(pk=param_exam_id)
        obj = {"exam":exam, "grade":grade, "average":avg, "order":order, "student_n":student_n}
    except Exam.DoesNotExist:
        raise Http404
    return render_to_response('exam/statistics.html', obj, context_instance=RequestContext(request))
@login_required
def edit(request):
    # Placeholder: renders the edit template; no exam data is loaded yet.
    return render_to_response('exam/edit.html', context_instance=RequestContext(request))
#without inline test
@csrf_exempt
@login_required
def create(request):
    """POST: persist a teacher-authored exam from a JSON payload; GET: render the builder form."""
    if request.method == "POST":
        exFrm = simplejson.loads(request.raw_post_data)
        success = SaveExamDataToDb(exFrm, request.user.username)
        return HttpResponse(simplejson.dumps({"success":success}), mimetype="application/json")
    else:
        return render_to_response('exam/create.html', context_instance=RequestContext(request))
#!/usr/bin/env python
# Standard Django 1.3-era manage.py bootstrap: locate settings.py, then
# dispatch to the management command runner.
from django.core.management import execute_manager
import imp
try:
    imp.find_module('settings') # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
    sys.exit(1)
import settings
if __name__ == "__main__":
    execute_manager(settings)
| Python |
from django.conf.urls.defaults import patterns, include, url
import settings
# Project-level routes: exam app, auth views, and (dev-only) media serving.
urlpatterns = patterns('',
    url(r'^exam/',include('Exams.urls')),
    (r'^exam', 'YourSchool.Exams.views.index'),
    (r'^$','YourSchool.Exams.views.index'),
    (r'^accounts/login','django.contrib.auth.views.login'),
    (r'^accounts/logout','django.contrib.auth.views.logout'),
    (r'^media/(?P<path>.*)/$','django.views.static.serve',{'document_root': settings.MEDIA_ROOT}), # NOTE(review): the trailing /$ forces a slash after the file path -- confirm media URLs match
)
| Python |
#!/usr/bin/env python
# Standard Django 1.3-era manage.py bootstrap: locate settings.py, then
# dispatch to the management command runner.
from django.core.management import execute_manager
import imp
try:
    imp.find_module('settings') # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
    sys.exit(1)
import settings
if __name__ == "__main__":
    execute_manager(settings)
| Python |
# Django settings for YourSchool project.
#import ldap
#from django_auth_ldap.config import LDAPSearch, GroupOfNamesType
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'yourSchoolDB.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '(11(#83#ctk(r2$n00)affck=nm2%*m@29%13xxgj6svoh%qr*'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'YourSchool.urls'
TEMPLATE_DIRS = ('templates',)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'YourSchool.exams',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'stream_to_console':{
'level': 'DEBUG',
'class': 'logging.StreamHandler'
},
},
'loggers': {
'django_auth_ldap':{
'handlers': ['stream_to_console'],
'level':'DEBUG',
'propagate': True,
}
}
}
LOGIN_URL = '/accounts/login'
LOGIN_REDIRECT_URL = '/'
# NOTE(review): both 'django.contrib.auth.context_processors.auth' and its
# pre-1.2 alias 'django.core.context_processors.auth' are listed; on Django
# versions where the old alias was removed this raises at startup -- confirm
# the target version and drop the duplicate if possible.
TEMPLATE_CONTEXT_PROCESSORS = ("django.contrib.auth.context_processors.auth","django.core.context_processors.auth", "django.core.context_processors.debug", "django.core.context_processors.i18n", "django.core.context_processors.media", 'django.core.context_processors.request',)
#AUTHENTICATION_BACKENDS = ('django_auth_ldap.backend.LDAPBackend' , 'django.contrib.auth.backends.ModelBackend')
#AUTH_LDAP_SERVER_URI = "ldap://hirdc.hir.is:389/"
#AUTH_LDAP_BIND_DN = "CN=wepo,OU=WEPO,OU=External,OU=Service,DC=hir,DC=is"
#AUTH_LDAP_BIND_PASSWORD = "OJoo7kia"
#AUTH_LDAP_USER_SEARCH = LDAPSearch("OU=People,DC=hir,DC=is", ldap.SCOPE_SUBTREE, "(sAMAccountName=%(user)s)")
| Python |
from datetime import datetime
from django.db import models
class Exam(models.Model):
    """An exam with an availability window, duration and running average grade."""
    name = models.CharField(max_length=200)
    owner = models.CharField(max_length=200)
    # BUG FIX: the original passed default=datetime.now() (called), which is
    # evaluated once at import time so every Exam shared the same creation
    # timestamp.  Passing the callable defers evaluation to row-creation time.
    created = models.DateTimeField(default=datetime.now)
    examStartDay = models.DateField()
    examEndDay = models.DateField()
    examStartTime = models.TimeField()
    examEndTime = models.TimeField()
    averageGrade = models.DecimalField(max_digits=4, decimal_places=3)
    duration = models.DecimalField(max_digits=5, decimal_places=2) #hours
    active = models.IntegerField(default='1')  # 1 = listed for the teacher; decremented by the disableExam view
class Question(models.Model):
    # A question on an exam; `order` positions it, `correctAnswer` indexes the right option.
    name = models.CharField(max_length=400)
    examId = models.ForeignKey(Exam)
    value = models.DecimalField(max_digits=3, decimal_places=0)
    order = models.IntegerField()
    correctAnswer = models.IntegerField()
class Answer(models.Model):
    # One selectable answer for a question.
    name = models.CharField(max_length=400)
    rightAnswer = models.BooleanField()
    questionId = models.ForeignKey(Question)
    order = models.IntegerField()
class StudentExams(models.Model):
    # A student's answer to one question of one exam (plain integer ids,
    # not ForeignKeys -- presumably to match hand-written SQL/views below).
    studentId = models.IntegerField()
    examId = models.IntegerField()
    questionId = models.IntegerField()
    answerValue = models.IntegerField()
class Teacher(models.Model):
    # Usernames with teacher privileges.
    username = models.CharField(max_length=100)
#http://stackoverflow.com/questions/507795/can-i-use-a-database-view-as-a-model-in-django
#http://stackoverflow.com/questions/1041855/use-database-view-in-django
class ResultByStudent(models.Model):
    # Read-only model over the pre-built exams_resultbystudent database view
    # (managed = False: Django does not create or migrate the table).
    studentId = models.IntegerField(db_column="studentId")
    examId = models.IntegerField(db_column="examid")
    name = models.CharField(max_length=200, db_column="name")
    grade = models.IntegerField(db_column="grade")
    class Meta:
        db_table = 'exams_resultbystudent'
        managed = False
class ExamFinished(models.Model):
    # Read-only model over the pre-built exams_examfinished database view
    # (managed = False: Django does not create or migrate the table).
    studentId = models.IntegerField(db_column="studentId")
    examId = models.IntegerField(db_column="examid")
    name = models.CharField(max_length=200)
    examStartDay = models.DateField(db_column="examStartDay")
    examEndDay = models.DateField(db_column="examEndDay")
    class Meta:
        db_table = 'exams_examfinished'
        managed = False
| Python |
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    """Placeholder test case shipped with the default Django app template."""

    def test_basic_addition(self):
        """Sanity check: 1 + 1 must always equal 2."""
        expected = 2
        self.assertEqual(1 + 1, expected)
| Python |
from datetime import date, time, timedelta
from pprint import pprint
from django.http import HttpResponseRedirect
from exams.models import *
from django.shortcuts import render_to_response, redirect, HttpResponse, RequestContext, get_object_or_404
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
def logout_view(request):
    """Terminate the current session and bounce back to the front page."""
    logout(request)
    destination = '/'
    return HttpResponseRedirect(destination)
@login_required
def home(request):
    """Render the landing page for an authenticated user."""
    context = RequestContext(request)
    return render_to_response("index.html", context)
# gets exams by date for one year period
@login_required
def studentIndex(request):
    """List exams for the logged-in student, bucketed by availability.

    Buckets: expired (end day passed), currently open, and future.
    NOTE(review): `date.today` is passed as the callable, not `date.today()`;
    this relies on the query layer evaluating callables -- confirm.
    """
    #examTaken = ExamFinished.objects.all().filter(studentId = request.user.id, examEndDay__lt = date.today)
    examExpired = Exam.objects.all().filter(examEndDay__lt = date.today)#Expired exams
    examByToday = Exam.objects.all().filter(examStartDay__lt = date.today, examEndDay__gte = date.today)
    examFuture = Exam.objects.all().filter(examStartDay__gt = date.today)
    obj = { "examByToday":examByToday, "examExpired": examExpired, "examFuture": examFuture }
    return render_to_response("exams/_studentIndex.html",obj, RequestContext(request))
@login_required
def teacherIndex(request):
    """Teacher dashboard: list the requesting teacher's active exams.

    Non-teachers get the index page with a permission-denied message.
    """
    if is_teacher(request.user) == False:
        message = "You don't have permission from the Universe and beyond to create an exam!"
        obj = {"message": message}
        return render_to_response('index.html',obj, RequestContext(request))
    else:
        # Only exams owned by this teacher that have not been disabled.
        teachersExam = Exam.objects.filter(owner__exact=request.user, active = 1)
        obj = {"teachersExam":teachersExam}
        return render_to_response("exams/_teacherIndex.html",obj, RequestContext(request))
#delete action for user
@login_required
def disableExam(request):
    """Deactivate (soft-delete) the exam selected on the teacher index page.

    Expects a POST with a 'delete' field holding the exam's primary key.
    The exam is marked inactive rather than deleted, so past results survive.
    """
    if request.method == 'POST':
        # get_object_or_404 turns a bad/unknown id into a 404 instead of an
        # unhandled Exam.DoesNotExist; also avoids shadowing this function's
        # own name with a local variable as the original code did.
        exam = get_object_or_404(Exam, id__exact=request.POST['delete'])
        # Set (not decrement) the flag so repeated POSTs stay idempotent;
        # `active -= 1` could drive the value below zero.
        exam.active = 0
        exam.save()
    return render_to_response("exams/_teacherIndex.html", RequestContext(request))
#gets exam by request from student takeExam.html
@login_required
def takeExam(request, eId):
    """Render exam *eId* with its questions and their candidate answers.

    Builds a list of {qid, qname, qanswers} dicts, one per question, for
    the takeExam.html template.  Only GET is handled; any other method
    falls through and implicitly returns None.
    """
    if request.method == 'GET':
        exam = Exam.objects.get(id__exact=eId)
        questions = Question.objects.all().filter(examId__exact=eId)
        allAnswers = []
        # One answers queryset per question; acceptable for small exams.
        for i in range(0,questions.count()):
            qid = questions[i].id
            qname = questions[i].name
            qanswers = Answer.objects.all().filter(questionId__exact=questions[i].id)
            allAnswers.append( {"qid": qid, "qname": qname, "qanswers": qanswers,} )
        obj = {"exam":exam, "allAnswers":allAnswers}
        return render_to_response("exams/takeExam.html",obj, RequestContext(request))
#Handlers the answers from students answerExam.html POST -!!!!takeExam
@login_required
def answerExam(request):
    """Persist a student's answers submitted from takeExam.html.

    The form posts pairs question<i>/radio_<i> for consecutive i starting
    at 1; iteration stops at the first missing radio field.  Always
    redirects back to the home page.
    """
    # The original `request.method == 'POST'` was a no-op expression, so
    # any method reached the save loop; guard it properly.  @login_required
    # is needed because request.user.id is stored below.
    if request.method == 'POST':
        q_counter = 1
        while 'radio_' + str(q_counter) in request.POST:
            StudentExams(
                studentId=request.user.id,
                examId=request.POST['examid'],
                questionId=request.POST['question' + str(q_counter)],
                answerValue=request.POST['radio_' + str(q_counter)],
            ).save()
            q_counter += 1
    return HttpResponseRedirect('/')
#Creates new exam by teacher createExam.html
@login_required
def createExam(request):
    """Create an exam with questions and answers from createExam.html.

    GET renders the form (teachers only); POST walks numbered form fields
    question<i>, rightAnswer_q<i> and q<i>_answer<j> until the first gap,
    then persists one Exam, its Questions and their Answers.
    """
    if request.method == 'GET':
        if is_teacher(request.user) == False:
            message = "You don't have permission from the Universe and beyond to create an exam!"
            obj = {"message": message}
            return render_to_response('index.html',obj, RequestContext(request))
        else:
            return render_to_response('exams/createExam.html', RequestContext(request))
    else:#Post
        # NOTE(review): no-op comparison left over; this branch already
        # implies a non-GET request.
        request.method == 'POST'
        q_counter = 1
        q = []
        whileBoolQ = True
        # Collect questions 1..n until a numbered field is missing.
        while whileBoolQ:
            if 'question'+str(q_counter) in request.POST:
                q_name = request.POST ['question'+str(q_counter)]
                q_order = q_counter
                q_correctAnswer = request.POST ['rightAnswer_q'+str(q_counter)]
                a_counter = 1
                a = []
                whileBoolA = True
                # Collect this question's answers 1..m the same way.
                while whileBoolA:
                    if 'q'+str(q_counter)+'_answer'+str(a_counter) in request.POST:
                        a_name = request.POST ['q'+str(q_counter)+'_answer'+str(a_counter)]
                        # NOTE(review): stores the question-level right-answer
                        # field for every answer -- presumably intended to be
                        # a per-answer boolean; verify against the template.
                        trueOrFalse = request.POST ['rightAnswer_q'+str(q_counter)]
                        a_order = a_counter
                        a.append({ "a_name": a_name, "trueOrFalse": trueOrFalse, "a_order": a_order })
                        a_counter += 1
                    else:
                        whileBoolA = False
                q.append({ "q_name": q_name, "q_order": q_order ,"q_correctAnswer": q_correctAnswer,"answers": a, "a_count": a_counter,})
                q_counter += 1
            else:
                whileBoolQ = False
        # Count questions (equivalent to len(q)).
        questCounter = 0
        for question in q:
            questCounter += 1
        new_exam = Exam(
            name = request.POST ['examName'],
            owner = request.user,
            # created = request.POST ['created'],
            examStartDay = request.POST ['examStart'],
            examEndDay = request.POST ['examEnd'],
            examStartTime = request.POST ['timeStart'],
            examEndTime = request.POST ['timeEnd'],
            averageGrade = 5,
            duration = 1,
            )
        new_exam.save()
        # Points are split evenly; integer division under Python 2.
        questionValue = 100/questCounter
        for questions in q:
            new_question = Question(
                name = questions["q_name"],
                order = questions["q_order"],
                value = questionValue,
                correctAnswer = questions["q_correctAnswer"],
                # Latest-saved Exam is the one created above.
                examId = Exam.objects.order_by('-pk')[0]
                )
            new_question.save()
            for answers in questions["answers"]:
                new_answer = Answer(
                    name = answers["a_name"],
                    order = answers["a_order"],
                    rightAnswer = answers["trueOrFalse"],
                    # Latest-saved Question is the one created above.
                    questionId = Question.objects.order_by('-pk')[0]
                    )
                new_answer.save()
        return HttpResponseRedirect('/')
#Connected to a view that sums up the coorect aswers resultByStudent
@login_required
def resultByStudent(request):
    """Show the logged-in student's per-exam grades and their average.

    Reads from the ResultByStudent database view; renders a notice when
    the student has not taken any exams yet.  Only GET is handled.
    """
    if request.method == 'GET':
        allResultByStudent = ResultByStudent.objects.all().filter(studentId__exact=request.user.id)
        if allResultByStudent:
            # The original assigned `obj = null` (a NameError) and reset the
            # accumulators inside the loop, so only the last grade counted.
            # Accumulate across all results instead.
            sum_grade = 0
            exam_counter = 0
            for result in allResultByStudent:
                sum_grade += result.grade
                exam_counter += 1
            # float() avoids Python 2 integer division truncating the average.
            avg_grade = float(sum_grade) / exam_counter
            obj = {"allResultByStudent": allResultByStudent, "avg_grade": avg_grade}
            return render_to_response('exams/resultByStudent.html', obj, RequestContext(request))
        else:
            message = "You have not taken any exams, therefore we can not show you your grades"
            obj = {"message": message}
            return render_to_response("index.html", obj, RequestContext(request))
def is_teacher(user):
    """Return True if *user* matches a row in the Teacher table by username."""
    # exists() lets the database stop at the first match instead of counting
    # every row as the original `.count() > 0` did.
    return Teacher.objects.filter(username=user).exists()
#!/usr/bin/env python
# Standard Django (<= 1.5) management bootstrap script.
from django.core.management import execute_manager
import imp
try:
    # Only verifies a settings module can be found next to this file.
    imp.find_module('settings') # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
    sys.exit(1)
import settings
if __name__ == "__main__":
    execute_manager(settings)
# URL routing for the exams application (Django <= 1.5 `patterns` syntax).
from django.conf.urls.defaults import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
from exams import views
from exams.views import *
import settings
admin.autodiscover()
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'YourSchool.views.home', name='home'),
    # url(r'^YourSchool/', include('YourSchool.foo.urls')),
    ('^$', home),
    # \d+ captures the exam id passed to the view as eId.
    ('^takeExam/(\d+)/$', takeExam ),
    (r'^answerExam/$', answerExam ),
    (r'^createExam/$', createExam),
    ('^resultByStudent/$', resultByStudent),
    #('^resultByStudent/$', error(404)),
    ('^studentIndex/$', studentIndex),
    ('^teacherIndex/$', teacherIndex),
    (r'^disableExam/$', disableExam),
    (r'^accounts/login/$','django.contrib.auth.views.login'),
    (r'^accounts/logout/$',logout_view),
    # Uncomment the admin/doc line below to enable admin documentation:
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
    # Serve uploaded media straight from Django (development only).
    (r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
)
#!/usr/bin/env python
# Standard Django (<= 1.5) management bootstrap script.
from django.core.management import execute_manager
import imp
try:
    # Only verifies a settings module can be found next to this file.
    imp.find_module('settings') # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
    sys.exit(1)
import settings
if __name__ == "__main__":
    execute_manager(settings)
# *
# * Copyright (C) 2012-2013 Garrett Brown
# * Copyright (C) 2010 j48antialias
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with XBMC; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# * Based on code by j48antialias:
# * https://anarchintosh-projects.googlecode.com/files/addons_xml_generator.py
""" addons.xml generator """
import os
import sys
# Compatibility with 3.0, 3.1 and 3.2 not supporting u"" literals.
# Compare the documented version tuple instead of the lexicographic
# `sys.version < '3'` string comparison, which is fragile.
if sys.version_info[0] < 3:
    import codecs
    def u(x):
        """Decode *x* with the unicode-escape codec (Python 2 path)."""
        return codecs.unicode_escape_decode(x)[0]
else:
    def u(x):
        """Identity: Python 3 str is already unicode."""
        return x
class Generator:
    """
    Generates a new addons.xml file from each addons addon.xml file
    and a new addons.xml.md5 hash file. Must be run from the root of
    the checked-out repo. Only handles single depth folder structure.
    """
    def __init__( self ):
        """Build both output files immediately on construction."""
        # generate files
        self._generate_addons_file()
        self._generate_md5_file()
        # notify user
        print("Finished updating addons xml and md5 files")
    def _generate_addons_file( self ):
        """Concatenate every addon's addon.xml into one addons.xml."""
        # addon list
        addons = os.listdir( "." )
        # final addons text
        addons_xml = u("<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\n<addons>\n")
        # loop thru and add each addons addon.xml file
        for addon in addons:
            try:
                # skip any file or .svn folder or .git folder
                if ( not os.path.isdir( addon ) or addon == ".svn" or addon == ".git" ): continue
                # create path
                _path = os.path.join( addon, "addon.xml" )
                # split lines for stripping
                # NOTE(review): `encoding=` is a Python 3 open() keyword;
                # the Python 2 branch below would fail on this line --
                # confirm the intended interpreter version.
                xml_lines = open( _path, "r" , encoding="UTF-8").read().splitlines()
                # new addon
                addon_xml = ""
                # loop thru cleaning each line
                for line in xml_lines:
                    # skip encoding format line
                    if ( line.find( "<?xml" ) >= 0 ): continue
                    # add line
                    if sys.version < '3':
                        addon_xml += unicode( line.rstrip() + "\n", "UTF-8" )
                    else:
                        addon_xml += line.rstrip() + "\n"
                # we succeeded so add to our final addons.xml text
                addons_xml += addon_xml.rstrip() + "\n\n"
            except Exception as e:
                # missing or poorly formatted addon.xml
                print("Excluding %s for %s" % ( _path, e ))
        # clean and add closing tag
        addons_xml = addons_xml.strip() + u("\n</addons>\n")
        # save file
        self._save_file( addons_xml.encode( "UTF-8" ), file="addons.xml" )
    def _generate_md5_file( self ):
        """Write the md5 digest of addons.xml next to it (addons.xml.md5)."""
        # create a new md5 hash
        try:
            # Python 2 path: the legacy md5 module.
            import md5
            m = md5.new( open( "addons.xml", "r" ).read() ).hexdigest()
        except ImportError:
            # Python 3 path: hashlib over the UTF-8 encoded text.
            import hashlib
            m = hashlib.md5( open( "addons.xml", "r", encoding="UTF-8" ).read().encode( "UTF-8" ) ).hexdigest()
        # save file
        try:
            self._save_file( m.encode( "UTF-8" ), file="addons.xml.md5" )
        except Exception as e:
            # oops
            print("An error occurred creating addons.xml.md5 file!\n%s" % e)
    def _save_file( self, data, file ):
        """Write *data* (bytes) to *file*, reporting rather than raising on error."""
        try:
            # write data to the file (use b for Python 3)
            open( file, "wb" ).write( data )
        except Exception as e:
            # oops
            print("An error occurred saving %s file!\n%s" % ( file, e ))
if ( __name__ == "__main__" ):
    # start: constructing the Generator builds addons.xml and its md5.
    Generator()
# h1. fnfal
# Functional Fixed Arity Language
# josh 2011-09-19
# Many times I have needed to provide some small amount of
# scriptability for my programs to users reasonable well versed in
# command line usage, but ignorant of the intricacies of Python
# programming. This little library is a first stab at simple
# logo-style fixed arity language.
# h2. Front Matter
# h3. Imports
import sys, os, re, string, cStringIO, pprint
# h2. Utilities
# Small type predicates used throughout the interpreter.
def intp(data):
    """True when data is an int."""
    return isinstance(data, int)
def stringp(data):
    """True for any string type (str or unicode). I hate unicode..."""
    return isinstance(data, basestring)
def tuplep(data):
    """True when data is a tuple."""
    return isinstance(data, tuple)
def listp(data):
    """True when data is a list."""
    return isinstance(data, list)
def hashp(data):
    """True when data is a dict."""
    return isinstance(data, dict)
# An 'atom' is a word, or name, which admits to no other
# operations. It's immutable and I use it for all unquoted objects in
# the parse stream.
class atom(object):
    """An immutable word/name; used for every unquoted token in the parse
    stream. Admits no operations beyond holding its value."""
    def __init__(my, value):
        my.value = value
    def __repr__(my):
        return "atom(%s)" % my.value
# The @word@ function parses out a single word (marked by an optional
# terminating delimiter (or set of delimeters)) from an input
# stream. The input stream should be a "file-like" object, which means
# an actual file or something derived from StringIO (Python's closest
# equivalent to REBOL series types).
def word(stream, stop=None):
    """( stream stop -- (chunk, rest)) Parse next word from input stream.

    `stream` is a file-like object read one character at a time; `stop`
    is the set of delimiter characters (defaults to whitespace).
    Handles backslash escapes (the backslash is kept in the chunk),
    double-quoted strings (returned with surrounding quotes) and `;`
    comments, which discard the rest of the line.
    """
    if not stop: stop = " \t\n\r"
    chunk = ""
    escaped = False
    while True:
        c = stream.read(1)
        if not c: break
        if escaped:
            # Previous character was a backslash: take this one literally.
            chunk += c
            escaped = False
        elif c in stop:
            if len(chunk): break # End of word.
            # Otherwise consume leading whitespace.
        else:
            if not len(chunk):
                if '"' == c:
                    # Recurse with '"' as the only stop character to grab
                    # the whole string; quotes are preserved in the result.
                    chunk = '"%s"' % word(stream, '"')
                    c = stream.read(1) # Get rid of trailing quote.
                    break # We've got the string, so we're done.
                elif ";" == c:
                    # Comment: consume and discard to end of line.
                    word(stream, "\n")
                else: chunk += c
            else:
                if "\\" == c: escaped = True
                chunk += c
    return chunk
# In a Forth-like spirit, I don't want any special words. However,
# because of the prefix/infix notation, I must unavoidably have
# grouping operators. So, I have two: "[]" (list) indicates an
# unevaluated list, while the contents of "()" (tuple) are to be
# processed immediately. This allows algebraic grouping such that my
# target audience should be used to. Everything else becomes a string,
# a number or an 'atom'.
def collect(stream):
    """Parse the whole stream into a nested list AST.

    "[ ]" groups become lists (deferred evaluation) and "( )" groups
    become tuples (immediate evaluation).  Quoted words become str,
    numbers become float, everything else becomes an `atom`.
    """
    # ast is a stack of open clauses; ast[0] is the top-level result.
    ast = [[]]
    clause = ast[-1]
    while True:
        w = word(stream)
        if not w: break
        if "[" == w or "(" == w:
            # Open a new nesting level.
            clause = []
            ast.append(clause)
        elif "]" == w or ")" == w:
            x = ast.pop()
            if ")" == w: x = tuple(x)
            ast[-1].append(x)
            clause = ast[-1]
        elif w.startswith('"'):
            # Strip the surrounding quotes kept by word().
            clause.append(w[1:-1])
        else:
            try: clause.append(float(w))
            except: clause.append(atom(w))
    assert 1 == len(ast), "List not closed."
    return ast[0]
# h2. Language Kernel
class fnfal(object):
    """Interpreter kernel for a prefix, fixed-arity, logo-style language.

    Words live in the class-level dictionary `__d`, which maps
    name -> (docstring, arity, execution token).
    """
    # I statically define the @__d@ dictionary to avoid arguments with
    # *attr functions.
    __d = {}
    # To initialize the interpreter, I load the dictionary with all the
    # tagged words in the class. The dectionary format is docstring, arity
    # and xt. When I have the capscity to define words, they will use a
    # list instead of xt. I add extra words with special naming using the
    # "opt()" method.
    def __init__(my):
        # Register every method whose name starts with "fn" under the
        # remainder of its name, plus words needing special characters.
        for a in dir(my):
            if not a.startswith("fn"): continue
            my.opt(getattr(my, a), a[2:])
        my.opt(my.xtype, "type?")
    def opt(my, xt, *names):
        """Register execution token *xt* under each of *names*.

        Arity is derived from the function's argument count, minus one
        for the `my` (self) parameter.  Python 2 only (`func_code`).
        """
        for name in names: my.__d[name] = (
            xt.__doc__,
            xt.func_code.co_argcount - 1,
            xt)
    # h3. Tokenization and Parsing
    # Note: This needs to handle tuple evaluation (remember, lists are
    # deferred, tuples are immediate). Also needs to understand variables.
    def fndo(my, chunks):
        """Evaluate a mutable list of parsed chunks; return list of results."""
        results = []
        while len(chunks):
            chunk = chunks.pop(0)
            if isinstance(chunk, atom):
                # NOTE(review): a missing word raises KeyError, not
                # ValueError, so this handler likely never fires -- verify.
                try: doc, arity, xt = my.__d[chunk.value]
                except ValueError:
                    raise SyntaxError, "I don't know how to %s." % chunk.value
                args = []
                # NOTE(review): each recursive call consumes *all* remaining
                # chunks; words with arity > 1 appear to misbehave -- verify.
                for i in range(arity): args.append(my.fndo(chunks)[0])
                results.append(apply(xt, args))
            else: results.append(chunk)
        return results
    def fneval(my, text):
        "( text -- results) Execute code in text."
        # Accept plain strings by wrapping them in a file-like object.
        if stringp(text): text = cStringIO.StringIO(text)
        return my.fndo(collect(text))
    # h3. Memory
    def fnset(my, key, value):
        # NOTE(review): `my.__dictionary` (mangled to _fnfal__dictionary)
        # is never defined anywhere, so this raises AttributeError;
        # probably a separate variable store was intended -- verify.
        my.__dictionary[key] = value
        return value
    def xtype(my, x): "( x -- type) Get type of x."; return type(x)
    # h3. Math Operations
    def fnadd(my, a, b): return a + b
    def fnsub(my, a, b): return a - b
    def fnmul(my, a, b): return a * b
    def fndiv(my, a, b): return a / b
    def fnmod(my, a, b): return a % b
    # h3. Control Flow
    def fnrepeat(my, times, body):
        # Execute the deferred body list `times` times.
        for i in range(times): my.fndo(body)
    # In concept, this is what the fn word should do. This code will
    # almost certainly not work.
    def fnfn(my, name, args, body):
        my.__d[name] = ("Documentation", len(args), body)
    # h3. I/O Operations
    def fnprint(my, msg):
        "( msg -- msg) Print msg to stdout."
        print msg
        return msg
class model(fnfal):
    """Skeleton application shell wrapping the interpreter kernel."""
    def __init__(my):
        fnfal.__init__(my)
    def main(my):
        # Treat command-line arguments as source words.
        my.fneval(collect(sys.argv[1:])) # XXX Need to modify collect
        # to accept pre-parsed words.
if "__main__" == __name__:
    # Tests
    # Exercise word() on comments, plain words, strings and escapes.
    text = cStringIO.StringIO('This ; Once upon a time.\nGork "Hack a day." 75 "Hack \\"a\\" day."')
    assert "This" == word(text)
    assert "Gork" == word(text)
    assert '"Hack a day."' == word(text)
    assert "75" == word(text)
    assert '"Hack \\"a\\" day."' == word(text)
    s = cStringIO.StringIO("""\
def "fib" [
    "( n -- fn) Compute Fibonnacci number."
    n
] [
    ifelse any [ 0 = n 1 = n ]
    n ; Return unmodified argument.
    [ fib ( n - 1 ) + fib ( n - 2 ) ]
]
""")
    # pprint.pprint(collect(s))
    assert 4 == len(collect(s)), "Should be four clauses in definition."
    # Smoke-test the interpreter kernel itself.
    f = fnfal()
    assert [8,] == f.fneval("8"), "Fndo: Numeric constant."
    assert [8,] == f.fneval("add 5 3"), "Fndo: Add."
    assert [8,] == f.fneval("print add 4 4"), "Fndo: Print."
    assert [type(0.0),] == f.fneval("type? add 8 3"), "Fndo: Type result."
| Python |
#!/usr/bin/env python
import time
class StopWatch(object):
    """Simulates a typical stopwatch: starts on creation, restartable."""

    def __init__(self):
        # Starting the watch is just a restart from zero.
        self.restart()

    def restart(self):
        """Reset the reference point to the present moment."""
        self.start = time.time()

    def __str__(self):
        """Seconds elapsed since the last (re)start, as a string."""
        elapsed = time.time() - self.start
        return str(elapsed)
| Python |
#!/usr/bin/env python
# Bootstrap script: download the argparse backport when it is missing
# (needed on Python < 2.7).  Python 2 only (print statement, urllib2).
try:
    import argparse
except ImportError:
    import zipfile
    import urllib2
    import os
    # A non-seekable stream doesn't suffice, downloading
    open('argparse.zip', 'wb').write(urllib2.urlopen(
        'http://argparse.googlecode.com/files/argparse-1.1.zip'
    ).read())
    # Extract the module file
    f = zipfile.ZipFile('argparse.zip')
    open('argparse.py', 'w').write(f.read('argparse-1.1/argparse.py'))
    f.close()
    # Compile
    import argparse
    # Cleanup
    os.remove('argparse.py')
    os.remove('argparse.zip')
else:
    # Reaching here means argparse imported fine; nothing to install.
    print "Error: argparse is already available"
#!/usr/bin/env python
from itertools import count, izip, dropwhile
from collections import deque
from operator import itemgetter
from bisect import bisect_left
import sys
import os
import time
import cPickle
import numpy as np
# additional modules
from vrptw import *
from vrptw.consts import *
from compat import *
# NEIGBOURHOOD OPERATORS - single step trials
# Registry mapping neighbourhood-operator names to their implementations.
operations = {}

def operation(func):
    """Register *func* in the `operations` table under its own name.

    Meant to be used as a decorator on single-solution operators; the
    function is returned unchanged so the decorated name still binds it.
    """
    name = func.__name__
    operations[name] = func
    return func
@operation
def op_greedy_single(sol, randint = r.randint):
    """Neighbourhood operator - remove random customer and insert back."""
    # pick a route
    # Note: local `r` shadows the module-level random module from here on.
    r = randint(0, sol.k-1)
    pos = randint(0, sol.r[r][R_LEN]-2)
    c = remove_customer(sol, r, pos)
    # Greedy reinsertion at the best position anywhere in the solution.
    insert_customer(sol, c)
@operation
def op_greedy_multiple(sol, randint = r.randint):
    """Remove a few customers from a random route and insert them back."""
    r = randint(0, sol.k-1)
    # At most 9 customers, and never more than the route actually holds.
    num_removed = randint(1, min(9, sol.r[r][R_LEN]-1))
    removed = []
    for i in xrange(num_removed):
        removed.append(remove_customer(sol, r, randint(0, sol.r[r][R_LEN]-2)))
    for c in removed:
        insert_customer(sol, c)
def pick_short_route(sol, random=r.random):
    """Pick a route with preference for shortest."""
    # Roulette-wheel selection: weights are inverse route lengths,
    # chosen via cumulative sum + binary search.
    r_lengths = np.array([1.0/(rt[R_LEN]-1) for rt in sol.r]).cumsum()
    return bisect_left(r_lengths, random()*r_lengths[-1])
def pick_long_route(sol, random=r.random):
    """Return a random route, with preference for the longer."""
    # Roulette-wheel selection weighted directly by route length.
    lengths = np.array([rt[R_LEN]-1 for rt in sol.r]).cumsum()
    return bisect_left(lengths, random()*lengths[-1])
@operation
def op_fight_shortest(sol, random=r.random, randint=r.randint):
    """Picks and tries to empty a random route with preference for shortest."""
    r = pick_short_route(sol)
    # Up to 10 customers, capped by the route's actual size.
    num_removed = min(randint(1, 10), sol.r[r][R_LEN]-1)
    removed = []
    for i in xrange(num_removed):
        removed.append(remove_customer(sol, r, randint(0, sol.r[r][R_LEN]-2)))
    for c in removed:
        insert_customer(sol, c)
@operation
def op_tabu_single(sol, randint = r.randint, choice=r.choice):
    """Pick one customer from a random route and move him to a different.

    On failure to place the customer elsewhere, the removal is undone.
    """
    r = pick_short_route(sol)
    old_len = sol.r[r][R_LEN]
    old_k = sol.k
    pos = randint(0, old_len-2)
    c = remove_customer(sol, r, pos)
    # d("Route %d, from %d, removed customer %d"%(r,pos,c))
    for tries in xrange(sol.k-1):
        # max k tries
        r2 = randint(0, sol.k-2)
        # picking all other with equal probability
        # Skip-adjust only when route count is unchanged (removal may
        # have emptied route r and shifted indices).
        if r2 >= r and sol.k == old_k: r2 +=1
        # d("other route %d" % r2)
        # Only consider routes at least as long as the source route.
        if sol.r[r2][R_LEN] < old_len:
            continue
        candidates = sorted(find_allpos_on(sol, c, r2))
        if not candidates:
            continue
        dist, pos = candidates[-1] # choice(candidates)
        # d("found pos %d (%.2f inc)" % (pos, dist))
        insert_at_pos(sol, c, r2, pos)
        return
    # customer c from r failed to move - insert him back
    u.undo()
@operation
def op_tabu_shortest(sol, randint = r.randint):
    """Displace part of a short route onto longer routes; undo on failure."""
    r = pick_short_route(sol)
    num_removed = randint(1, sol.r[r][R_LEN]-1)
    removed = []
    for i in xrange(num_removed):
        removed.append(remove_customer(sol, r, randint(0, sol.r[r][R_LEN]-2)))
    for c in removed:
        tried = set()
        found = False
        for tries in xrange(sol.k*2):
            # max k tries
            r2 = pick_long_route(sol)
            if r2 in tried:
                continue
            # print "Long route", r2
            # time.sleep(0.6)
            dist, pos = find_bestpos_on(sol, c, r2)
            if pos:
                insert_at_pos(sol, c, r2, pos)
                found = True
                break
            tried.add(r2)
        if not found:
            # Roll back the whole move if any customer stays homeless.
            u.undo()
            return
    ##print "We displaced %d customers from %d:" % (num_removed, r), removed
# major solution functions (metaheuristics)
def build_first(sol):
    """Greedily construct the first solution."""
    sol.reset()
    # Insert customers in the task's currently configured sort order.
    for c in sol.task.getSortedCustomers():
        insert_customer(sol, c[ID])
    # Remember which ordering produced this solution.
    sol.mem['init_order'] = VrptwTask.sort_order
    u.commit()
    sol.loghist()
def build_by_savings(sol, wait_limit = None, mi = 1):
    """Construct a new solution by savings heuristic.

    Starts from one route per customer and repeatedly joins the route
    pair with the largest saving.  `mi` weighs the inter-route leg;
    `wait_limit` optionally rejects joins that cause long waiting.
    """
    def check_saving(x, y):
        """Compute and return possible saving for concatenating x and y.
        The return value is a tuple (saving, wait_time)"""
        xk, _, arr_xk, _ = sol.r[x][R_EDG][-1]
        _, y0, _, larr_y0 = sol.r[y][R_EDG][0]
        arr_y0 = arr_xk + sol.t(xk, y0)
        wait_y0 = max(0, sol.a(y0) - arr_y0)
        # Reject on capacity, time-window or waiting-limit violation.
        if (sol.r[x][R_CAP]+sol.r[y][R_CAP] > sol.task.capa
            or arr_y0 > larr_y0
            or (wait_limit and wait_y0 > wait_limit) ):
            return None, None
        return sol.d(xk, 0) + sol.d(0, y0) - mi*sol.d(xk, y0), wait_y0
    def list_savings():
        """Return list of possible savings as [(saving, route 1, route 2)]."""
        savings = []
        for i in xrange(sol.k):
            for j in xrange(sol.k):
                if i <> j:
                    s, w = check_saving(i, j)
                    if s is not None:
                        savings.append((s, -w, i, j))
        return savings
    sol.reset()
    # One single-customer route per customer to start from.
    for c in xrange(sol.task.N):
        insert_new(sol, c+1)
    while True:
        savings = list_savings()
        if len(savings) == 0:
            break
        # Best saving first; -w breaks ties toward shorter waits.
        sav, wt, r1, r2 = max(savings)
        # print 'saving', sav, 'by join of', r1, r2, 'wait', wt, 'in', sol.task.name
        print "Joining %d and %d for %.1f saving." % (sol.r[r1][R_EDG][-1][E_FRO], sol.r[r2][R_EDG][0][E_TOW], sav)
        join_routes(sol, r1, r2)
    # turned off for efficiency (now assumed correct)
    # sol.check()
    u.commit()
    return sol
def build_by_mfsavings(sol, wait_limit = None, mi = 1):
    """Build by maybe faster savings heuristic implementation.
    Should actually provide the same results as 'normal' O(n**3) savings.

    Sorts all candidate joins once up front, then applies them greedily,
    tracking route membership through the prevs/nexts/route tables.
    """
    sol.reset()
    for c in xrange(sol.task.N):
        insert_new(sol, c+1)
    # prevs/nexts record which customers are already joined; route maps
    # a customer to the route object currently containing it.
    prevs = [0] * (sol.task.N+1)
    nexts = [0] * (sol.task.N+1)
    route = [None]+sol.r
    # Negated savings, so an ascending sort yields best joins first.
    possible = [ (mi*sol.d(i,j) - sol.d(0, i) - sol.d(j, 0), i, j)
                 for i in xrange(1, sol.task.N+1)
                 for j in xrange(1, sol.task.N+1)
                 if i <> j ]
    possible.sort()
    # TODO: check validity and perform savings
    for sav, i, j in possible:
        # third condition: already joint (ends of the same route)
        if nexts[i] or prevs[j] or route[i]==route[j]:
            continue
        # check arrivals
        xk, _, arr_xk, _ = route[i][R_EDG][-1]
        _, y0, _, larr_y0 = route[j][R_EDG][0]
        arr_y0 = arr_xk + sol.t(xk, y0)
        wait_y0 = max(0, sol.a(y0) - arr_y0)
        if (route[i][R_CAP]+route[j][R_CAP] > sol.task.capa
            or arr_y0 > larr_y0
            or (wait_limit and wait_y0 > wait_limit) ):
            continue
        # join routes
        print "Joining %d and %d for %.1f saving." % (i, j, -sav)
        prevs[j] = i
        nexts[i] = j
        # remember last customer, before joining
        last_rj = route[j][R_EDG][-1][E_FRO]
        prevdist = sol.dist
        join_routes_ref(sol, route[i], route[j])
        # careful - this must come after joining
        route[last_rj] = route[i]
        # print "result:\n", route[i]
        # from compat import print_like_Czarnas
        # print_like_Czarnas(sol, sparse=True)
        # TURN ON IF NECCESSARY:
        # sol.check_full()
    return sol
def local_search(sol, oper, end=0, verb=False, speed=None):
    """Optimize solution by local search.

    Repeatedly applies operator *oper* until wall-clock time *end*
    (default: now + 3 s).  Improvements and route-count ties are kept
    (committed); worse moves are undone.  If *speed* is a list, the
    achieved moves-per-second rate is appended to it.
    """
    # local rebinds
    ci=u.commit; undo=u.undo; val=sol.val
    oldval = val()
    from time import time
    # stats
    updates = 0
    steps = 0
    start = time()
    if end == 0:
        end = time()+3
    while time() < end:
        steps += 1
        oper(sol)
        newval = val()
        if newval < oldval:
            oldval = newval
            updates += 1
            sol.loghist()
            ci()
        elif val()[0] == oldval[0]:
            # huh, not worse, when it comes to routes
            ci()
        else:
            undo()
    elapsed = time()-start
    if verb:
        print " ".join([ sol.infoline(),
            "%.1f s, %.2f fps, %d acc (%.2f aps)" % (
            elapsed, steps/elapsed, updates, updates/elapsed) ])
    # fps measurement from outside
    if not speed is None:
        speed.append(steps/elapsed)
    sol.loghist()
    return sol
# MISC. SOLUTION FUNCTIONS - postprocessing
def plot_history(sol):
    """Display a matplotlib graph of solution progress.

    Left subplot: route count vs. time; right subplot: distance vs.
    time.  Known best values, when present, are drawn as horizontal
    reference lines.
    """
    from matplotlib import pyplot as plt
    k, dist, t = zip(*sol.history)
    fig = plt.figure()
    fig.suptitle(sol.task.name+" "+sol.infoline())
    # subplot of routes vs. time
    kplt = fig.add_subplot(121)
    kline = kplt.plot(t, k, 'g')
    min_k = (sol.task.best_k or 2)-2
    # scaling
    kplt.axis([0, sol.history[-1][2], min_k, max(k)+1])
    # labels etc.
    plt.xlabel('time [s]')
    plt.ylabel('routes (k)')
    if sol.task.best_k:
        kplt.axhline(sol.task.best_k+0.03)
    # subplot of distance vs. time
    dplt = fig.add_subplot(122)
    dline = dplt.plot(t, dist, 'g')
    # scaling the plot
    min_d = min(dist+(sol.task.best_dist,))
    max_d = max(dist+(sol.task.best_dist,))
    span_d = max_d - min_d
    dplt.axis([0, sol.history[-1][2], min_d-span_d/20., max_d+span_d/20.])
    # decoration with labels, etc.
    plt.grid(True)
    dplt.set_xlabel('time [s]')
    dplt.set_ylabel('dist')
    dplt.yaxis.set_label_position("right")
    dplt.yaxis.set_ticks_position("right")
    if sol.task.best_dist:
        dplt.axhline(sol.task.best_dist)
    plt.show()
# aggressive route minimization
def find_replace_pos_on(sol, c, r):
    """Yield (dist_increase, pos) of occupied spots on route *r* where
    customer *c* could replace the current occupant feasibly."""
    # pull out deep things locally
    # Note: local `time` shadows the time module within this function.
    time = sol.task.time
    cust = sol.task.cust
    dist = sol.task.dist
    c_A = cust[c][A]
    c_B = cust[c][B]
    edges = sol.r[r][R_EDG]
    # Minimum demand the displaced customer must free up to fit c.
    q_out = sol.r[r][R_CAP] + cust[c][DEM] - sol.task.capa
    # customers - d - deleted, a - starting, b - final, c - inserted
    a, d, arr_a, _ = edges[0]
    for pos in xrange(1, len(edges)):
        d, b, arr_d, larr_b = edges[pos]
        # check for too early positions, and weight constraint
        if c_A > larr_b or cust[d][DEM] < q_out:
            a, d, arr_a, larr_d = d, b, arr_d, larr_b
            continue
        # check for too late - end of scan
        if arr_a > c_B:
            break
        # Feasibility window propagation around the candidate spot.
        arr_c = max(c_A, arr_a+time[a][c])
        arr_b = max(cust[b][A], arr_c+time[c][b])
        larr_c = min(c_B, larr_b-time[c][b])
        larr_a = min(cust[a][B], larr_c-time[c][b])
        if arr_a <= larr_a and arr_c <= larr_c and arr_b <= larr_b:
            distinc = dist[a][c]+dist[c][b]-(dist[a][d]+dist[d][b])
            yield (distinc, pos-1)
        # for next loop pass:
        a, d, arr_a, larr_d = d, b, arr_d, larr_b
def find_replace_pos(sol, c):
    """Yield (dist_increase, route, pos) replacement spots for customer c
    across all routes."""
    for r in xrange(len(sol.r)):
        # replacing single customer makes little sense
        if sol.r[r][R_LEN] > 2:
            for distinc, pos in find_replace_pos_on(sol, c, r):
                yield (distinc, r, pos)
def short_light_route(sol):
    """Return the index of the shortest of the three lightest routes."""
    from heapq import nsmallest
    if sol.k > 3:
        # Pre-select the three routes carrying the least load.
        candidates = nsmallest(3, xrange(sol.k), key=lambda x: sol.r[x][R_CAP])
    else:
        candidates = xrange(sol.k)
    # Among candidates, pick the one with the fewest customers.
    return min( (sol.r[i][R_LEN], i) for i in candidates )[1]
def remove_route(sol, r):
    """Remove a route and return a list of its customers."""
    data = u.pop(sol.r, r)
    # Drop the leading depot node; keep only the real customers.
    cust = map(itemgetter(0), data[R_EDG])[1:]
    # Record the route-count and distance deltas for undo support.
    u.ada(sol, 'k', -1)
    u.ada(sol, 'dist', -data[R_DIS])
    return cust
@operation
def op_route_min(sol, route=None, random=r.random, randint=r.randint, data=dict(die=0)):
    """Emulate the route minimization (RM) heuristic by Nagata et al.

    Dissolves one route into an ejection pool (EP) and tries to re-home
    every customer elsewhere; raises RuntimeError on a dead end or when
    the shared `data['die']` kill flag is set by a caller's timer.
    Note the mutable default for `data`: it acts as a shared signalling
    channel that callers (e.g. resume()) override explicitly.
    """
    from collections import deque, defaultdict
    if route is None:
        r = short_light_route(sol)
    else:
        r = route
    # print "I'll try to eliminate route", r+1
    ep = deque(remove_route(sol, r))
    # print "%d customers left to go:"% len(ep), ep
    def insert(c, r, pos, ep):
        # Place customer c on route r at pos (thin wrapper for tracing).
        # print "Customer %d goes to %d at pos %d" % (c, r+1, pos)
        insert_at_pos(sol, c, r, pos)
        #print_like_Czarnas(sol)
        # print "Still left are:", ep
    recycled = defaultdict(int)
    def put_to_ep(c, front=True):
        # Return c to the ejection pool, counting how often it recycles.
        if front:
            ep.appendleft(c)
        else:
            ep.append(c)
        recycled[c] += 1
        # print "Next (%d) round for %d" % (c, recycled[c])
        if any(recycled[x] > 5 for x in ep):
            # print "Too much recycling in the EP: dead end"
            raise RuntimeError
    while len(ep) > 0 and not data['die']:
        c = ep.pop()
        # 1) Try a random route; 2) try the global best position;
        # 3) displace another customer; otherwise recycle c.
        r = randint(0, sol.k-1)
        _, pos = find_bestpos_on(sol, c, r)
        if not pos is None:
            insert(c, r, pos, ep)
            continue
        (_, pos), r = find_bestpos(sol, c)
        if not pos is None:
            insert(c, r, pos, ep)
            continue
        pos = sorted(find_replace_pos(sol, c))
        if pos:
            #print "Positions there:", pos
            #raw_input()
            _, r, p = pos[randint(0, min(5,len(pos)-1))]
            put_to_ep(remove_customer(sol, r, p), False)
            insert(c, r, p, ep)
            continue
        put_to_ep(c)
    if len(ep) > 0:
        print "Time out!"
        raise RuntimeError
    u.commit()
# MAIN COMMANDS
# Names of functions that may be invoked as top-level program commands.
commands = set()

def command(func):
    """Mark *func* as a CLI command by recording its name in `commands`.

    Returns the function untouched so it can be applied as a decorator;
    the decoratee should be a valid command.
    """
    name = func.__name__
    commands.add(name)
    return func
# the CLUSTER command - mpi4py parallelism
def mpi_master(sol, comm, size, args):
    """MPI rank 0: farm jobs out to workers and keep the best solution.

    Seeds workers with 'initial' (solution construction) jobs, then
    feeds 'killroute' jobs derived from each new best essence until the
    wall-clock budget (args.wall) runs out; finally saves the best.
    """
    from mpi4py import MPI
    essencs = []
    # 'inifinite' values:
    my_k = sol.task.N
    my_dist = sol.task.dist.sum()
    stat = MPI.Status()
    time_to_die = time.time() + args.wall
    started = time.time()
    # initial jobs - creating initial solutions
    jobs = deque([('initial', k) for k in sort_keys.keys()])
    if len(jobs) < size+5:
        jobs.extend([('initial', 'by_random_ord')]*(size+5-len(jobs)))
    print "initial jobs are:", jobs
    for i in xrange(1, size):
        comm.send(jobs.popleft(), dest=i)
    # working loop
    workers = size-1
    while workers > 0:
        resp = comm.recv(source=MPI.ANY_SOURCE, status=stat)
        if time.time() < time_to_die and len(jobs)>0:
            job = jobs.popleft()
            # When backlogged, skip killroute jobs for clearly-worse essences.
            while len(jobs) > 2000 and job[0]=='killroute' and job[2][0] > my_k+1:
                job = jobs.popleft()
            comm.send(job, dest=stat.Get_source())
        else:
            # Out of time or work: dismiss this worker.
            comm.send(('done',), dest = stat.Get_source())
            workers -= 1
        if resp[0] == 'initial' or resp[1] == 'ok':
            essence = resp[2]
            # Essences compare as (k, dist, ...): fewer routes first.
            if (my_k, my_dist) > essence[:2]:
                sol.set_essence(essence)
                sol.loghist()
                my_k = sol.k
                my_dist = sol.dist
                print "%.1f s, new best:" % (time.time()-started), sol.infoline()
                if essence[0] < my_k + 2:
                    for x in xrange(essence[0]):
                        jobs.append(('killroute', x, essence))
        elif resp[0] == 'killroute' and resp[1] == 'failed':
            pass # TODO: no idea what failed... (not sent)
        if len(jobs) > 1000000 and time_to_die <> 0:
            print "We've got problems, %.1f s" % (time.time()-started)
            time_to_die = 0
        if len(jobs) == 0:
            print "The jobs went out, %.1f s" % (time.time()-started)
    sol.save('_clus')
    exit()
def mpi_worker(sol, comm, rank, args):
    """MPI worker loop: receive orders from rank 0, send results back.

    Understands 'initial' (build a fresh solution with the given sort
    order), 'killroute' (try to remove route orders[1] from the supplied
    essence) and 'done' (terminate). 'perturb' is accepted but a no-op.
    """
    # maybe start working immediately
    while True:
        orders = comm.recv(source=0)
        # print rank, "recieved orders:", orders
        if orders[0] == 'done':
            break
        elif orders[0] == 'initial':
            VrptwTask.sort_order = orders[1]
            build_first(sol)
            comm.send(('initial','ok', sol.get_essence()), dest=0)
        elif orders[0] == 'killroute':
            sol.set_essence(orders[2])
            try:
                op_route_min(sol, orders[1])
                comm.send(('killroute', 'ok', sol.get_essence()), dest=0)
            except RuntimeError:
                # elimination gave up - roll back partial modifications
                u.undo()
                comm.send(('killroute', 'failed', sol.get_essence()), dest=0)
        elif orders[0] == 'perturb':
            pass
        else:
            print rank, "orders not understood", orders
    print "Bye from worker", rank
    exit()
@command
def cluster(args):
from mpi4py import MPI
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
if size < 2:
print "Sorry, only for > 1 process"
exit()
sol = VrptwSolution(VrptwTask(args.test))
if rank == 0:
mpi_master(sol, comm, size, args)
else:
mpi_worker(sol, comm, rank, args)
# POSTPROCESSING of old solutions
@command
def resume(args):
    """Load serialized solution, try to eliminate one route within args.wall s."""
    # autodestruction timeout mechanism:
    # die() flips the flag polled by op_route_min through `data`
    data = dict(die=0)
    def die():
        data['die'] = 1
    from threading import Timer
    t = Timer(args.wall, die)
    t.start()
    sol = load_solution(args.test)
    # print_like_Czarnas(sol)
    tgt = pick_short_route(sol)
    # guarded tries
    try:
        op_route_min(sol, tgt, data=data)
    except:
        # NOTE(review): bare except also catches KeyboardInterrupt - confirm
        t.cancel()
        print "Failed removal of %d from %s, still: %d." % (tgt, sol.task.name, sol.k+1)
        exit(1)
    else:
        t.cancel()
        sol.check_full()
        sol.save('_rsm')
        print_like_Czarnas(sol)
        print "Removed %d in %s, now: %s" % (tgt, sol.task.name, sol.infoline()),
@command
def perturb(args):
    """Load a saved solution and run one tabu-search pass over it."""
    solution = load_solution(args.test)
    local_search(solution, op_tabu_single)
    print_like_Czarnas(solution)
    solution.save('_pert')
@command
def grout(args):
"""Postprocess a solution using the proprietary grout program."""
import grout
sol = load_solution(args.test)
grout.DataLoader_load(sol.task.filename)
dd = grout.DistanceDecreaser()
dd.inflate(sol.flatten())
dd.setMaxEpochs(60)
best = grout.Solution()
dd.simulatedAnnealing(best)
sol.inflate(best.flatten())
sol.save('_grout')
print best.flatten()
# LOCAL SEARCH related techniques
def _optimize(test, op, wall, intvl):
"""An optimization funtion, which does not use argparse namespace."""
sol = VrptwSolution(VrptwTask(test))
build_first(sol)
print_like_Czarnas(sol)
print "Starting optimization for %d s, update every %s s." % (wall, intvl)
time_to_die = time.time() + wall
next_feedback = time.time() + intvl
while time.time() < time_to_die:
local_search(sol, operations[op], next_feedback, True)
print_like_Czarnas(sol)
next_feedback = time.time()+intvl
print "Wall time reached for %s." % test.name
sol.save()
print(sol.mem)
print_like_Czarnas(sol)
return sol
@command
def optimize(args):
    """Run single-process optimization as configured on the command line."""
    return _optimize(args.test, args.op, args.wall, args.intvl)
def _optimize_by_name(arg):
    """Adapter for map/Pool.map: arg is [filename, op, wall, intvl]."""
    # open the test filename (VrptwTask had problems with it)
    arg[0] = open(arg[0])
    return _optimize(*arg)
@command
def run_all(args):
    """As optimize, but runs all instances matched by args.glob.

    Each matched file is optimized `runs` times; with --multi the runs
    are distributed over a multiprocessing pool.
    """
    from glob import glob
    # --runs defaults to 0 in the parser; treat that as a single run.
    # (Bug fix: the list was previously multiplied by args.runs directly,
    # so the default produced an empty task list and nothing ran.)
    runs = args.runs or 1
    all_tasks = [[n, args.op, args.wall, args.intvl]
                  for n in glob(args.glob) * runs]
    if args.multi:
        from multiprocessing import Pool
        p = Pool()
        p.map(_optimize_by_name, all_tasks)
    else:
        map(_optimize_by_name, all_tasks)
def load_solution(f):
"""Unpickle solution from a stream."""
solution_data = cPickle.load(f)
print os.path.dirname(__file__), solution_data['filename']
filename = os.path.join(os.path.dirname(__file__),
solution_data['filename'])
print "Loading solution from:", filename
sol = VrptwSolution(VrptwTask(open(filename)))
sol.k, sol.dist = solution_data['val']
sol.r = solution_data['routes']
sol.mem = solution_data['mem']
try:
sol.history = solution_data['history']
except: pass
if not sol.check_full():
return None
print "Solution loaded:", sol.infoline()
return sol
@command
def load(args):
    """Loads a previously saved solution for analysis."""
    sol = load_solution(args.test)
    print_like_Czarnas(sol)
    print sol.mem
    print sol.get_signature()
    try:
        if len(sol.history):
            plot_history(sol)
        else:
            print "The solution has no history to plot"
    except ImportError:
        # plot_history pulls in matplotlib/GUI modules lazily
        print "Plotting history impossible (missing GUI or matplotlib)"
@command
def export(args):
"""Create other formats for saved solution, like .vrp"""
# TODO: real export; for now just print successors
sol = load_solution(args.test)
print "\n".join(str(s) for s in sol.get_successors())
# POOLCHAIN metaheuristic and friends_
def worker(sol, pools, operators, config):
"""The actual working process in a poolchain."""
import Queue as q
from multiprocessing import Queue
proc_id, size, intvl, deadline = config
print "Worker launched, id:", proc_id
# disperse workers' random nubmer generators
r.jumpahead(20000*proc_id)
# disperse workers' feedback a bit (actually: random)
next_feedback = time.time() + (proc_id+1)*intvl
num_produced = 0
# the list for measurement of fps etc.
myfps = []
while time.time() < deadline:
# choose solution to work on this round
try:
# fish in the the pool
new_essence = pools[1].get_nowait()
sol.set_essence(new_essence)
print "Worker", proc_id, "got job:", sol.infoline()
except q.Empty:
# if nothing to take - produce new one or keep current
if num_produced < 5 or r.random() < 4.0/num_produced:
order = r.choice(sort_keys.keys())
VrptwTask.sort_order = order
build_first(sol)
print("Worker %d produced new: %s by %s" %
(proc_id, sol.infoline(), order))
# else: go on with current
# run optimization
local_search(sol, operators[1], next_feedback, speed=myfps)
next_feedback = time.time() + intvl*(size+1)
# throw the solution back to the pool
pools[2].put(sol.get_essence())
# endwhile:
# declare not to do any more output, send 'fps'
pools[2].put((proc_id, sum(myfps)/len(myfps), 0))
# print "Worker", proc_id, "should now finish."
@command
def poolchain(args):
    """Parallel optimization using a pool of workers and a chain of queues.

    The master feeds solution "essences" through input_ to the workers,
    collects improved ones from output, and keeps an elite pool of up to
    15 distinct essences that circulate back to the workers.
    """
    import Queue as q
    from multiprocessing import cpu_count, Process, Queue
    time_to_die = time.time()+args.wall
    # create own solution object (for test data being inherited)
    began = time.time()
    sol = VrptwSolution(VrptwTask(args.test))
    # setup the queues
    poison_pills = Queue()
    input_ = Queue()
    output = Queue()
    queues = [ poison_pills, input_, output ]
    oplist = [ None, operations[args.op], None ]
    # create and launch the workers
    num_workers = args.runs or cpu_count()
    workers = [ Process(
        target=worker, args=(sol, queues, oplist,
            (i, num_workers, args.intvl, time_to_die)))
        for i in xrange(num_workers) ]
    map(Process.start, workers)
    # get a solution from the fastest worker (we have to service them...)
    print "Master waits for first solution..."
    essence = output.get()
    input_.put(essence)
    sol.set_essence(essence)
    print "Got first solution:", sol.infoline(), "after", time.time()-began
    sol.loghist()
    # the discriminators of the solution circulation
    best_seen_k = essence[0]
    best_essncs = [essence]
    if best_seen_k == sol.task.best_k:
        print "Best known route count immediately:", time.time()-began
        sol.mem['best_k_found'] = time.time()-began
        if args.strive:
            # --strive: once route count matches the best known,
            # only a fifth of the wall time remains for distance polishing
            time_to_die = time.time() + args.wall / 5.0
            print "Wall time reduced to:", time_to_die - time.time()
    # manage the pool for a while (now - simply feed them back)
    # ---- START OF MAIN LOOP ----
    while time.time() < time_to_die:
        essence = output.get()
        # drop solutions worse than best_seen_k+1
        if essence[0] <= best_seen_k+1:
            # -- check for route count record
            if best_seen_k > essence[0]:
                best_seen_k = essence[0]
                if best_seen_k == sol.task.best_k:
                    print "Best known route count reached:", time.time()-began
                    sol.mem['best_k_found'] = time.time()-began
                    if args.strive and time_to_die > time.time()+args.wall/5.0:
                        time_to_die = time.time()+args.wall/5.0
                        print "Remaining time reduced to:", args.wall/5.0
            # -- check against pool (sorted; bisect keeps it ordered)
            pos = bisect_left(best_essncs, essence)
            if ( len(best_essncs)<15
                 or (pos < 15 and best_essncs[pos][:2] <> essence[:2]) ):
                # this solution is ok - pay it forward
                input_.put(essence)
                best_essncs.insert(pos, essence)
                if len(best_essncs) > 15:
                    best_essncs.pop()
                if pos == 0:
                    # new global best - remembering as a historical event
                    sol.set_essence(essence)
                    sol.loghist()
            else:
                # throw in one of the elite solutions
                input_.put(r.choice(best_essncs))
        elif r.random() < 0.5:
            # if solution was bad (route count), maybe throw in old
            input_.put(r.choice(best_essncs))
    # ---- END OF MAIN LOOP ----
    print "Wall time passed, after:", time.time()-began
    fpss = []
    workers_left = num_workers
    while workers_left > 0:
        # pill tuples are (proc_id, fps, 0); real essences have routes != 0
        k, dist, routes = output.get()
        if routes == 0:
            workers_left -= 1
            print "Worker's",k,"pill-box received", time.time()-began
            fpss.append(dist)
        else:
            if (k, dist) < sol.val():
                sol.set_essence((k, dist, routes))
                print 'got out from output: ', k, dist
    print "Staff is to join: so much are alive:"
    print map(Process.is_alive, workers)
    print input_.qsize(), 'solutions still in queue 1'
    try:
        while True:
            # print "Waiting for a solution"
            k, dist, routes = input_.get(timeout=0.3)
            if (k, dist) < sol.val():
                sol.set_essence((k, dist, routes))
                print 'got out: ', k, dist
    except q.Empty:
        pass
    try:
        output.get(timeout=0.1)
    except q.Empty:
        pass
    else:
        print "Possible rubbish in output"
    print "Best solution chosen. Saving.", time.time()-began
    sol.save('_pc') # suffix for poolchain
    print_like_Czarnas(sol)
    print "summary:", sol.task.name, "%d %.1f"%sol.val(), "%.1f %.1f"%sol.percentage(),
    print "wall", args.wall, "workers", num_workers, "op", args.op, 'best_k',
    try:
        print "%.1f" % sol.mem['best_k_found'],
    except KeyError:
        print 'NO',
    print 'fps', "%.1f" % sum(fpss)
    #map(Process.join, workers)
    print "\nTotal time elapsed:", time.time()-began
@command
def initials(args):
    """Produce initial solutions in all available ways, and 10x randomly."""
    sol = VrptwSolution(VrptwTask(args.test))
    results = []
    best_order = None
    build_by_savings(sol)
    # one build per deterministic ordering; remember which ranked best
    for k in sort_keys.keys():
        VrptwTask.sort_order = k
        build_first(sol)
        results.append((sol.percentage(), k, sol.k))
        if min(results) == results[-1]:
            best_order = k
    # plus ten random-order builds (one was effectively done above)
    VrptwTask.sort_order = 'by_random_ord'
    for i in xrange(9):
        build_first(sol)
        results.append((sol.percentage(), 'by_random_ord', sol.k))
    rank = 1
    for prec, k, sol_k in sorted(results):
        print "%-20s %.2f %.2f routes %d rank %02d %s" % (
            (k+':',)+prec+(sol_k, rank, sol.task.name))
        rank += 1
    # best deterministic order, or the order given
    # will be used for saving
    if args.order is None:
        VrptwTask.sort_order = best_order
    else:
        VrptwTask.sort_order = args.order
    build_first(sol)
    sol.save("_init")
def mksol(name = 'c101'):
    """Produce a solution with a task for the given name (shorthand).

    Tries the name itself, then the solomons/hombergers directories,
    with and without the .txt suffix; returns None when nothing exists.
    """
    import os.path
    for cand in [name, 'solomons/%s'%name, 'hombergers/%s'%name,
                 'solomons/%s.txt'%name, 'hombergers/%s.txt'%name]:
        if os.path.exists(cand):
            # build only the first match; previously a full solution
            # (including the O(N^2) distance precompute) was constructed
            # for every matching candidate and all but the last discarded
            return VrptwSolution(VrptwTask(cand))
    return None
def get_argument_parser():
    """Create and configure an argument parser.
    Used by main function; may be used for programmatic access."""
    try:
        from argparse import ArgumentParser, Action
        parser = ArgumentParser(
            description="Optimizing VRPTW instances with some heuristics")
        parser.add_argument(
            "test", type=file, nargs='?', default=os.path.join(
                os.path.dirname(__file__), 'hombergers','rc210_1.txt'),
            help="the test instance: txt format as by M. Solomon")
        parser.add_argument(
            "command", choices=commands, nargs="?", default="poolchain",
            help="main command to execute (when omitted: poolchain)")
        parser.add_argument(
            "--op", choices=operations.keys(), nargs="?",
            default="op_fight_shortest", help="neighbourhood operator to use")
        parser.add_argument(
            "--runs", "-n", type=int, default=0,
            help="repeat (e.g. optimization) n times, or use n processes")
        parser.add_argument(
            "--glob", "-g", default="hombergers/*.txt",
            help="glob expression for run_all, defaults to all H")
        parser.add_argument(
            "--wall", "-w", type=int, default=600,
            help="approximate walltime (real) in seconds")
        parser.add_argument(
            "--intvl", type=int, default=10,
            help="approximate refresh rate (delay between messages)")
        parser.add_argument(
            "--strive", action="store_true",
            help="run for best known route count, and then only short")
        parser.add_argument(
            "--multi", "-p", action="store_true",
            help="use multiprocessing for parallelism e.g. with run_all")
        parser.add_argument(
            "--prof", action="store_true",
            help="profile the code (don't do that), 10x slower")
        # Some options apply their effect immediately at parse time by
        # mutating module-level state (output dir, sort order, RNG seed).
        class OptionAction(Action):
            """A dispatching action for option parser - global configs"""
            def __call__(self, parser, namespace, values,
                         option_string=None):
                if option_string in ['-o', '--output']:
                    VrptwSolution.outdir = values
                elif option_string == '--order':
                    VrptwTask.sort_order = values
                    namespace.order = values
                elif option_string in ['-s', '--seed']:
                    global r_seed
                    r_seed = int(values)
                    r.seed(r_seed)
        parser.add_argument(
            "--seed", "-s", action=OptionAction,
            help="Set a custom RNG seed")
        parser.add_argument(
            "--output", "-o", default="output", action=OptionAction,
            help="output directory for saving solutions")
        parser.add_argument(
            "--order", action=OptionAction, choices=sort_keys.keys(),
            help="choose specific order for initial customers")
        return parser
    except ImportError:
        print "Install argparse module"
        raise
def main(can_profile = False):
    """Entry point when this module is run at top-level."""
    args = get_argument_parser().parse_args()
    if can_profile and args.prof:
        import cProfile
        # close the file argparse opened; the profiled run reopens it
        args.test.close()
        # re-enter main() (with can_profile=False) under the profiler
        cProfile.run('main()', 'profile.bin')
        return
    # execute the selected command
    globals()[args.command](args)

if __name__ == '__main__':
    main(True)
| Python |
#!/usr/bin/env python
# may perhaps even work on systems without numpy
try:
    import numpy
except ImportError:
    # minimal stand-in: only numpy.float64 is referenced by this module
    # (was a bare `except:`, which would also hide unrelated errors)
    numpy = type('dummy', (object,), dict(float64=float))()
from vrptw import VrptwTask
class DummyTask(VrptwTask):
    """A tiny in-memory VRPTW instance for unit tests (skips file parsing)."""
    # default 4-customer instance; rows: [id, x, y, demand, ready, due, service]
    DEFAULT_CUST = [
            [0, 0, 0,  0,  0, 20, 0],
            [1, 1, 1, 20,  1,  5, 1],
            [2, 2, 2, 10,  0,  8, 1],
            [3, 1, 2, 15,  4, 15, 1],
            [4, 0, 1,  5, 10, 18, 1]
            ]
    def __init__(self, cust = None, Kmax = 10, capa = 200):
        # copy the class template instead of using a mutable default
        # argument (which would be shared between all instances)
        if cust is None:
            cust = [row[:] for row in self.DEFAULT_CUST]
        self.name = 'test'
        self.cust = cust
        self.Kmax = Kmax
        self.capa = capa
        self.N = len(self.cust)-1
        self.precompute()
        self.best_k, self.best_dist = None, None
def test_savings():
    """Check the savings (Clarke, Wright) construction method."""
    from vrptw import VrptwSolution
    from pygrout import build_by_savings
    solution = VrptwSolution(DummyTask())
    build_by_savings(solution)
    assert solution.check()
def test_deepmap():
    """Check the utility for mapping nested lists and dictionaries."""
    from organize import deepmap
    from math import sqrt
    assert deepmap(lambda n: n+1, [1, 3, 2]) == [2, 4, 3]
    assert deepmap(str.upper, dict(a='hi', b='there')) == dict(a='HI', b='THERE')
    squares = {'a': [0, 4, 16], 'b': [1, 9, 25]}
    assert deepmap(sqrt, squares) == {'a': [0., 2., 4.], 'b': [1., 3., 5.]}
def _rec_assert_simmilar(a, b):
assert type(a)==type(b), 'wrong types: %s and %s' % (type(a), type(b))
if type(a) == list or type(a)==tuple:
for pair in zip(a, b):
_rec_assert_simmilar(*pair)
elif type(a) == int:
assert a == b
elif type(a) == float or type(a)==numpy.float64:
assert abs(a-b) < 1e-4
else:
assert False, 'unexpected type: '+str(type(a))
def test_flattening():
"""Checks the format for interchange with other programs, like grout."""
from pygrout import (VrptwSolution, VrptwTask, build_first,
print_like_Czarnas)
task = VrptwTask('solomons/rc208.txt')
s1 = VrptwSolution(task)
build_first(s1)
print_like_Czarnas(s1)
data1 = s1.flatten()
print data1
s2 = VrptwSolution(task)
s2.inflate(data1)
print "Ok, inflated... Let's see:"
print_like_Czarnas(s2)
print s2.flatten()
assert s2.check()
assert s2.flatten()==data1
_rec_assert_simmilar(s1.get_essence(), s2.get_essence())
# possible similar tests: test for assign, copy,
# {get,set}_essence of Solution. But these work already.
def test_find_pos():
"""Check consistency of finding the best position in a route."""
from pygrout import (VrptwSolution, VrptwTask, build_first,
print_like_Czarnas, find_bestpos_on, find_allpos_on, R_EDG)
sol = VrptwSolution(VrptwTask('solomons/rc206.txt'))
build_first(sol)
for i in xrange(sol.k):
for c in sol.r[i][R_EDG][1:]:
for j in xrange(sol.k):
if i <> j:
best = find_bestpos_on(sol, c[0], j)
allp = list(find_allpos_on(sol, c[0], j))
print "Best:", best, "all:", allp
if best == (None, None):
assert allp == []
else:
assert best in allp
assert best == max(allp)
def test_argmap():
"""Test of the class ArgMap from helper.py"""
try:
from helper import ArgMap
except ImportError:
print "Missing something: PyQt or matplotlib etc."
return
from glob import glob
m = ArgMap()
files = glob("solomons/r1*")
files.sort()
m.add(files)
assert m(files[0]) == 1
assert m.ticklabels == ['r101', 'r106']
assert m.ticks == [1, 6]
# Test left out, reenable in case of trouble ;)
def _test_initial_creation():
    """Unit test for creating solutions to all included benchmarks."""
    from pygrout import VrptwSolution, VrptwTask, build_first
    def check_one(test):
        # build and validate an initial solution for a single instance
        s = VrptwSolution(VrptwTask(test))
        build_first(s)
        assert s.check()==True, 'Benchmark %s failed at initial solution' % test
    from glob import iglob
    # Homberger's are too heavy
    # from itertools import chain
    # tests = chain(iglob("solomons/*.txt"), iglob('hombergers/*.txt'))
    tests = iglob("solomons/*.txt")
    # yield-style nose/pytest generator test: one sub-test per instance
    for test in tests:
        yield check_one, test
if __name__ == '__main__':
    # quick manual run: exercise only the savings-construction test
    test_savings()
| Python |
from distutils.core import setup
from glob import glob
# Package metadata: ships the solver modules plus the bundled benchmark
# instances (Solomon and Homberger sets) as package data.
setup(name='Pygrout',
      version='0.1',
      description='VRPTW solving utility',
      author='Tomasz Gandor',
      url='http://code.google.com/p/pygrout/',
      packages=['vrptw', 'solomons', 'hombergers'],
      package_data = { 'vrptw': ['bestknown/*.txt'],
                       'hombergers': ['*.txt'],
                       'solomons': ['*.txt'] },
      py_modules=['pygrout', 'compat', 'undo']
      )
| Python |
from random import Random
from operator import itemgetter
import time
import cPickle
import os
import sys
import numpy as np
from undo import UndoStack
from consts import *
u = UndoStack()
"""Global undo - may be later made possible to override."""
r = Random()
"""The random number generator for the optimization."""
# seed from the wall clock; the value is kept so runs can be reproduced
# (it is stored into each solution's mem dict)
r_seed = int(time.time())
r.seed(r_seed)
# Possible customer ordering (when inserting into initial solution);
# each value is a key function over a customer row (see consts for indices)
sort_keys = dict(
    by_opening = lambda x: x[A], # by start of TW
    by_closing = lambda x: x[B], # by end of TW
    by_midtime = lambda x: x[A]+x[B], # by middle of TW
    by_weight = lambda x: x[DEM], # by demand
    by_opening_desc = lambda x: -x[A], # by start of TW, descending
    by_closing_desc = lambda x: -x[B], # by end of TW, descending
    by_midtime_desc = lambda x: -x[A]-x[B], # by middle of TW, descending
    by_weight_desc = lambda x: -x[DEM], # by demand, descending
    by_timewin = lambda x: x[B]-x[A], # ascending TW width
    by_timewin_desc = lambda x: x[A]-x[B], # descending TW width
    by_id = lambda x: 0, # unsorted
    by_random_ord = lambda x: r.random() # random order
    )
class VrptwTask(object):
"""Data loader - holds data of a VRPTW Solomon-formatted test."""
sort_order = 'by_timewin'
def __init__(self, stream, precompute = True):
if type(stream)==str: stream = open(stream)
lines = stream.readlines()
self.filename = stream.name
stream.close()
self.name = lines[0].strip()
self.Kmax, self.capa = map(int, lines[4].split())
self.cust = [ map(int, x.split()) for x in lines[9:] ]
import array
self.cust = [ array.array('i', map(int, x.split())) for x in lines[9:] ]
self.N = len(self.cust)-1
if precompute:
self.precompute()
self.load_best()
def precompute(self):
"""Initialize or update computed members: distances and times."""
# transpose customers, get Xs and Ys and SRVs
x, y, srv, demands = itemgetter(X, Y, SRV, DEM)(zip(*self.cust))
# make squares
xx = np.tile(x, (len(x), 1))
yy = np.tile(y, (len(y), 1))
# compute hypots - distances
self.dist = ((xx-xx.T)**2+(yy-yy.T)**2)**0.5
# compute travel times (including service)
self.time = self.dist + np.tile(srv, (len(srv),1)).T
# calculating demand-related values
self.demands = sorted(demands)
self.sum_demand = sum(demands)
self.kbound_min = -(-self.sum_demand//self.capa)
print "Sum of q: %d (k_min >= %d), Q(0..4) = %d %d %d %d %d" % (
self.sum_demand, self.kbound_min, self.demands[1],
self.demands[self.N//4], self.demands[self.N//2],
self.demands[self.N*3//4], self.demands[-1])
def routeInfo(self, route):
"""Displays a route summary."""
cap, dist = 0.0, 0.0
print "Route:"
for a, b, aa, lab in route[R_EDG]:
print ("From %2d(%2d,%3d) to %2d(%4d,%4d): "
"start(%.2f)+svc(%d)+dist(%5.2f)=startb(%.2f);ltst(%.2f)"
% (a, self.cust[a][A], self.cust[a][B],
b, self.cust[b][A], self.cust[b][B],
aa, self.cust[a][SRV], self.dist[a][b],
aa + self.cust[a][SRV] + self.dist[a][b], lab) )
if lab < aa + self.cust[a][SRV] + self.dist[a][b]:
print "!"*70
cap += self.cust[a][DEM]
dist += self.dist[a][b]
print " Dist now %.2f, load now %.2f" % (dist, cap)
print "Route stored dist %.2f, load %.2f" % (route[R_DIS], route[R_CAP])
def getSortedCustomers(self):
"""Return customer tuples."""
return sorted(self.cust[1:], key=sort_keys[VrptwTask.sort_order])
def load_best(self):
"""Look for saved best solution values in the bestknown/ dir."""
try:
self.best_k, self.best_dist = map(eval, open(
os.path.join(os.path.dirname(__file__), 'bestknown',
self.name+'.txt')).read().split())
print("Best known solution for test %(name)s: %(best_k)d routes,"
" %(best_dist).2f total distance." % self.__dict__)
except IOError as ioe:
self.best_k, self.best_dist = None, None
print >>sys.stderr, ("Best known solution not found for test: "
+self.name)
if os.path.exists(os.path.join('bestknown', self.name+'.txt')):
raise
def bestval(self):
"""Return best value pair."""
return (self.best_k, self.best_dist)
def error(msg):
    """A function to print or suppress errors (single switch point)."""
    print(msg)
class VrptwSolution(object):
    """A routes (lists of customer IDs) collection, basically.

    Holds k (route count), dist (total distance) and r, the list of route
    records [R_LEN, R_CAP, R_DIS, R_EDG] (see consts).
    """
    # default output directory for saved solutions
    outdir = os.path.join(os.path.dirname(__file__), '..', "output")
    def __init__(self, task):
        """The task could be used to keep track of it."""
        self.task = task
        self.reset()
        # additional field for any purpose
        self.mem = {}
        self.mem['r_seed'] = r_seed
        self.mem['t_start'] = time.time()
        self.history = []
    def reset(self):
        """Reinitialize the solution as empty."""
        self.r = []
        self.dist = 0.
        self.k = 0
    def loghist(self):
        """Put the current time and value into the history list."""
        self.history.append( [self.k, self.dist, time.time()-self.mem['t_start']] )
    def val(self):
        """Return a tuple to represent the solution value; less is better."""
        return (self.k, self.dist)
    def percentage(self):
        """Return a tuple of percentage of current solution vs best known."""
        if self.task.best_k:
            return (100.*self.k/self.task.best_k, 100.*self.dist/self.task.best_dist)
        return (100, 100)
    def flatten(self):
        """Make a string representation of the solution for grout program."""
        return "\n".join(
            ["%d %f" % (self.k, self.dist)] +
            # E_TOW, i.e. edge targets
            [" ".join(str(e[1]) for e in rt[R_EDG]) for rt in self.r] + ['0\n'])
    def inflate(self, data):
        """Decode and recalculate routes from a string by flatten()."""
        # forget everything now:
        u.commit()
        # trusting the saved values
        lines = data.split("\n")
        k, dist = lines[0].split()
        self.k = int(k); self.dist = float(dist)
        # constructing routes
        self.r = []
        dist_glob = 0
        d = self.task.dist
        t = self.task.time
        cust = self.task.cust
        for l in xrange(1, len(lines)-2):
            # the last line should contain a newline, so -2
            customers = map(int, lines[l].split())
            edges = []
            load = 0
            dist = 0
            a = 0
            arr_a = 0
            for b in customers:
                edges.append([a, b, arr_a, 0])
                load += cust[b][DEM]
                dist += d[a][b]
                arr_a = max(arr_a+t[a][b], cust[b][A])
                a = b
            # set latest arrival to depot, for propagating later
            edges[-1][3] = cust[0][B]
            self.r.append([ len(customers), load, dist, edges ])
            propagate_deadline(self, -1, len(customers)-1)
            dist_glob += dist
        self.dist = dist_glob
    # Shorthands for access to task object.
    def d(self, a, b):
        """Distance from a to b."""
        return self.task.dist[a][b]
    def t(self, a, b):
        """Travel time (incl. service at a) from a to b."""
        return self.task.time[a][b]
    def a(self, c):
        """Earliest service start (window opening) of customer c."""
        return self.task.cust[c][A]
    def b(self, c):
        """Latest service start (window closing) of customer c."""
        return self.task.cust[c][B]
    def dem(self, c):
        """Demand of customer c."""
        return self.task.cust[c][DEM]
    def route(self, i):
        """Render a short representation of route i."""
        return "-".join(str(e[0]) for e in self.r[i][R_EDG])
    def check(self, complete=False):
        """Checks solution, possibly partial, for inconsistency."""
        unserviced = set(range(1, self.task.N+1))
        for i in xrange(len(self.r)):
            if not self.check_route(i, unserviced):
                return False
        if len(unserviced) and complete:
            # warning only - does not fail the check
            error("Unserviced customers left in %s: " % self.task.name + ", ".join(str(x) for x in sorted(unserviced)))
        total_dist = sum(self.r[i][R_DIS] for i in xrange(self.k))
        if abs(total_dist - self.dist) > 1e-3:
            error("Wrong total dist: %f, while sum: %f (%d routes for %s)" % (total_dist, self.dist,
                self.k, self.task.name))
            return False
        return True
    def check_full(self):
        """Check full solution - shorthand method."""
        return self.check(True)
    def check_route(self, i, unserviced_ = None ):
        """Check route i for consistency.
        Remove found customers from unserviced_"""
        now, dist, cap, l = 0, 0, 0, 0
        unserviced = unserviced_ if unserviced_ is not None else set(range(1, self.task.N+1))
        for fro, to, afro, lato in self.r[i][R_EDG]:
            # recompute the actual service start and compare to the stored one
            actual = max(now, self.a(fro))
            if afro <> actual:
                error("Wrong time: %.2f (expected %.2f, err %.3f) on rt %d"
                      " edge %d from %d to %d, a(from) %d"
                      % (afro, actual, actual-afro, i, l, fro, to, self.a(fro)))
                error(self.route(i))
                return False
            if fro:
                if not fro in unserviced:
                    error("Customer %d serviced again on route %d" % (fro, i))
                else:
                    unserviced.remove(fro)
            dist += self.d(fro, to)
            cap += self.dem(fro)
            if cap > self.task.capa:
                error("Vehicle capacity exceeded on route %d with customer %d" % (i, fro))
                return False
            l += 1
            now = actual + self.t(fro, to)
        if l != self.r[i][R_LEN]:
            error("Wrong length %d (actual %d) for route %d" % (self.r[i][R_LEN], l, i))
            return False
        if abs(dist - self.r[i][R_DIS]) > 1e-4:
            error("Wrong distance %f (actual %f) for route %d" % (self.r[i][R_DIS], dist, i))
            return False
        return True
    # NOTE: the first parameter is named `sol` instead of `self`, but this
    # is an ordinary instance method.
    def save(sol, extra=None):
        """Dump (pickle) the solution."""
        import uuid
        # handling unknown percentage (r207.50 and r208.50, actually)
        prec_k, prec_d = map(
            lambda x: "%05.1f" % x if sol.task.best_k else 'x'*5,
            sol.percentage())
        # time signature - minutes and seconds (too little?)
        time_sig = "%02d%02d" % divmod(int(time.time())%3600, 60)
        # additional markers
        if not extra is None: time_sig += str(extra)
        node_sig = hex(uuid.getnode())[-4:]
        save_name = "%s-%s-%s-%02d-%05.1f-%s-%s.p" % (
            sol.task.name, prec_k, prec_d, sol.k, sol.dist,
            sol.get_signature()[:8], time_sig)
        sol.mem['save_name'] = save_name
        sol.mem['save_time'] = time.time()
        sol.mem['t_elapsed'] = time.time() - sol.mem['t_start']
        sol.mem['host_sig'] = node_sig
        sol.mem['signature'] = sol.get_signature()
        save_data = dict(
            routes = sol.r,
            mem = sol.mem,
            val = sol.val(),
            filename = sol.task.filename,
            name = sol.task.name,
            percentage = sol.percentage(),
            history = sol.history )
        if not os.path.exists(sol.outdir):
            os.makedirs(sol.outdir)
        target_path = os.path.join(sol.outdir, save_name)
        if os.path.exists(target_path):
            print "File %s - such solution already exists" % target_path
        else:
            cPickle.dump(save_data, open(target_path, 'wb'))
            # not writing the copy - use the export command
            # open(os.path.join(sol.outdir, save_name.replace('.p', '.vrp')), 'w').write(sol.flatten())
        return sol
    def copy(self):
        """Return a copy the solution in a possibly cheap way."""
        clone = VrptwSolution(self.task)
        clone.assign(self)
        return clone
    def assign(self, rvalue):
        """Assignment operator - copy essential features from another solution."""
        self.k = rvalue.k
        self.dist = rvalue.dist
        # deep copy of routes via pickle round-trip
        self.r = cPickle.loads(cPickle.dumps(rvalue.r, 2))
    def get_essence(self):
        """Return the most interesting part of the solution - routes."""
        return (self.k, self.dist, self.r)
    def set_essence(self, essence):
        """Set new routes and value: use with result of get_essence."""
        self.k, self.dist, self.r = essence
    def get_successors(self):
        """Return an array of nodes' successors, 0 for depot."""
        data = [0] * (self.task.N+1)
        for route in self.r:
            for a, b, _, _ in route[R_EDG][1:]:
                data[a] = b
        return data
    def get_signature(self):
        """Return a hex digest of the solution."""
        import hashlib
        return hashlib.md5("-".join(str(succ) for succ in self.get_successors())).hexdigest()
    def infoline(self):
        """One-line summary: (k, dist) and percentages vs best known."""
        return "(%d, %.2f) (%5.1f%%, %5.1f%%)" % (self.val()+self.percentage())
def propagate_arrival_ref(sol, rr, pos):
    """Update arrivals on route record *rr* after pos.

    Same algorithm as propagate_arrival, but takes the route record
    itself instead of its index in sol.r. Stops at the first edge whose
    arrival is already correct; edits go through the undo stack.
    """
    edges = rr[R_EDG]
    time = sol.task.time
    cust = sol.task.cust
    a, b, arr_a, _ = edges[pos]
    for idx in xrange(pos+1, len(edges)):
        b, _, old_arrival, _ = edges[idx]
        new_arrival = max(arr_a + time[a][b], cust[b][A])
        # check, if there is a modification
        if new_arrival == old_arrival:
            break
        u.set(edges[idx], E_ARF, new_arrival)
        a = b
        arr_a = new_arrival
def propagate_arrival(sol, r, pos):
    """Update arrivals (actual service begin) on route index *r* after pos.

    Thin wrapper over propagate_arrival_ref, which previously existed as
    a verbatim copy of this body (code duplication removed).
    """
    propagate_arrival_ref(sol, sol.r[r], pos)
def propagate_deadline_ref(sol, rr, pos):
    """Update deadlines (latest legal service begin) on a route before pos.

    Takes the route record *rr* directly (cf. propagate_deadline, which
    takes an index). Walks backwards and stops at the first edge whose
    deadline is already correct; edits go through the undo stack.
    """
    edges = rr[R_EDG]
    _, b, _, larr_b = edges[pos]
    time = sol.task.time
    cust = sol.task.cust
    for idx in xrange(pos-1, -1, -1):
        _, a, _, old_deadline = edges[idx]
        new_deadline = min(larr_b-time[a][b], cust[a][B])
        # check, if there is a modification
        if new_deadline == old_deadline:
            break
        u.set(edges[idx], E_LAT, new_deadline)
        b = a
        larr_b = new_deadline
def propagate_deadline(sol, r, pos):
    """Update deadlines (latest legal service begin) on route index *r* before pos.

    Thin wrapper over propagate_deadline_ref, which previously existed as
    a verbatim copy of this body (code duplication removed).
    """
    propagate_deadline_ref(sol, sol.r[r], pos)
# THE MODEL - basic operations on a solution (through UndoStack)
def insert_new(sol, c):
    """Inserts customer C on a new route (all edits via the undo stack)."""
    new_route = [
        2, # number of edges
        sol.dem(c), # demand on route
        sol.d(0,c)+sol.d(c,0), # distance there and back
        [
            [0, c, 0, sol.b(c)], # depot -> c
            [c, 0, max(sol.t(0,c), sol.a(c)), sol.b(0)] # c -> depot
        ]
    ]
    u.ins(sol.r, sol.k, new_route)
    u.atr(sol, 'k', sol.k+1) # route no inc
    u.atr(sol, 'dist', sol.dist+new_route[R_DIS]) # total distance inc
def insert_at_pos(sol, c, r, pos):
    """Inserts c into route r at pos. Does no feasibility checks
    (asserts the time window only); all edits go through the undo stack."""
    # update edges (with arrival times)
    edges = sol.r[r][R_EDG]
    # old edge
    a, b, arr_a, larr_b = u.pop(edges, pos)
    # arrival and latest arrival time to middle
    arr_c = max(arr_a + sol.t(a, c), sol.a(c))
    larr_c = min(sol.b(c), larr_b-sol.t(c, b))
    assert arr_c <= larr_c, 'invalid insertion, time window violated'
    # new edges - second then first (so both land at pos, pos+1)
    u.ins(edges, pos, [c, b, arr_c, larr_b])
    u.ins(edges, pos, [a, c, arr_a, larr_c])
    # propagate time window constraints - forward
    propagate_arrival(sol, r, pos+1)
    # propagate time window constraints - backward
    propagate_deadline(sol, r, pos)
    # update distances
    dinc = sol.d(a, c)+sol.d(c, b)-sol.d(a, b)
    u.add(sol.r[r], R_DIS, dinc)
    u.ada(sol, 'dist', dinc)
    # update capacity
    u.add(sol.r[r], R_CAP, sol.dem(c))
    # update count
    u.add(sol.r[r], R_LEN, 1)
def find_bestpos_on(sol, c, r):
    """Finds best position to insert customer c on existing route r.

    Returns (-distance_increase, pos), or (None, None) when no feasible
    position exists (capacity or time windows violated).
    """
    # check capacity
    if sol.r[r][R_CAP] + sol.dem(c) > sol.task.capa:
        return None, None
    # pull out deep things locally
    time = sol.task.time
    cust = sol.task.cust
    dist = sol.task.dist
    c_a = cust[c][A]
    c_b = cust[c][B]
    def eval_edge(pack):
        # score inserting c into the edge (a, b); None pair if infeasible
        pos, (a, b, arr_a, larr_b) = pack
        arr_c = max(arr_a + time[a][c], c_a) # earliest possible
        larr_c = min(c_b, larr_b-time[c][b]) # latest if c WAS here
        larr_a = min(sol.b(a), larr_c-time[a][c])
        if arr_c <= larr_c and arr_a <= larr_a:
            return (-(dist[a][c] + dist[c][b] - dist[a][b]), pos)
        return None, None
    # find the best edge (None sorts below any number)
    return max(map(eval_edge, enumerate(sol.r[r][R_EDG])))
def find_bestpos(sol, c):
    """Find best positions on any route, return the route pos and distance.
    The exact format is a nested tuple: ((-dist increase, position), route)"""
    best_move = (None, None)
    best_route = None
    # scan every route; tuples compare so that a smaller distance
    # increase (larger negated value) wins
    for route_idx in xrange(sol.k):
        for move in find_allpos_on(sol, c, route_idx):
            if move > best_move:
                best_move = move
                best_route = route_idx
    return (best_move, best_route)
def insert_customer(sol, c):
    """Insert customer c at the globally best position, or on a new route.

    Returns the (route index, position) pair where the customer landed.
    """
    if sol.k == 0:
        # no routes yet - start the first one
        insert_new(sol, c)
        return sol.k-1, 0
    # best (-distance increase, position) over all routes, and its route
    (bd, bp), br = find_bestpos(sol, c)
    if bd is not None:
        insert_at_pos(sol, c, br, bp)
        return br, bp
    # no feasible position on any existing route - open a new one
    insert_new(sol, c)
    return sol.k-1, 0
def remove_customer(sol, r, pos):
    """Remove customer at pos from a route and return his ID.

    If he was the only customer, the whole route is dropped; otherwise
    the two surrounding edges are merged and time windows re-propagated.
    All mutations go through the undo stack `u`.
    """
    assert pos < sol.r[r][R_LEN], 'removal past route end'
    edges = sol.r[r][R_EDG]
    a, b, arr_a, larr_b = u.pop(edges, pos)
    d, c, arr_b, larr_c = u.pop(edges, pos)
    assert b == d, 'adjacent edges do not meet in one node'
    if sol.r[r][R_LEN] == 2: # last customer - remove route
        rt = u.pop(sol.r, r)
        # solution route count decrease
        u.ada(sol, 'k', -1)
        # solution distance decrease
        u.ada(sol, 'dist', -rt[R_DIS])
        return b
    # allow equality: feasibility elsewhere (insert_at_pos, find_allpos_on)
    # uses <=, so arriving exactly at the latest allowed time is valid
    assert arr_a + sol.t(a, c) <= larr_c, 'time window error after removal'
    u.ins(edges, pos, [a, c, arr_a, larr_c])
    # propagating time window constraints
    propagate_arrival(sol, r, pos)
    propagate_deadline(sol, r, pos)
    # update distances (probably decrease)
    dinc = sol.d(a, c)-sol.d(a, b)-sol.d(b, c)
    u.add(sol.r[r], R_DIS, dinc)
    u.ada(sol, 'dist', dinc)
    # update capacity
    u.add(sol.r[r], R_CAP, -sol.dem(b))
    # update count
    u.add(sol.r[r], R_LEN, -1)
    return b
def find_allpos_on(sol, c, r, startpos=0):
    """Find all positions where customer c can be inserted on route r
    and yield them as tuples (distinc, position).

    distinc is the negated distance increase, so that a larger value is
    a better insertion when maximizing.
    """
    # insertion impossible if it would overload the vehicle
    if sol.r[r][R_CAP] + sol.dem(c) > sol.task.capa:
        return
    # pull deep attributes into locals (hot loop)
    edges = sol.r[r][R_EDG]
    time = sol.task.time
    cust = sol.task.cust
    dist = sol.task.dist
    c_a = cust[c][A]
    c_b = cust[c][B]
    for pos in xrange(startpos, sol.r[r][R_LEN]):
        a, b, arr_a, larr_b = edges[pos]
        if c_a > larr_b:
            # c opens after the latest arrival at b - too early on route
            continue
        if arr_a > c_b:
            # already past c's deadline; later edges are later still
            break
        arr_c = max(arr_a + time[a][c], c_a)   # earliest possible at c
        larr_c = min(c_b, larr_b-time[c][b])   # latest if c WAS here
        larr_a = min(cust[a][B], larr_c-time[a][c])
        # (leftover debug print for the arr_c<=larr_c, arr_a>larr_a case
        # removed - it polluted stdout in the hot loop)
        if arr_c <= larr_c and arr_a <= larr_a:
            yield (-(dist[a][c] + dist[c][b] - dist[a][b]), pos)
def join_routes(sol, r1, r2):
    """Append r2 to the end of r1. Currently irreversible."""
    # drop r1's closing edge (c -> depot) and r2's opening one (depot -> d)
    c, _, arr_c, _ = sol.r[r1][R_EDG].pop()
    _, d, _, larr_d = sol.r[r2][R_EDG].pop(0)
    joint = sol.r[r1][R_LEN]-1
    saving = sol.d(c, 0) + sol.d(0, d) - sol.d(c, d)
    # splice the two routes together with a fresh c -> d edge
    sol.r[r1][R_EDG].append([c, d, arr_c, larr_d])
    sol.r[r1][R_EDG].extend(sol.r[r2][R_EDG])
    sol.r[r1][R_LEN] += sol.r[r2][R_LEN]-1
    sol.r[r1][R_CAP] += sol.r[r2][R_CAP]
    sol.r[r1][R_DIS] += sol.r[r2][R_DIS] - saving
    # repair time windows around the joint
    propagate_arrival(sol, r1, joint)
    propagate_deadline(sol, r1, joint)
    sol.r.pop(r2)
    sol.k -= 1
    sol.dist -= saving
def join_routes_ref(sol, rr1, rr2):
    """Append route rr2 onto rr1, both given by reference (not index).

    Counterpart of join_routes; also irreversible (bypasses the undo
    stack) and removes rr2 from the solution's route list.
    """
    last, _, arr_last, _ = rr1[R_EDG].pop()
    _, first, _, larr_first = rr2[R_EDG].pop(0)
    joint = rr1[R_LEN]-1
    saving = sol.d(last, 0) + sol.d(0, first) - sol.d(last, first)
    # splice with a fresh last -> first edge, then repair time windows
    rr1[R_EDG].append([last, first, arr_last, larr_first])
    rr1[R_EDG].extend(rr2[R_EDG])
    rr1[R_LEN] += rr2[R_LEN]-1
    rr1[R_CAP] += rr2[R_CAP]
    rr1[R_DIS] += rr2[R_DIS] - saving
    propagate_arrival_ref(sol, rr1, joint)
    propagate_deadline_ref(sol, rr1, joint)
    sol.r.remove(rr2)
    sol.k -= 1
    sol.dist -= saving
| Python |
# Index constants for the plain list/tuple data model of the solver.
# tuple indices in customer tuple:
# number, coordinates(X,Y), demand, ready(A), due(B), service time
ID, X, Y, DEM, A, B, SRV = range(7)
# list indices in route list structure:
# route len (num edges), capacity, total distance, edge list
R_LEN, R_CAP, R_DIS, R_EDG = range(4)
# list indices in edge list structure (in route edge list)
# customer "a" id, customer "b" id, arrival at "a", latest at "b"
E_FRO, E_TOW, E_ARF, E_LAT = range(4)
| Python |
#!/usr/bin/env python
import sys
import pstats
import os
def main():
if len(sys.argv) == 1:
print "\nUsage: %s profile_output [sort_order] [profile_output...]" % sys.argv[0]
print """
Where sort order may be: time (default), cumulative, ...
(taken from documentation:)
Valid Arg Meaning
'calls' call count
'cumulative' cumulative time
'file' file name
'module' file name
'pcalls' primitive call count
'line' line number
'name' function name
'nfl' name/file/line
'stdname' standard name
'time' internal time
"""
exit()
# the process
order = 'time'
files = filter(os.path.exists, sys.argv[1:])
extra = list(set(sys.argv[1:])-set(files))
if len(extra) > 0:
order = extra[0]
if len(extra) > 1:
print "Warning: excess args ignored:", extra[1:]
s = pstats.Stats(files[0])
map(s.add, files[1:])
num_rows = int(os.getenv('ROWS', '20'))
s.sort_stats(order).print_stats(num_rows)
if __name__ == '__main__':
main()
| Python |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'helper.ui'
#
# Created: Mon Aug 29 16:54:25 2011
# by: PyQt4 UI code generator 4.8.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# QString.fromUtf8 exists only with the QString API (v1) of PyQt4;
# under newer API levels plain Python strings are used, so fall back
# to the identity function.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    _fromUtf8 = lambda s: s
class Ui_Helper(object):
    """pyuic4-generated layout class for the route-construction helper.

    Generated from 'helper.ui' - do not edit by hand; regenerate from
    the .ui file instead (manual changes will be lost).
    """
    def setupUi(self, Helper):
        """Build the widget tree and wire the auto-connected signals."""
        Helper.setObjectName(_fromUtf8("Helper"))
        Helper.resize(661, 701)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(1)
        sizePolicy.setHeightForWidth(Helper.sizePolicy().hasHeightForWidth())
        Helper.setSizePolicy(sizePolicy)
        self.verticalLayout = QtGui.QVBoxLayout(Helper)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.widget = QtGui.QWidget(Helper)
        self.widget.setMinimumSize(QtCore.QSize(0, 0))
        self.widget.setObjectName(_fromUtf8("widget"))
        self.horizontalLayout_2 = QtGui.QHBoxLayout(self.widget)
        self.horizontalLayout_2.setMargin(0)
        self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
        self.groupBox_2 = QtGui.QGroupBox(self.widget)
        self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
        self.horizontalLayout_3 = QtGui.QHBoxLayout(self.groupBox_2)
        self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
        # list of test-family glob patterns; items get their text in
        # retranslateUi below
        self.families = QtGui.QListWidget(self.groupBox_2)
        self.families.setMinimumSize(QtCore.QSize(30, 80))
        self.families.setObjectName(_fromUtf8("families"))
        QtGui.QListWidgetItem(self.families)
        QtGui.QListWidgetItem(self.families)
        QtGui.QListWidgetItem(self.families)
        QtGui.QListWidgetItem(self.families)
        QtGui.QListWidgetItem(self.families)
        QtGui.QListWidgetItem(self.families)
        QtGui.QListWidgetItem(self.families)
        QtGui.QListWidgetItem(self.families)
        QtGui.QListWidgetItem(self.families)
        QtGui.QListWidgetItem(self.families)
        QtGui.QListWidgetItem(self.families)
        QtGui.QListWidgetItem(self.families)
        QtGui.QListWidgetItem(self.families)
        QtGui.QListWidgetItem(self.families)
        QtGui.QListWidgetItem(self.families)
        QtGui.QListWidgetItem(self.families)
        QtGui.QListWidgetItem(self.families)
        QtGui.QListWidgetItem(self.families)
        QtGui.QListWidgetItem(self.families)
        QtGui.QListWidgetItem(self.families)
        self.horizontalLayout_3.addWidget(self.families)
        self.best = QtGui.QPushButton(self.groupBox_2)
        self.best.setObjectName(_fromUtf8("best"))
        self.horizontalLayout_3.addWidget(self.best)
        self.horizontalLayout_2.addWidget(self.groupBox_2)
        self.groupBox_4 = QtGui.QGroupBox(self.widget)
        self.groupBox_4.setObjectName(_fromUtf8("groupBox_4"))
        self.verticalLayout_2 = QtGui.QVBoxLayout(self.groupBox_4)
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
        self.groupBox = QtGui.QGroupBox(self.groupBox_4)
        self.groupBox.setObjectName(_fromUtf8("groupBox"))
        self.horizontalLayout = QtGui.QHBoxLayout(self.groupBox)
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.label = QtGui.QLabel(self.groupBox)
        self.label.setObjectName(_fromUtf8("label"))
        self.horizontalLayout.addWidget(self.label)
        self.mi = QtGui.QDoubleSpinBox(self.groupBox)
        self.mi.setDecimals(2)
        self.mi.setSingleStep(0.05)
        self.mi.setProperty(_fromUtf8("value"), 1.0)
        self.mi.setObjectName(_fromUtf8("mi"))
        self.horizontalLayout.addWidget(self.mi)
        self.has_waitlimit = QtGui.QCheckBox(self.groupBox)
        self.has_waitlimit.setObjectName(_fromUtf8("has_waitlimit"))
        self.horizontalLayout.addWidget(self.has_waitlimit)
        self.waitlimit = QtGui.QSpinBox(self.groupBox)
        self.waitlimit.setEnabled(False)
        self.waitlimit.setMaximum(120)
        self.waitlimit.setSingleStep(30)
        self.waitlimit.setObjectName(_fromUtf8("waitlimit"))
        self.horizontalLayout.addWidget(self.waitlimit)
        self.mfs = QtGui.QCheckBox(self.groupBox)
        self.mfs.setObjectName(_fromUtf8("mfs"))
        self.horizontalLayout.addWidget(self.mfs)
        self.update = QtGui.QPushButton(self.groupBox)
        self.update.setObjectName(_fromUtf8("update"))
        self.horizontalLayout.addWidget(self.update)
        self.verticalLayout_2.addWidget(self.groupBox)
        self.groupBox_3 = QtGui.QGroupBox(self.groupBox_4)
        self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
        self.horizontalLayout_4 = QtGui.QHBoxLayout(self.groupBox_3)
        self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
        self.greedyOrder = QtGui.QComboBox(self.groupBox_3)
        self.greedyOrder.setObjectName(_fromUtf8("greedyOrder"))
        self.horizontalLayout_4.addWidget(self.greedyOrder)
        self.greedy = QtGui.QPushButton(self.groupBox_3)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.greedy.sizePolicy().hasHeightForWidth())
        self.greedy.setSizePolicy(sizePolicy)
        self.greedy.setObjectName(_fromUtf8("greedy"))
        self.horizontalLayout_4.addWidget(self.greedy)
        self.verticalLayout_2.addWidget(self.groupBox_3)
        self.horizontalLayout_2.addWidget(self.groupBox_4)
        self.verticalLayout.addWidget(self.widget)
        self.textEdit = QtGui.QTextEdit(Helper)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.textEdit.sizePolicy().hasHeightForWidth())
        self.textEdit.setSizePolicy(sizePolicy)
        self.textEdit.setReadOnly(True)
        self.textEdit.setObjectName(_fromUtf8("textEdit"))
        self.verticalLayout.addWidget(self.textEdit)
        self.progressBar = QtGui.QProgressBar(Helper)
        self.progressBar.setMaximum(100)
        self.progressBar.setProperty(_fromUtf8("value"), 0)
        self.progressBar.setObjectName(_fromUtf8("progressBar"))
        self.verticalLayout.addWidget(self.progressBar)
        self.clearPlot = QtGui.QPushButton(Helper)
        self.clearPlot.setObjectName(_fromUtf8("clearPlot"))
        self.verticalLayout.addWidget(self.clearPlot)
        self.retranslateUi(Helper)
        self.families.setCurrentRow(-1)
        # checking "use waitlimit" enables the waitlimit spin box
        QtCore.QObject.connect(self.has_waitlimit, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.waitlimit.setEnabled)
        QtCore.QMetaObject.connectSlotsByName(Helper)
    def retranslateUi(self, Helper):
        """Set all user-visible strings (translation entry point)."""
        Helper.setWindowTitle(QtGui.QApplication.translate("Helper", "Route construction testing", None, QtGui.QApplication.UnicodeUTF8))
        self.groupBox_2.setTitle(QtGui.QApplication.translate("Helper", "Test group selection", None, QtGui.QApplication.UnicodeUTF8))
        __sortingEnabled = self.families.isSortingEnabled()
        self.families.setSortingEnabled(False)
        self.families.item(0).setText(QtGui.QApplication.translate("Helper", "solomons/*.txt", None, QtGui.QApplication.UnicodeUTF8))
        self.families.item(1).setText(QtGui.QApplication.translate("Helper", "solomons/c*.txt", None, QtGui.QApplication.UnicodeUTF8))
        self.families.item(2).setText(QtGui.QApplication.translate("Helper", "solomons/c1*.txt", None, QtGui.QApplication.UnicodeUTF8))
        self.families.item(3).setText(QtGui.QApplication.translate("Helper", "solomons/c2*.txt", None, QtGui.QApplication.UnicodeUTF8))
        self.families.item(4).setText(QtGui.QApplication.translate("Helper", "solomons/r[12]*.txt", None, QtGui.QApplication.UnicodeUTF8))
        self.families.item(5).setText(QtGui.QApplication.translate("Helper", "solomons/r1*.txt", None, QtGui.QApplication.UnicodeUTF8))
        self.families.item(6).setText(QtGui.QApplication.translate("Helper", "solomons/r2*.txt", None, QtGui.QApplication.UnicodeUTF8))
        self.families.item(7).setText(QtGui.QApplication.translate("Helper", "solomons/rc*.txt", None, QtGui.QApplication.UnicodeUTF8))
        self.families.item(8).setText(QtGui.QApplication.translate("Helper", "solomons/rc1*.txt", None, QtGui.QApplication.UnicodeUTF8))
        self.families.item(9).setText(QtGui.QApplication.translate("Helper", "solomons/rc2*.txt", None, QtGui.QApplication.UnicodeUTF8))
        self.families.item(10).setText(QtGui.QApplication.translate("Helper", "hombergers/*_2??.txt", None, QtGui.QApplication.UnicodeUTF8))
        self.families.item(11).setText(QtGui.QApplication.translate("Helper", "hombergers/c?_2*.txt", None, QtGui.QApplication.UnicodeUTF8))
        self.families.item(12).setText(QtGui.QApplication.translate("Helper", "hombergers/c1_2*.txt", None, QtGui.QApplication.UnicodeUTF8))
        self.families.item(13).setText(QtGui.QApplication.translate("Helper", "hombergers/c2_2*.txt", None, QtGui.QApplication.UnicodeUTF8))
        self.families.item(14).setText(QtGui.QApplication.translate("Helper", "hombergers/r[12]_2*.txt", None, QtGui.QApplication.UnicodeUTF8))
        self.families.item(15).setText(QtGui.QApplication.translate("Helper", "hombergers/r1_2*.txt", None, QtGui.QApplication.UnicodeUTF8))
        self.families.item(16).setText(QtGui.QApplication.translate("Helper", "hombergers/r2_2*.txt", None, QtGui.QApplication.UnicodeUTF8))
        self.families.item(17).setText(QtGui.QApplication.translate("Helper", "hombergers/rc?_2*.txt", None, QtGui.QApplication.UnicodeUTF8))
        self.families.item(18).setText(QtGui.QApplication.translate("Helper", "hombergers/rc1_2*.txt", None, QtGui.QApplication.UnicodeUTF8))
        self.families.item(19).setText(QtGui.QApplication.translate("Helper", "hombergers/rc2_2*.txt", None, QtGui.QApplication.UnicodeUTF8))
        self.families.setSortingEnabled(__sortingEnabled)
        self.best.setText(QtGui.QApplication.translate("Helper", "Plot best", None, QtGui.QApplication.UnicodeUTF8))
        self.groupBox_4.setTitle(QtGui.QApplication.translate("Helper", "Construction heuristic", None, QtGui.QApplication.UnicodeUTF8))
        self.groupBox.setTitle(QtGui.QApplication.translate("Helper", "Savings heuristic", None, QtGui.QApplication.UnicodeUTF8))
        self.label.setText(QtGui.QApplication.translate("Helper", "µ param", None, QtGui.QApplication.UnicodeUTF8))
        self.has_waitlimit.setText(QtGui.QApplication.translate("Helper", "use waitlimit", None, QtGui.QApplication.UnicodeUTF8))
        self.mfs.setText(QtGui.QApplication.translate("Helper", "mfs", None, QtGui.QApplication.UnicodeUTF8))
        self.update.setText(QtGui.QApplication.translate("Helper", "Plot", None, QtGui.QApplication.UnicodeUTF8))
        self.groupBox_3.setTitle(QtGui.QApplication.translate("Helper", "Greedy build first", None, QtGui.QApplication.UnicodeUTF8))
        self.greedy.setText(QtGui.QApplication.translate("Helper", "Plot", None, QtGui.QApplication.UnicodeUTF8))
        self.textEdit.setHtml(QtGui.QApplication.translate("Helper", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
            "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
            "p, li { white-space: pre-wrap; }\n"
            "</style></head><body style=\" font-family:\'Ubuntu\'; font-size:9pt; font-weight:400; font-style:normal;\">\n"
            "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Welcome to route construction tester. This is a notification console.</p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
        self.clearPlot.setText(QtGui.QApplication.translate("Helper", "Reset plot", None, QtGui.QApplication.UnicodeUTF8))
| Python |
#!/usr/bin/env python
import re
import sys
def smart_input(prompt, history=None, suggestions=[], info=None):
from collections import deque
def ensure_file(f):
import os
if os.path.exists(f):
return f
if not os.path.exists(os.path.dirname(f)):
os.makedirs(os.path.dirname(f))
open(f, 'w')
return f
def inner_loop(default, suggestions, history):
while True:
question = "%s (%s): " % (prompt, default)
ans = raw_input(question)
if ans == '':
return default
if ans == ' ':
if not len(history):
print "No history provided..."
continue
if len(history) == 1 and default == history[0]:
print "No other history items."
history.rotate(-1)
default = history[0]
continue
if ans == ' ':
if not len(history):
print "No history provided..."
continue
if len(history) == 1 and default == history[0]:
print "No other history items."
history.rotate(1)
default = history[0]
continue
if ans == '`':
if not len(suggestions):
print "No suggestions provided..."
continue
if len(suggestions) == 1 and default == suggestions[0]:
print "No other suggestions."
suggestions.rotate(-1)
default = suggestions[0]
continue
if ans == '``':
if not len(suggestions):
print "No suggestions provided..."
continue
if len(suggestions) == 1 and default == suggestions[0]:
print "No other suggestions."
suggestions.rotate(1)
default = suggestions[0]
continue
if ans[0] == '+':
return default + ans[1:]
if ans == '~':
if default == history[0]:
print "Removing '%s' from history." % default
history.popleft()
default = ''
continue
# all other cases:
return ans
suggestions = deque(suggestions)
default = suggestions[0] if len(suggestions) else ''
if history is None:
hist = deque()
else:
hist = deque(map(str.strip, open(ensure_file(history))))
# print hist
result = inner_loop(default, suggestions, hist)
if not history is None:
if result in hist:
hist.remove(result)
hist.appendleft(result)
open(history,'w').write("\n".join(hist))
return result
def run_job(task_portion, wall, auto = False, extra = '', prefix='vrptw'):
    """Submit one PBS job that runs ./pygrout.py over the given tasks.

    task_portion: list of task file names, each run sequentially with
    the given per-task wall time (seconds); `extra` is spliced into the
    pygrout command line. Unless `auto`, the user must confirm the
    generated script before it is piped to qsub.
    Returns (qsub command, submitted script, job name, qsub output).
    """
    from subprocess import Popen, PIPE
    from os import getcwd
    # some task preparation (if it wasn't a file, but a test name?)
    script = """
cd %s
pwd
date """ % getcwd() + "".join("""
./pygrout.py %s --wall %d %s
date """ % (extra, wall, task) for task in task_portion)
    # prepare jobname - derived from the first task, path/extension cut
    jobname = re.sub('.txt|hombergers/|solomons/', '', prefix+'_' + task_portion[0])
    # walltime: per-task wall plus 60 s slack, times the number of tasks
    command = 'qsub -l nodes=1:nehalem -l walltime=%d -N %s -e /tmp' % (
        (wall+60)*len(task_portion), jobname)
    if not auto:
        print "About to pipe: \n%s\n to the command: \n%s\n\nPress Enter" % (
            script, command)
        raw_input()
    output, errors = Popen(command, shell=True, stdin=PIPE,
        stdout=PIPE, stderr=PIPE).communicate(script)
    print "Process returned", repr((output, errors))
    # qsub prints the new job id on stdout
    return command, script, jobname, output.strip()
def main():
    """Interactively split the task list into PBS jobs and submit them.

    Tasks come from argv; wall time, batch size, extra pygrout args and
    job name prefix are asked via smart_input. A submission log is
    written to output/<timestamp>.log.txt.
    """
    import datetime
    # earlier fully-interactive variant, kept for reference:
    # pbs_opts = smart_input('PBS options', 'output/.pbs/options',
    # ['-l nodes=1:nehalem -l walltime=20000'])
    # tasks = smart_input('Tasks [glob pattern]', 'output/.pbs/tasks',
    # ['solomons/', 'hombergers/', 'hombergers/*_2??.txt'])
    # pygrout_opts = smart_input('pygrout options', 'output/.pbs/pygroupts',
    # ['--strive --wall 600', '--wall '])
    if len(sys.argv) < 2:
        print "No arguments (tasks) provided"
        return
    tasks = sys.argv[1:]
    wall = int(smart_input('Enter wall time (per task)', suggestions=[2000]))
    # each task gets wall + 60 s of slack
    total = len(tasks)*(wall+60)
    print "There are %d tasks, which makes %s s (%02d:%02d:%02d) total." % (
        len(tasks), total, total/3600, total%3600/60, total%60)
    print "A single task is %02d:%02d" % (wall/60+1, wall%60)
    per_job = int(smart_input('How many task per job', suggestions=[20]))
    total = per_job*(wall+60)
    print "A single job will run %02d:%02d:%02d" % (total/3600,
        total%3600/60, total%60)
    extra = smart_input('Extra args for pygrout', suggestions=[''])
    job_name = smart_input('Job name prefix', suggestions=['vrptw'])
    # answering 'n' skips the per-job confirmation prompts in run_job
    auto = raw_input('Confirm single jobs (Y/n)?')=='n'
    jobs = []
    for i in xrange(0, len(tasks), per_job):
        jobs.append(run_job(tasks[i:i+per_job], wall, auto, extra, job_name))
    log = "\n".join("""
Command: %s
Script: %s
Job name: %s
Job id: %s
""" % tup for tup in jobs)
    open('output/%s.log.txt' % datetime.datetime.now().isoformat(), 'w').write(log)
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python
import os
import re
import glob
import textwrap
from collections import defaultdict
# regex to remove the non-setname part of name
cutoff = re.compile('-.*')
fnparse = re.compile("""
(?P<name>[rcRC]{1,2}[12](?:\d{2}|[\d_]{4}))
-(?P<pk>[\d.]+)-(?P<pdist>[\d.]+)
-(?P<k>\d+)-(?P<dist>[\d.]+)-""", re.VERBOSE)
class smallstat(object):
    """Like a little defaultdict(int), but contigous."""
    def __init__(self):
        # histogram cells; index 0 always exists
        self.data = [0]
    def inc(self, idx, inc=1):
        """Increment the index idx by inc(=1), create in needed."""
        shortfall = idx - len(self.data) + 1
        if shortfall > 0:
            # grow with zero cells up to and including idx
            self.data += [0] * shortfall
        self.data[idx] += inc
def multibar(*args, **kwargs):
    """Plot multiple bar charts like for comparison, with pylab."""
    from pylab import bar, show
    from itertools import cycle
    import numpy as np
    total_width = kwargs.setdefault('width', 0.8)
    base_left = kwargs.setdefault('left', None)
    color_cycle = cycle(kwargs.setdefault('colors', 'brgmcyk'))
    delta = total_width/len(args)
    offset = 0
    drawn = []
    for series in args:
        if type(series) is tuple:
            # the series carries its own x positions: (left, heights)
            xs, series = series
            xs = np.array(xs) + offset
        elif base_left is not None:
            xs = np.array(base_left) + offset
        else:
            xs = np.arange(len(series)) + offset
        drawn.append(bar(xs, series, width=delta, color=color_cycle.next()))
        offset += delta
    return drawn
def find_medium(test):
    """Glob and return all but the 'smallest' and 'largest' files."""
    # poor man's case-insensitive glob: Solomon files sometimes use
    # capital C, R and RC in their names
    matches = glob.glob(test + '*.p') + glob.glob(test.upper() + '*.p')
    return sorted(matches)[1:-1]
def read_as_set(f):
    """Read the file and return set of lines."""
    return set(line.strip() for line in open(f))
def split_groups(s):
    """Insert newlines before first occurence of a group."""
    # only the first whitespace-preceded occurrence of each family
    # marker is turned into a line break
    for group in ('c2', 'r1', 'r2', 'rc1', 'rc2'):
        s = re.sub(r"\s%s" % group, "\n%s" % group, s, count=1)
    return s
def printf(set_):
"""Used to display a set, with count, sorted and textwrapped."""
print "(%d)"%len(set_)
un_derscore = lambda x: x.replace('_', '0')
splat = split_groups(" ".join(sorted(set_, key=un_derscore)))
print "\n\n".join(textwrap.fill(l) for l in splat.split("\n"))
def print_grouped(sum_of_all):
"""Output with printf, but Solomon and Homberger separately."""
# Junk suppressed:
"""
print "All found results are:"
printf(sum_of_all)
print "Including junk:"
printf(sum_of_all.difference(
sel_solomons(sum_of_all), sel_homberger(sum_of_all)))
"""
# helpers (maybe later global)
def sel_solomons(set_):
"""Select Solomon test names (only full 100 customer)."""
return set(filter(re.compile('r?c?\d{3}$').match, set_))
def sel_homberger(set_):
"""Select Solomon test names (only full 100 customer)."""
return set(filter(re.compile('r?c?[0-9_]{5}$').match, set_))
print "Full Solomon tests:"
printf(sel_solomons(sum_of_all))
print "Homberger tests:"
printf(sel_homberger(sum_of_all))
def compare(*args):
"""Read in the passed files and display differences."""
if len(args) < 2:
print "Provide at least two filenames to compare."
return
if len(args) > 2:
print "Warning: only 2 files work now."
first, secnd = map(read_as_set, args[:2])
print "Only in%s:" % args[0]
print_grouped(first.difference(secnd))
print "Only in %s:" % args[1]
print_grouped(secnd.difference(first))
print "In both:"
print_grouped(first.intersection(secnd))
def union(*args):
"""Read in the passed files and display the union (set sum)."""
if len(args) < 1:
print "Provide at least two filenames to add together."
return
sets = map(read_as_set, args)
sum_of_all = set.union(*sets)
print_grouped(sum_of_all)
def raw_union(*args):
    """Print the union of arguments, one per line, no bubblegum."""
    combined = set.union(*map(read_as_set, args))
    return "\n".join(sorted(combined))
def raw_intersection(*args):
    """Print the intersection of arguments, one per line, no bubblegum."""
    # docstring fixed: it used to claim "union" (copy/paste slip)
    return "\n".join(sorted(set.intersection(*map(read_as_set, args))))
def intersection(*args):
"""Set intersection of one (two) or more files."""
if len(args) < 1:
print "Provide at least two filenames to intersect."
return
sets = map(read_as_set, args)
product_of_all = set.intersection(*sets)
print "The elements repeating all over again are:"
print_grouped(product_of_all)
def progress(*args):
"""Compare a list of files, displaying new items, not found before."""
sets = map(read_as_set, args)
total = set()
for arg, set_ in zip(args, sets):
if len(set_.difference(total)):
print "\n *** New things found in %s:" % arg
print_grouped(set_.difference(total))
else:
print "\n ... Nothing new in %s:" % arg
total.update(set_)
def missing(*args):
"""List problem sets which are missing from all the arguments."""
def gen_hombergers():
"""Set of all Homberger instance names."""
return set([ c+n+s+x
for c in ['c','r','rc']
for n in ['1', '2']
for s in ['_2','_4','_6','_8','10']
for x in (['_%d' % i for i in xrange(1,10)]+['10']) ])
def gen_solomons():
"""Set of all Solomon instance names."""
stats = [
('c1', 9), ('c2', 8),
('r1', 12), ('r2', 11),
('rc1', 8), ('rc2', 8) ]
return set([ '%s%02d' % (fam, num)
for fam, count in stats
for num in xrange(1, count+1) ])
sum_of_all = set.union(*map(read_as_set, args))
hombergers = gen_hombergers()
print "Missing Homberger tests:"
difference = hombergers.difference(sum_of_all)
if difference == hombergers:
print "(ALL %d)" % len(hombergers)
else:
printf(difference)
solomons = gen_solomons()
print "Missing Solomon tests:"
difference = solomons.difference(sum_of_all)
if difference == solomons:
print "(ALL %d)" % len(solomons)
else:
printf(difference)
def main():
    """Main function - clean up a typical /output (sub)directory.

    Moves best-k solutions into 100s/, refreshes all_list.txt,
    prints good/bad summaries, optionally deletes medium solutions and
    writes the never/bad/good/always list files.
    """
    # helpers
    def create_file(fn, set_):
        # write the set to fn, or verify an existing fn agrees with it
        if not os.path.exists(fn):
            open(fn, 'w').write("\n".join(sorted(set_)))
        else:
            present = read_as_set(fn)
            if present <> set_:
                print "File %s present, but inconsistent, differences" % fn
                printf(present.symmetric_difference(set_))
    # ensure directory for best results (k == 100%)
    if not os.path.exists('100s') and os.path.basename(os.getcwd()) <> '100s':
        print "Creating directory 100s (best-k results)"
        os.makedirs('100s')
    else:
        print "Directory 100s already present"
    # move best results to their directory (also their .vrp companions)
    solved = re.compile('[^-]+-100.0-.*')
    sol_ok = filter(solved.match, glob.glob('*.*'))
    if len(sol_ok):
        print "Moving %d best-k results to 100s:" % len(sol_ok)
        for f in sol_ok:
            print f
            os.rename(f, os.path.join('100s',f))
    else:
        print "No best-k results found here."
    # ensure there is an up-to-date all_list.txt, read results
    present = set(glob.glob('*.p'))
    if os.path.exists('all_list.txt'):
        files = read_as_set('all_list.txt')
        if not files >= present:
            print "all_list.txt missing files:"
            printf(present.difference(files))
            files = files.union(present)
            open('all_list.txt', 'w').write("\n".join(sorted(files)))
    else:
        # there was no all_list.txt
        open('all_list.txt', 'w').write("\n".join(sorted(present)))
        files = present
    # grouping of the results to different sets
    sets_bad = set(cutoff.sub('', f).lower() for f in files)
    # good sets are always in the
    sets_good = set(cutoff.sub('', f.replace('100s/','')).lower()
        for f in glob.glob("100s/*.p"))
    ##sets_sometimes = sets_bad.intersection(sets_good)
    sets_always = sets_good.difference(sets_bad)
    sets_never = sets_bad.difference(sets_good)
    # print summaries (for every run)
    print "\nBad results:"
    print_grouped(sets_bad)
    print "\nGood results:"
    print_grouped(sets_good)
    # quieting down somewhat
    """
    print "\nSolved sometimes:"
    printf(sets_sometimes)
    print "\nSolved never:"
    printf(sets_never)
    print "\nSolved always:"
    printf(sets_always)
    """
    # remove junk - medium solutions (conditionally)
    if len(present) > 2*len(sets_bad):
        if 'y' == raw_input('Delete medium solutions (y/N)?'):
            for i in sets_bad:
                moritures = find_medium(i)
                print i, len(moritures)
                for f in moritures:
                    print "Removing", f, "..."
                    os.unlink(f)
    # create lists for bad, never and sometimes
    create_file('never.txt', sets_never)
    create_file('bad.txt', sets_bad) # broadest
    ##create_file('sometimes.txt', sets_sometimes)
    ##create_file('100s/sometimes.txt', sets_sometimes)
    create_file('100s/good.txt', sets_good) # broadest
    create_file('100s/always.txt', sets_always)
    raw_input('Done. Press ENTER')
def draw_map(colors = defaultdict((lambda: ('w', '/')))):
"""Plot tests (solutions) as squares in color with mpl"""
sol_counts = dict([('c1', 9), ('c2', 8), ('r1', 12), ('r2', 11),
('rc1', 8), ('rc2', 8) ])
from matplotlib.pyplot import subplot, show, bar, title
from itertools import cycle
groups = 'c1 r1 rc1 c2 r2 rc2'.split()
for i in xrange(6):
subplot(230+i+1)
for j in xrange(sol_counts[groups[i]]):
name = groups[i]+ "%02d" % (j+1)
print name, j, 0, colors[name]
bar(j, 0.8, color=colors[name][0], hatch=colors[name][1])
base = 1
homb_numbers = ['_%d' % (n+1,) for n in xrange(9)]+['10']
for size in "_2 _4 _6 _8 10".split():
for j in xrange(10):
name = groups[i]+size+homb_numbers[j]
print name, j, base, colors[name]
bar(j, 0.8, bottom=base, color=colors[name][0], hatch=colors[name][1])
base += 2
title(groups[i])
show()
def scan_solutions(path = '.'):
"""Search specified directories (default: current) for solutions."""
data = dict()
for dirpath, _, filenames in os.walk(path):
for f in filenames:
m = fnparse.search(f)
if m:
print f, m.group()
d = m.groupdict()
data.setdefault(d['name'].lower(), []).append((int(d['k']), float(d['dist'])))
for s in data:
data[s].sort()
return data
def k_map():
    """Plot a route count summary for solutions in/below current directory."""
    best = get_best_results()
    results = scan_solutions('.')
    colors = defaultdict(lambda: ('white', '/'))
    for name in results:
        found_k = results[name][0][0]
        best_k = best[name][0]
        if best_k >= found_k:
            colors[name] = ('green', '')    # matched best route count
        elif best_k + 1 == found_k:
            colors[name] = ('yellow', '')   # one route over best
        else:
            colors[name] = ('red', '')      # worse than that
    draw_map(colors)
def dist_map():
    """Plot a distance summary for solutions in/below current directory.
    Solutions with wrong route count are marked black."""
    best = get_best_results()
    results = scan_solutions('.')
    colors = defaultdict(lambda: ('white', '/'))
    for name in results:
        best_k, best_dist = best[name]
        found_k, found_dist = results[name][0]
        if best_k < found_k:
            colors[name] = ('black', '')    # route count not matched
        elif found_dist <= best_dist * 1.01:
            colors[name] = ('green', '')    # within 1% of best known
        elif found_dist <= best_dist * 1.05:
            colors[name] = ('yellow', '')   # within 5%
        else:
            colors[name] = ('red', '')
    draw_map(colors)
def get_best_results():
    """Load a dictionary with best known result tuples.

    Reads the sum* files shipped next to the vrptw package and maps
    lowercased instance name -> (route count, distance).
    """
    import vrptw
    pattern = os.path.join(os.path.dirname(vrptw.__file__), 'bestknown', 'sum*')
    best = {}
    for summary in glob.glob(pattern):
        for line in open(summary):
            name, k, dist = line.split()
            best[name.lower()] = (int(k), float(dist))
    return best
def deepmap(f, something):
    """Map a nested structure of lists/tuples/dicts, keeping the layout.

    f is applied to every leaf; containers are rebuilt around the mapped
    leaves. Uses isinstance (not exact type checks), so container
    subclasses are recursed into as well. Dict keys are left untouched.
    """
    if isinstance(something, list):
        return [deepmap(f, x) for x in something]
    elif isinstance(something, tuple):
        return tuple(deepmap(f, x) for x in something)
    elif isinstance(something, dict):
        return dict((k, deepmap(f, v)) for k, v in something.items())
    else:
        return f(something)
def enter_ipython(extra_locals=None):
    """Run IPython embedded shell with added locals.
    To debug a specific place in script just call:
    enter_ipython(locals())
    """
    # fresh dict per call - the shell can assign into its namespace,
    # which would otherwise mutate a shared default argument
    if extra_locals is None:
        extra_locals = {}
    # locals().update(...) inside a function is a no-op in CPython, so
    # the names are handed to the embedded shell explicitly instead
    # (IPython 0.10 API: IPShellEmbed.__call__ accepts local_ns)
    import IPython
    IPython.Shell.IPShellEmbed()(local_ns=extra_locals)
def plot_excess_routes(*args):
    """Display a histogram of excess routes in solutions.

    Walks every directory given in args (default: the current one),
    matches solution file names with the global ``fnparse`` pattern and
    counts routes above the best known number for each test.
    """
    best = get_best_results()
    def get_stats(path):
        # Histogram of (routes - best known routes) for one directory tree.
        stats = smallstat()
        mem = set()
        for dirpath, _, filenames in os.walk(path):
            for f in filenames:
                m = fnparse.search(f)
                if m:
                    print('%s %s' % (f, m.group()))
                    if m.group() in mem:
                        print("duplicate")
                        continue
                    mem.add(m.group())
                    d = m.groupdict()
                    bk = best[d['name'].lower()]
                    ex = int(d['k'])-bk[0]
                    stats.inc(ex)
        return stats.data
    if len(args) == 0:
        args = ['.']
    from pylab import show, xlabel, ylabel, title, xticks, hist
    # keep the per-directory histograms: the tick-relabelling code below
    # needs them after plotting
    all_stats = [get_stats(a) for a in args]
    multibar(*all_stats)
    xlabel('Excess routes')
    ylabel('No. of solutions')
    std_title = os.path.basename(os.getcwd())
    cust_title = raw_input('Enter title (%s): ' % std_title)
    title(cust_title if cust_title != '' else std_title)
    locs, _ = xticks()
    if locs[1] < 1:
        # BUGFIX: the original referenced ``stats.data`` here, but
        # ``stats`` only exists inside get_stats() -> NameError.
        xticks(range(max(len(s) for s in all_stats)))
    print(locs)
    show()
# global list of functions defined in this module, used as the set of
# sub-commands callable from the command line
from types import FunctionType
funcs = filter(lambda k: type(globals()[k])==FunctionType, globals().keys())
if __name__ == '__main__':
    # my well-known "call-function-from-argv" design pattern:
    # argv[1] names the function, remaining argv entries become its args
    import sys
    if len(sys.argv) > 1 and sys.argv[1] in funcs:
        # call function passing other args as params
        res = globals()[sys.argv[1]](*sys.argv[2:])
        # only echo a result when the function actually returned one
        if not res is None:
            print res
    else:
        # unknown or missing command - list what is available
        print "Use one, out of a subset, of these:\n "+"\n ".join(funcs)
| Python |
#!/usr/bin/env python
import sys
import glob
from itertools import repeat
import matplotlib
matplotlib.use('Qt4Agg')
import pylab
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
from PyQt4 import QtCore, QtGui
from ui_helper import Ui_Helper
class ArgMap(object):
    """A class for determining indexes of sets on the plot.

    Maps element names (file paths) to consecutive 1-based x positions,
    remembering tick positions/labels for the first member of each test
    family (names containing '01.' or '06.' after normalizing '_'->'0').
    """
    def __init__(self):
        self.reset()
    def reset(self):
        """Empty the mapping and counters. Also initialize."""
        self.d = {}             # element -> 1-based index
        self.n = 0              # number of elements mapped so far
        self.ticks = []         # x positions opening a family
        self.ticklabels = []    # short labels for those positions
    def checkTick(self, el):
        """Called by addOne - checks if element is first of a family."""
        norm = el.replace('_', '0')
        # the removed '<>' operator and find()!=-1 replaced by 'in' tests
        if '01.' in norm or '06.' in norm:
            self.ticks.append(self.d[el])
            self.ticklabels.append(el[el.index('/')+1:el.index('.')])
    def addOne(self, el):
        """Single unchecked addition (use __call__ to add safely)."""
        self.n = self.d[el] = self.n+1
        self.checkTick(el)
    def add(self, els):
        """Adding multiple elements from an iterable."""
        # BUGFIX: was ``map(self, els)`` - a lazy no-op under Python 3
        for el in els:
            self(el)
    def __call__(self, el):
        """Calling the object does safe mapping of element to index."""
        if el not in self.d:
            self.addOne(el)
        return self.d[el]
class Plot(object):
    """This encapsulates details connected with the plot.

    Owns the matplotlib Figure with two stacked axes (route count on
    top, total distance below), the Qt canvas/toolbar widgets, and the
    ArgMap that assigns x positions to test names.
    """
    def __init__(self, helper):
        self.helper = helper
        # NOTE(review): figsize is in inches - (600,600) at dpi=72 looks
        # like pixel dimensions were intended; verify.
        self.fig = Figure(figsize=(600,600), dpi=72, facecolor=(1,1,1), edgecolor=(0,0,0))
        self.ax_k = self.fig.add_subplot(211)
        self.ax_d = self.fig.add_subplot(212)
        # the canvas:
        self.canvas = FigureCanvas(self.fig)
        # and its toolbar
        self.toolbar = NavigationToolbar(self.canvas, helper)
        self.attachTo(helper.ui.verticalLayout)
        self.argmap = ArgMap()
        self._setup_plots()
        self._update_ticks()
    def _setup_plots(self):
        # static axis labels; called again after cla() wipes them
        self.ax_k.set_ylabel('route count')
        self.ax_d.set_ylabel('total distance')
    def _update_ticks(self):
        # keep both axes' x range and family ticks in sync with argmap
        self.ax_k.set_xlim((0, self.argmap.n+1))
        self.ax_k.set_xticks(self.argmap.ticks)
        self.ax_k.set_xticklabels(self.argmap.ticklabels)
        self.ax_d.set_xlim((0, self.argmap.n+1))
        self.ax_d.set_xticks(self.argmap.ticks)
        self.ax_d.set_xticklabels(self.argmap.ticklabels)
    def attachTo(self, layout):
        # insert the mpl widgets into the dialog's layout
        layout.addWidget(self.canvas)
        layout.addWidget(self.toolbar)
    def reset(self):
        """Remove plotted data from the drawing area."""
        self.argmap.reset()
        self.ax_k.cla()
        self.ax_d.cla()
        self._setup_plots()
        self._update_ticks()
        self.canvas.draw()
    def display(self, operation):
        # plot one finished operation: route counts ('o') and distances ('.')
        xcoords = map(self.argmap, operation.args)
        lbl = operation.get_name()
        self.ax_k.plot(xcoords, operation.ks, 'o', label=lbl)
        self.ax_k.legend()
        # pad the y range by one route so markers are not on the border
        ymin, ymax = self.ax_k.get_ylim()
        self.ax_k.set_ylim((ymin-1, ymax+1))
        self.ax_d.plot(xcoords, operation.ds, '.', label=lbl)
        self.ax_d.legend()
        # pad distances by 3% of the current spread
        ymin, ymax = self.ax_d.get_ylim()
        spread = (ymax - ymin)*.03
        self.ax_d.set_ylim((ymin-spread, ymax+spread))
        self._update_ticks()
        self.canvas.draw()
class Operation(object):
    """An abstract operation for the sets to perform.

    Holds the list of test files to process and collects the results
    (route counts in ks, distances in ds) as the worker produces them.
    """
    def __init__(self, args):
        # accept either a ready list of files or a glob pattern string
        if type(args) == str:
            self.args = self.find_args(args)
        else:
            self.args = args
        self.ks = []    # route counts, appended by the worker
        self.ds = []    # total distances, appended by the worker
    def find_args(self, argstr):
        from glob import glob
        matches = glob(argstr)
        # '_' sorts after digits; normalize so e.g. c1_2 orders like c102
        return sorted(matches, key=lambda name: name.replace('_', '0'))
    def get_name(self):
        """Description of operation, e.g. for plot label."""
        return 'abstract'
def best_val(name):
    """The mapping function for best known value.

    Runs inside a pool worker process; loads the task (without its best
    known value check, hence the False flag - confirm in pygrout) and
    returns task.bestval() for plotting.
    """
    from pygrout import VrptwTask
    task = VrptwTask(name, False)
    return task.bestval()
class BestOperation(Operation):
    """Plots the published best-known values instead of computing anything."""
    def get_iterator(self, worker):
        # fan out over the worker's process pool
        return worker.p.imap(best_val, self.args)
    def get_name(self):
        return 'b.known'
def savings_val(task):
    """The mapping function for savings heuristic.

    Runs inside a pool worker; ``task`` is a (name, waitlimit, mi)
    tuple.  Builds a solution with the plain savings heuristic and
    returns sol.val().
    """
    name, waitlimit, mi = task
    from pygrout import VrptwSolution, VrptwTask, build_by_savings
    print "Should process", name
    sol = VrptwSolution(VrptwTask(name))
    build_by_savings(sol, waitlimit, mi)
    return sol.val()
def mfsavings_val(task):
    """The mapping function for savings heuristic (mf variant).

    Same contract as savings_val(), but builds with build_by_mfsavings.
    NOTE(review): near-duplicate of savings_val - consider folding the
    builder choice into one function.
    """
    name, waitlimit, mi = task
    from pygrout import VrptwSolution, VrptwTask, build_by_mfsavings
    print "Should process", name
    sol = VrptwSolution(VrptwTask(name))
    build_by_mfsavings(sol, waitlimit, mi)
    return sol.val()
class SavingsOperation(Operation):
    """Runs the savings heuristic (plain or mf variant) over the test set."""
    def __init__(self, args, mi, waitlimit, mfs=True):
        Operation.__init__(self, args)
        self.mi = mi                # savings parameter
        self.waitlimit = waitlimit  # optional wait-time limit (or None)
        self.mfs = mfs              # True -> mfsavings variant
    def get_iterator(self, worker):
        # one (name, waitlimit, mi) work item per test file
        tasks = [(name, self.waitlimit, self.mi) for name in self.args]
        mapper = mfsavings_val if self.mfs else savings_val
        return worker.p.imap(mapper, tasks)
    def get_name(self):
        if self.mfs:
            desc = "mfs(%.1f)" % self.mi
        else:
            desc = "sav(%.1f)" % self.mi
        if self.waitlimit:
            desc += "WL(%d)" % self.waitlimit
        return desc
def greedy_val(task):
    """Pool-worker mapper for the greedy construction heuristic.

    ``task`` is a (name, order) tuple.  Sets the customer sort order on
    the VrptwTask class (global for this worker process) before building.
    """
    name, order = task
    from pygrout import VrptwSolution, VrptwTask, build_first
    VrptwTask.sort_order = order
    sol = VrptwSolution(VrptwTask(name))
    build_first(sol)
    return sol.val()
class GreedyOperation(Operation):
    """Runs the greedy construction with a chosen customer sort order."""
    def __init__(self, args, order):
        Operation.__init__(self, args)
        self.order = order          # sort key name, also the plot label
    def get_iterator(self, worker):
        # pair every test file with the chosen order
        tasks = [(name, self.order) for name in self.args]
        return worker.p.imap(greedy_val, tasks)
    def get_name(self):
        return self.order
class Worker(QtCore.QThread):
    """An active object for background computations.

    Executes an Operation on a multiprocessing pool in a Qt thread,
    reporting progress to the Helper dialog through custom signals.
    """
    def __init__(self, helper, parent = None):
        super(Worker, self).__init__(parent)
        self.helper = helper
        # custom signals for the GUI
        QtCore.QObject.connect(self, QtCore.SIGNAL("progress(int)"), helper.update_progress)
        QtCore.QObject.connect(self, QtCore.SIGNAL("newProgress(int)"), helper.init_progress)
        # terminating signals for the GUI
        QtCore.QObject.connect(self, QtCore.SIGNAL("finished()"), helper.background_done)
        QtCore.QObject.connect(self, QtCore.SIGNAL("terminated()"), helper.background_done)
        # the operation, passed before starting the thread
        self.currentOp = None
        # a single pool for processing
        from multiprocessing import Pool
        self.p = Pool()
    def run(self):
        """Thread body: consume the operation's iterator, emit progress."""
        if not self.currentOp:
            return
        self.emit(QtCore.SIGNAL('newProgress(int)'), len(self.currentOp.args))
        numDone = 0
        for k, d in self.currentOp.get_iterator(self):
            self.currentOp.ks.append(k)
            self.currentOp.ds.append(d)
            numDone += 1
            self.emit(QtCore.SIGNAL('progress(int)'), numDone)
        # NOTE(review): this calls into matplotlib/Qt widgets from the
        # worker thread - Qt expects GUI work on the main thread; verify.
        self.helper.plot.display(self.currentOp)
    def performOperation(self, operation):
        """Store the operation, lock the UI and start the thread."""
        self.currentOp = operation
        self.helper.lock_ui()
        self.start()
class Helper(QtGui.QDialog):
    """Main dialog: wires the generated UI, the plot and the worker."""
    def __init__(self, parent=None):
        # boilerplate
        QtGui.QDialog.__init__(self, parent)
        self.ui = Ui_Helper()
        self.ui.setupUi(self)
        # load sorting orders into combobox
        from vrptw import sort_keys
        self.ui.greedyOrder.addItems(sorted(sort_keys.keys()))
        # add custom mpl canvas
        self.plot = Plot(self)
        # the worker thread (one, for now)
        self.worker = Worker(self)
        # the stopwatch placeholder
        self.watch = '(no watch set!)'
        # button -> slot connections
        QtCore.QObject.connect(self.ui.update, QtCore.SIGNAL("clicked()"), self.plot_savings)
        QtCore.QObject.connect(self.ui.best, QtCore.SIGNAL("clicked()"), self.plot_best)
        QtCore.QObject.connect(self.ui.greedy, QtCore.SIGNAL("clicked()"), self.plot_greedy)
        QtCore.QObject.connect(self.ui.clearPlot, QtCore.SIGNAL("clicked()"), self.clear_plot)
    def lock_ui(self):
        """Called before entering the background operation."""
        from stopwatch import StopWatch
        self.watch = StopWatch()
        self.ui.update.setEnabled(False)
        self.ui.best.setEnabled(False)
    def background_done(self):
        """Slot to unlock some UI elements after finished background operation."""
        self.ui.update.setEnabled(True)
        self.ui.best.setEnabled(True)
        self.ui.progressBar.setEnabled(False)
        self.ui.textEdit.append("Processing finished in %s seconds" % self.watch)
        print "What now?", self.watch
    def plot_best(self):
        """Slot: plot the published best-known values."""
        self.worker.performOperation(BestOperation(self.tests_chosen()))
    def plot_savings(self):
        """Slot: run the savings heuristic with parameters from the UI."""
        mi = self.ui.mi.value()
        waitlimit = self.ui.waitlimit.value() if self.ui.has_waitlimit.checkState() else None
        mfs = self.ui.mfs.checkState()
        self.worker.performOperation(SavingsOperation(self.tests_chosen(), mi, waitlimit, mfs))
    def plot_greedy(self):
        """Slot: run the greedy construction with the chosen sort order."""
        order = str(self.ui.greedyOrder.currentText())
        self.worker.performOperation(GreedyOperation(self.tests_chosen(), order))
    def clear_plot(self):
        """Slot for clearing the plot."""
        self.plot.reset()
    def init_progress(self, maxProgress):
        """Slot for resetting the progress bar's value to 0 with a new maximum."""
        self.ui.progressBar.setEnabled(True)
        self.ui.progressBar.setMaximum(maxProgress)
        self.ui.progressBar.setValue(0)
    def update_progress(self, progress):
        """Slot for updating the progress bar."""
        print "--- one done ---"
        self.ui.progressBar.setValue(progress)
    def tests_chosen(self):
        """Return the selected pattern in the families list."""
        return str(self.ui.families.currentItem().text())
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
# almost standard:
helper = Helper()
helper.show()
sys.exit(app.exec_())
| Python |
#!/usr/bin/env python
# SINTEF pages listing best known results for the Homberger benchmarks
# (200, 400, 600, 800 and 1000 customers)
homberger_urls = [
    'http://www.sintef.no/Projectweb/TOP/Problems/VRPTW/Homberger-benchmark/%d00-customers/' % n
    for n in xrange(2,11,2)
]
# Solomon's own pages with best results for his 100-customer instances
solomon_urls = [
    'http://web.cba.neu.edu/~msolomon/c1c2solu.htm',
    'http://web.cba.neu.edu/~msolomon/r1r2solu.htm',
    'http://web.cba.neu.edu/~msolomon/rc12solu.htm',
    'http://web.cba.neu.edu/~msolomon/heuristi.htm'
]
import re
import urllib2
import time
# extracts (test name, vehicles, distance) from the SINTEF HTML tables
homb = re.compile(r'''<td style.*?([rc]{1,2}[12][0-9_]{4,6}).*?<td.*?(\d+).*?<td.*?([\d\.]+)''', re.DOTALL)
# extracts the same triple from Solomon's pages after sanitize()
solo = re.compile('''([RC]{1,2}[12]\d{2}\.?\d{0,3})\s*(\d+)\s*(\d+\.\d+)''', re.DOTALL)
# download function
get = lambda url: urllib2.urlopen(url).read()
def save(match):
    """Persist one scraped best-known result and return its summary line.

    ``match`` groups are (test name, vehicle count, distance).  The
    value is written to vrptw/bestknown/<name>.txt only when it differs
    from what is stored - or when no file exists yet (the original
    crashed on brand-new tests).
    """
    name, vehicles, distance = match.groups()
    name = name.replace('_10', '10')
    fname = 'vrptw/bestknown/%s.txt' % name
    try:
        with open(fname) as f:
            old_data = f.read()
    except IOError:
        # first time we see this test - treat it as changed
        old_data = None
    new_data = '%s %s\n' % (vehicles, distance)
    summary = '%-7s %3s %s' % (name, vehicles, distance)
    if old_data != new_data:
        with open(fname, 'w') as f:
            f.write(new_data)
        print('%s CHANGED, from: %s' % (summary, old_data))
    else:
        print(summary)
    return summary
def sanitize(dta):
    """Prepare some bad HTML for easier regexp scanning.

    Strips comments, <style> blocks and all remaining tags, then
    collapses every run of characters that cannot appear in a result
    line (digits, newlines, spaces, dots, R, C) into a single space.
    """
    for pattern in ("<!--.*?-->", "<style.*?</style>", "<.*?>"):
        dta = re.compile(pattern, re.DOTALL).sub('', dta)
    return re.sub('[^\d\n \.RC]+', ' ', dta)
def get_hombergers_sintef():
    """Download best result for Hombergers tests from SINTEF site.

    Scrapes each benchmark page with the ``homb`` regex, saves every
    result via save() and writes a sorted summary file.
    """
    summary = []
    for u in homberger_urls:
        print "Visiting", u
        start = time.time()
        data = get(u)
        s, kb = time.time()-start, len(data)/1024.0
        found = homb.finditer(data)
        for m in found:
            summary.append(save(m))
        print "Downloaded %.1f KB in %.1f s (%.1f KB/s)" % (kb, s, kb/s)
    # '_' normalized to '0' so c1_2 sorts like c102
    # NOTE(review): file handle left to the garbage collector
    open('vrptw/bestknown/summary_H.txt', 'w').write("\n".join(sorted(summary,
        key=lambda x: x.replace('_', '0'))))
def get_solomons():
    """Download best results for Solomons tests as published by himself.

    Same flow as get_hombergers_sintef(), but the pages need sanitize()
    before the ``solo`` regex can find (name, vehicles, distance).
    """
    import os.path
    # local debug proxy for downloading - uncomment below
    # get = lambda url: open(os.path.basename(url)).read()
    summary = []
    for u in solomon_urls:
        print "Visiting", u
        start = time.time()
        data = sanitize(get(u))
        found = solo.finditer(data)
        for m in found:
            summary.append(save(m))
        s, kb = time.time()-start, len(data)/1024.0
        print "Downloaded %.1f KB in %.1f s (%.1f KB/s)" % (kb, s, kb/s)
    # NOTE(review): file handle left to the garbage collector
    open('vrptw/bestknown/summary_S.txt', 'w').write("\n".join(sorted(summary,
        key=lambda x: x.replace('_', '0'))))
if __name__ == '__main__':
    # Solomon scraping is disabled by default; enable when his pages change.
    get_hombergers_sintef()
    # get_solomons()
| Python |
from vrptw.consts import *
from itertools import count
def pairs(iterable):
    """A generator for adjacent (overlapping) pairs of an iterable.

    pairs([1, 2, 3]) yields (1, 2), (2, 3); empty and single-element
    inputs yield nothing.
    """
    it = iter(iterable)
    try:
        # next() builtin instead of it.next(): works on Python 2.6+ and 3
        prev = next(it)
    except StopIteration:
        # PEP 479: letting StopIteration escape a generator is an error
        # on modern Python - return cleanly for empty input instead
        return
    for next_ in it:
        yield (prev, next_)
        prev = next_
def test_pairs():
    """Unit test for pairs() generator."""
    expected = [(i, i + 1) for i in range(4)]
    for got, want in zip(pairs(range(5)), expected):
        assert got == want
def d(s):
    """Debug print with a sleep.

    The one-second pause throttles output in tight loops.
    """
    import time
    print s
    time.sleep(1)
def dd(s):
    """Debug print, no sleep."""
    print s
def solution_header(sol):
    """Build the summary header compatible with the Czarnas reference code.

    That C++ code computes
        value = ROUTE_COST_WEIGHT * MAX_CUSTOMERS * routes + totalDistance
    with ROUTE_COST_WEIGHT = 2*((MAX_X-MIN_X)+(MAX_Y-MIN_Y)) = 400 and
    MAX_CUSTOMERS = 100, i.e. a fixed 40000 per route.  The formula is
    arguably bad: it depends on the customer count and coordinate range,
    which differ (and are much larger) for the Homberger tests and can
    overflow fixed-point integers - for 1000 customers the multiplier
    alone exceeds 2**21 before the decimal places are even encoded.
    """
    ROUTE_COST_MULTIPLIER = 40000
    routes = len(sol.r)
    value = sol.dist + routes * ROUTE_COST_MULTIPLIER
    lines = [
        "Solution:",
        "Routes: %d" % routes,
        "Vehicle capacity: %.2f" % sol.task.capa,
        "Solution value: %.3f" % value,
        "Total travel distance: %.3f" % sol.dist,
    ]
    return "\n".join(lines) + "\n"
def print_like_Czarnas(sol, sparse=False):
    """Prints the solution in a form compatible (and diffable) with Czarnas.

    With sparse=True, routes serving a single customer (edge count 2:
    depot->customer->depot) are collapsed into one summary line.
    R_*/E_* indices come from vrptw.consts.
    """
    result = solution_header(sol)
    for rt, num in zip(sol.r, count(1)):
        if (not sparse) or rt[R_LEN] > 2:
            result += "Route: %d, len: %d, dist: %.3f, max cap: %.2f" % (
                num, rt[R_LEN], rt[R_DIS], rt[R_CAP])
            # customers in visiting order, skipping the leading depot edge
            result += ", route: "+"-".join(
                str(e[E_FRO]) for e in rt[R_EDG][1:] )+"\n"
    if sparse and any(rt[R_LEN]==2 for rt in sol.r):
        result += "Single routes: " + ", ".join(str(rt[R_EDG][1][E_FRO]) for rt in sol.r if rt[R_LEN]==2)+"\n"
    print result
def print_like_Czarnas_long(sol):
    """Prints a verbose description of the solution (one line per customer).
    Compatible with the printSolutionAllData() method in the reference code
    DATATYPE dist = data.getDistance(DEPOT, getRouteStart(r));
    for (int c = getRouteStart(r); c != DEPOT; c = cust[c].getNext()) {
        initCap -= TO_FLOAT(data.getDemand(c));
        fprintf(output, "(%2d, %7.2f, %7.2f, %7.2f, %7.2f, %5.2f, %6.2f, %6.2f, %4.1f)\n", c,
            TO_FLOAT(cust[c].getArrival()),
            TO_FLOAT(cust[c].getLatestArrival()),
            TO_FLOAT(data.getBeginTime(c)), TO_FLOAT(data.getEndTime(c)),
            TO_FLOAT(data.getServiceTime(c)),
            TO_FLOAT(data.getDistance(cust[c].getPrev(), c)), initCap,
            TO_FLOAT(data.getDemand(c)));
        if (initCap > TO_FLOAT(data.getVehicleCapacity()) || initCap < 0.0)
            fprintf(output, "************* vehicle capacity violated!!!\n");
        dist += data.getDistance(c, cust[c].getNext());
    }
    """
    result = solution_header(sol)
    for rt, num in zip(sol.r, count(1)):
        result += (
            "Route: %d\nRoute length: %d\nRoute cost: %.3f\n"
            "Init capacity: %.2f, max capacity = %.2f\n" %
            (num, rt[R_LEN], rt[R_DIS], rt[R_CAP], rt[R_CAP]) +
            "Route \n"
            "(cust, arriv, ltstArr, bgnWind, endWind, srvcT, dstPrv, weight, dem):\n"
            " ------------------------------------------------------------------\n"
            )
        # NOTE(review): wgt is initialized to 0 and never updated, so the
        # weight column differs from the reference's decreasing initCap -
        # verify against the C++ output.
        wgt = 0
        for bef, aft in pairs(rt[R_EDG]):
            cust = bef[E_TOW]
            result += (
                "(%2d, %7.2f, %7.2f, %7.2f, %7.2f, %5.2f, %6.2f, %6.2f, %4.1f)\n" %
                ( cust, aft[E_ARF], bef[E_LAT], sol.a(cust), sol.b(cust),
                  sol.task.cust[cust][SRV], sol.d(bef[E_FRO], cust), wgt, sol.dem(cust) )
                )
        result += "\n"
    print result
def symbol(i):
    """Return a single display character for customer id ``i``.

    0-9 -> '0'-'9', 10-35 -> 'A'-'Z', 36-61 -> 'a'-'z', 62+ -> '+',
    anything below zero -> '?'.
    """
    if i >= 62:
        return '+'
    if i >= 36:
        return chr(ord('a') + i - 36)
    if i >= 10:
        return chr(ord('A') + i - 10)
    if i >= 0:
        return chr(ord('0') + i)
    # The original reached this fallback by evaluating ``i >= None``,
    # which raises TypeError on Python 3; an explicit default is
    # equivalent to the Python 2 behaviour.
    return '?'
def describe(sol, cols=50, onlyrouted=True):
    """Produces a textual representation of the task.

    Draws an ASCII map (cols wide, height scaled to the coordinate
    aspect ratio) with one symbol() character per customer; with
    onlyrouted=True only customers appearing in sol's routes are shown.
    X/Y/ID and the route indices come from vrptw.consts.
    """
    customers = [ sol.task.cust[c] for c in
                  set(x[E_FRO] for r in sol.r for x in r[R_EDG])
                ] if onlyrouted else sol.task.cust
    # bounding box of the plotted customers (min/max over both axes)
    minx, maxx, miny, maxy = [
        op( x[k] for x in customers ) for k in X, Y for op in min, max ]
    sx, sy = (maxx - minx), (maxy-miny)
    rows = sy * cols // sx
    board = [ [ ' ' for i in xrange(cols+1) ] for j in xrange(rows+1) ]
    # scale each customer into the board and stamp its symbol
    for y, x, i in [ ((c[Y]-miny)*rows//sy, (c[X]-minx)*cols//sx, c[ID])
                     for c in customers ]:
        board[y][x] = symbol(i)
    # rows reversed so the largest y prints at the top
    print "\n".join("".join(row) for row in board[::-1])
| Python |
# undo handlers
def undo_ins(list_, idx):
    """Undo an insert: drop the element that was inserted at idx."""
    del list_[idx]
def undo_pop(list_, idx, val):
    """Undo a pop: splice the removed value back in at idx."""
    list_[idx:idx] = [val]
def undo_set(list_, idx, val):
    """Undo an element overwrite: restore the previous value."""
    list_[idx] = val
def undo_atr(obj, atr, val):
    """Undo an attribute change: restore the previous value."""
    setattr(obj, atr, val)
def undo_add(list_, idx, val):
    """Undo an in-place element addition: subtract the delta back."""
    list_[idx] -= val
def undo_ada(obj, atr, val):
    """Undo an in-place attribute addition: subtract the delta back."""
    current = getattr(obj, atr)
    setattr(obj, atr, current - val)
# undo element tags: the first item of every recorded action.  The
# handlers list below is indexed by these tags; U_CHECKPOINT is not in
# the list because UndoStack.undo() handles it specially.
U_ELEM_IN, U_ELEM_OUT, U_ELEM_MOD, U_ATTRIB, U_ADD, U_ADA, U_CHECKPOINT = range(7)
# undo mapping: tag -> function that reverses the recorded operation
handlers = [ undo_ins, undo_pop, undo_set, setattr, undo_add, undo_ada ]
class UndoStack(object):
    """Holds description of a sequence of operations, possibly separated by checkpoints.

    Every mutating method both performs the change and records how to
    reverse it; undo() replays the records backwards, optionally only
    down to a checkpoint marker returned earlier by checkpoint().
    """
    def __init__(self):
        """Construct empty undo stack."""
        self.commit()
    def ins(self, list_, idx, value):
        """Inserts the value at a specific index in list and returns it for chaining."""
        self.actions.append( (U_ELEM_IN, (list_, idx)) )  # value not needed to undo
        list_.insert(idx, value)
        return value
    def pop(self, list_, idx):
        """Removes a list element and returns its value."""
        data = list_.pop(idx)
        self.actions.append( (U_ELEM_OUT, (list_, idx, data)) )
        return data
    def set(self, list_, idx, value):
        """Sets a list element to new value, returns it for possible chaining."""
        self.actions.append( (U_ELEM_MOD, (list_, idx, list_[idx])) )
        list_[idx] = value
        return value
    def checkpoint(self):
        """Marks current state and returns the marker."""
        self.point += 1
        self.actions.append( (U_CHECKPOINT, self.point) )
        return self.point
    def atr(self, obj, atr, val):
        """Change an object's attribute."""
        data = getattr(obj, atr)
        self.actions.append( (U_ATTRIB, (obj, atr, data)) )
        setattr(obj, atr, val)
        return val
    def add(self, list_, idx, value):
        """Inplace add something to list element."""
        self.actions.append( (U_ADD, (list_, idx, value)) )
        list_[idx] += value
    def ada(self, obj, atr, val):
        """Inplace add to object's attribute; returns the new value."""
        data = getattr(obj, atr)
        self.actions.append( (U_ADA, (obj, atr, val)) )
        setattr(obj, atr, val+data)
        return val+data
    def commit(self):
        """Forget all undo information."""
        self.actions = []
        self.point = 0
    def undo(self, checkpoint = None):
        """Reverse all operations performed through this stack, or up to a checkpoint."""
        # BUGFIX: the original asserted ``checkpoint <= self.point`` even
        # for the default call, relying on ``None <= int`` - a TypeError
        # on Python 3.  Test for None explicitly.
        assert checkpoint is None or checkpoint <= self.point, \
            'Undo to invalid checkpoint'
        while len(self.actions):
            tag, args = self.actions.pop()
            if tag == U_CHECKPOINT:
                if args == checkpoint:
                    self.point = checkpoint-1
                    break
            else:
                # dispatch to the matching module-level undo handler
                handlers[tag](*args)
    def undo_last(self):
        """Rollback actions to last checkpoint."""
        assert self.point > 0, 'No actions to undo'
        self.undo(self.point)
class TestUndoStack(object):
    """Unit test class for py.test

    NOTE(review): setup_class receives the class object (py.test passes
    cls as the first argument despite the ``self`` name), so the stack
    and lists are shared by all tests - each test relies on its own
    undo() call to restore self.l for the next one.
    """
    def setup_class(self):
        """Create the UndoStack used with every test and an example list."""
        self.u = UndoStack()
        self.l_orig = [7, 'dolorem', 4, None, 5.3]
        self.l = self.l_orig[:]
    def setup_method(self, method):
        """Restore the example list, not needed if tests pass, undo does it."""
        # self.l = self.l_orig[:]
    def test_ins(self):
        """Undoing an insertion."""
        self.u.ins(self.l, 0, 2)
        expected = [2]+self.l_orig
        assert self.l == expected
        self.u.undo()
        assert self.l == self.l_orig
    def test_pop(self):
        # pop returns the removed value and undo reinserts it
        out = self.u.pop(self.l, 2)
        assert out == 4
        self.u.undo()
        assert self.l == self.l_orig
    def test_set(self):
        # set overwrites in place and undo restores the old value
        self.u.set(self.l, 1, 'ipsum')
        assert self.l[1] == 'ipsum'
        self.u.undo()
        assert self.l == self.l_orig
    def test_sequence(self):
        # partial undo up to a checkpoint, then full undo
        self.u.pop(self.l, 3)
        self.u.ins(self.l, 3, 123)
        tag = self.u.checkpoint()
        l_on_check = self.l[:]
        self.u.set(self.l, 0, 0)
        self.u.pop(self.l, 0)
        self.u.undo(tag)
        assert l_on_check == self.l
        self.u.undo()
        assert self.l == self.l_orig
    def test_atr(self):
        # attribute changes are undoable too
        self.color = 'red'
        self.u.atr(self, 'color', 'blue')
        assert self.color == 'blue'
        self.u.undo()
        assert self.color == 'red'
| Python |
#====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ====================================================================
#
# This software consists of voluntary contributions made by many
# individuals on behalf of the Apache Software Foundation. For more
# information on the Apache Software Foundation, please see
# <http://www.apache.org/>.
#
import os
import re
import tempfile
import shutil
# directory names skipped while walking the tree (VCS and build output)
ignore_pattern = re.compile('^(.svn|target|bin|classes)')
# source files eligible for rewriting
java_pattern = re.compile('^.*\.java')
# the import statement being migrated away from
annot_pattern = re.compile('import org\.apache\.http\.annotation\.')
def process_dir(dir):
    """Recursively rewrite every .java file under ``dir``.

    Directories matched by ignore_pattern (VCS/build output) are skipped.
    """
    for entry in os.listdir(dir):
        path = os.path.join(dir, entry)
        if os.path.isdir(path):
            if not ignore_pattern.match(entry):
                process_dir(path)
        elif java_pattern.match(entry):
            process_source(path)
def process_source(filename):
    """Rewrite one Java file, migrating org.apache.http.annotation
    imports to net.jcip.annotations.

    The rewrite happens in a temp file; the original is only replaced
    when at least one line changed.  The temp file is always cleaned up.
    """
    tmpfd, tmpfile = tempfile.mkstemp()
    try:
        changed = False
        dst = os.fdopen(tmpfd, 'w')
        try:
            src = open(filename)
            try:
                for line in src:
                    if annot_pattern.match(line):
                        changed = True
                        line = line.replace('import org.apache.http.annotation.', 'import net.jcip.annotations.')
                    dst.write(line)
            finally:
                src.close()
        finally:
            dst.close()
        if changed:
            shutil.move(tmpfile, filename)
        else:
            os.remove(tmpfile)
    except:
        # BUGFIX: the original swallowed every error here, hiding
        # failures.  Clean up the temp file, then propagate.
        if os.path.exists(tmpfile):
            os.remove(tmpfile)
        raise
process_dir('.')
| Python |
import os, time
class MyException(Exception):
    """Exception type raised by failing() so test cases can match it."""
    pass
def passing(*args):
    """Accept any arguments and do nothing (a no-op test keyword)."""
    pass
def sleeping(s):
    """Sleep roughly ``s`` seconds in 0.1 s slices and return ``s``.

    Each slice also records the requested duration in the
    ROBOT_THREAD_TESTING environment variable.
    """
    remaining = s
    while remaining > 0:
        time.sleep(min(remaining, 0.1))
        remaining -= 0.1
        os.environ['ROBOT_THREAD_TESTING'] = str(s)
    return s
def returning(arg):
    """Return the argument unchanged."""
    return arg
def failing(msg='xxx'):
    """Raise MyException with the given message.

    The legacy comma-raise form (``raise MyException, msg``) was
    replaced by the call form, which both Python 2 and 3 accept.
    """
    raise MyException(msg)
# Under Jython only: expose a keyword raising a java.lang.Error as well.
if os.name == 'java':
    from java.lang import Error
    def java_failing(msg='zzz'):
        raise Error(msg)
| Python |
#!/usr/bin/env python
"""Helper script to run all Robot Framework's unit tests.
usage: run_utest.py [options]
options:
-q, --quiet Minimal output
-v, --verbose Verbose output
-d, --doc Show test's doc string instead of name and class
(implies verbosity)
-h, --help Show help
"""
import unittest
import os
import sys
import re
import getopt
# Make the project sources and shared test libraries importable no
# matter which directory this script is started from.
base = os.path.abspath(os.path.normpath(os.path.split(sys.argv[0])[0]))
for path in ['../src', '../src/robot/libraries', '../src/robot',
             '../atest/testresources/testlibs' ]:
    path = os.path.join(base, path.replace('/', os.sep))
    if path not in sys.path:
        sys.path.insert(0, path)
# test modules are files named test_*.py (case-insensitive)
testfile = re.compile("^test_.*\.py$", re.IGNORECASE)
# module name -> file it was imported from, to detect duplicate names
imported = {}
def get_tests(directory=None):
    """Recursively collect unittest suites from all test_*.py files.

    Every visited directory is prepended to sys.path so the test module
    can be imported by bare name; duplicate module names abort the run.
    """
    if directory is None:
        directory = base
    sys.path.insert(0, directory)
    collected = []
    for name in os.listdir(directory):
        if name.startswith("."):
            continue
        fullname = os.path.join(directory, name)
        if os.path.isdir(fullname):
            collected.extend(get_tests(fullname))
        elif testfile.match(name):
            modname = os.path.splitext(name)[0]
            if modname in imported:
                sys.stderr.write("Test module '%s' imported both as '%s' and "
                                 "'%s'.\nRename one or fix test discovery.\n"
                                 % (modname, imported[modname],
                                    os.path.join(directory, name)))
                sys.exit(1)
            module = __import__(modname)
            imported[modname] = module.__file__
            collected.append(unittest.defaultTestLoader.loadTestsFromModule(module))
    return collected
def parse_args(argv):
    """Parse command line options.

    Returns (docs, verbosity): docs is 1 when --doc was given (which
    implies verbose output); verbosity is 0 (quiet), 1 (default) or 2
    (verbose).  Exits via usage_exit() on --help or any getopt error.
    """
    docs = 0
    verbosity = 1
    try:
        options, args = getopt.getopt(argv, 'hH?vqd',
                                      ['help', 'verbose', 'quiet', 'doc'])
        if len(args) != 0:
            raise getopt.error('no arguments accepted, got %s' % (args,))
    except getopt.error as err:
        usage_exit(err)
    for opt, value in options:
        if opt in ('-h', '-H', '-?', '--help'):
            usage_exit()
        # BUGFIX: the long form was misspelled '--quit', so the
        # documented --quiet option never lowered verbosity.
        if opt in ('-q', '--quiet'):
            verbosity = 0
        if opt in ('-v', '--verbose'):
            verbosity = 2
        if opt in ('-d', '--doc'):
            docs = 1
            verbosity = 2
    return docs, verbosity
def usage_exit(msg=None):
    """Print the module help (and an optional error) and exit.

    Exit code 251 for a plain help request, 252 for a usage error.
    """
    print __doc__
    if msg is None:
        rc = 251
    else:
        print '\nError:', msg
        rc = 252
    sys.exit(rc)
if __name__ == '__main__':
    # run every discovered suite; the exit code is the number of
    # failures + errors, capped so it fits in the shell's exit byte
    docs, vrbst = parse_args(sys.argv[1:])
    tests = get_tests()
    suite = unittest.TestSuite(tests)
    runner = unittest.TextTestRunner(descriptions=docs, verbosity=vrbst)
    result = runner.run(suite)
    rc = len(result.failures) + len(result.errors)
    if rc > 250: rc = 250
    sys.exit(rc)
| Python |
#!/usr/bin/env python
import urllib2
import shutil
import os
from os.path import join, exists, dirname, abspath
from glob import glob
from subprocess import call
from zipfile import ZipFile
# pinned jasmine-reporters release fetched on first run
JASMINE_REPORTER_URL='https://github.com/larrymyers/jasmine-reporters/zipball/0.2.1'
BASE = abspath(dirname(__file__))
# where the JS test results are written
REPORT_DIR = join(BASE, 'jasmine-results')
# external libraries live one level up from this script
EXT_LIB = join(BASE, '..', 'ext-lib')
JARDIR = join(EXT_LIB, 'jasmine-reporters', 'ext')
def run_tests():
    """Run the JS unit tests from this script's own directory.

    Downloads the reporter library on first use, clears old reports and
    invokes the Rhino-based runner.  The working directory is restored
    even when a step fails (the original left the process chdir'd into
    BASE on error).
    """
    workdir = os.getcwd()
    os.chdir(BASE)
    try:
        download_jasmine_reporters()
        clear_reports()
        run()
    finally:
        os.chdir(workdir)
def run():
    """Execute the Jasmine specs with Rhino (js.jar) in an env.js DOM."""
    cmd = ['java', '-cp', '%s%s%s' % (join(JARDIR, 'js.jar'), os.pathsep, join(JARDIR, 'jline.jar')),
           'org.mozilla.javascript.tools.shell.Main', '-opt', '-1', 'envjs.bootstrap.js',
           join(BASE, 'webcontent', 'SpecRunner.html')]
    call(cmd)
def clear_reports():
    """Recreate an empty report directory."""
    if exists(REPORT_DIR):
        shutil.rmtree(REPORT_DIR)
    os.mkdir(REPORT_DIR)
def download_jasmine_reporters():
    """Fetch and unpack jasmine-reporters into ext-lib (only once)."""
    if exists(join(EXT_LIB, 'jasmine-reporters')):
        return  # already installed
    if not exists(EXT_LIB):
        os.mkdir(EXT_LIB)
    reporter = urllib2.urlopen(JASMINE_REPORTER_URL)
    tmp_zip = join(EXT_LIB, 'tmp.zip')
    # BUGFIX: the archive is binary - text mode ('w'/'r') corrupts it on
    # platforms with newline translation; use 'wb'/'rb'.
    with open(tmp_zip, 'wb') as temp:
        temp.write(reporter.read())
    with open(tmp_zip, 'rb') as temp:
        ZipFile(temp).extractall(EXT_LIB)
    extraction_dir = glob(join(EXT_LIB, 'larrymyers-jasmine-reporters*'))[0]
    print('Extracting Jasmine-Reporters to %s' % extraction_dir)
    shutil.move(extraction_dir, join(EXT_LIB, 'jasmine-reporters'))
# allow running this file directly as a script
if __name__ == '__main__':
    run_tests()
| Python |
MY_VARIABLE = "An example string"
| Python |
import sys
from os import remove
from os.path import exists
import unittest
from StringIO import StringIO
class RunningTestCase(unittest.TestCase):
    """Base class for tests around code that writes to the std streams.

    Swaps sys.stdout/stderr and the dunder originals for StringIO
    buffers around every test, restores them afterwards, and offers
    assertions over the captured text.  Subclasses may list file paths
    to delete before and after each test in ``remove_files``.
    """
    remove_files = []

    # stream attribute names handled by setUp/tearDown; the saved copy
    # lives in self.orig__stdout__ / self.orig_stdout etc.
    def _stream_names(self):
        return ('__stdout__', '__stderr__', 'stdout', 'stderr')

    def _backup_name(self, name):
        return 'orig' + (name if name.startswith('_') else '_' + name)

    def setUp(self):
        for name in self._stream_names():
            setattr(self, self._backup_name(name), getattr(sys, name))
            setattr(sys, name, StringIO())
        self._remove_files()

    def tearDown(self):
        for name in self._stream_names():
            setattr(sys, name, getattr(self, self._backup_name(name)))
        self._remove_files()

    def _assert_outputs(self, stdout=None, stderr=None):
        # the dunder streams carry the expected text; the replaced
        # stdout/stderr must stay empty
        self._assert_output(sys.__stdout__, stdout)
        self._assert_output(sys.__stderr__, stderr)
        self._assert_output(sys.stdout, None)
        self._assert_output(sys.stderr, None)

    def _assert_output(self, stream, expected):
        content = stream.getvalue()
        if not expected:
            self._assert_no_output(content)
            return
        for text, count in expected:
            self._assert_output_contains(content, text, count)

    def _assert_no_output(self, output):
        if output:
            raise AssertionError('Expected output to be empty:\n%s' % output)

    def _assert_output_contains(self, output, content, count):
        if output.count(content) != count:
            raise AssertionError("'%s' not %d times in output:\n%s"
                                 % (content, count, output))

    def _remove_files(self):
        for path in self.remove_files:
            if exists(path):
                remove(path)
| Python |
import os
# Locations of pre-generated ("golden") reporting test data, resolved
# relative to this module so tests work from any working directory.
THIS_PATH = os.path.dirname(__file__)
GOLDEN_OUTPUT = os.path.join(THIS_PATH, 'golden_suite', 'output.xml')
GOLDEN_OUTPUT2 = os.path.join(THIS_PATH, 'golden_suite', 'output2.xml')
GOLDEN_JS = os.path.join(THIS_PATH, 'golden_suite', 'expected.js')
| Python |
__author__ = 'janne'
| Python |
#!/usr/bin/env python
import fileinput
from os.path import join, dirname, abspath
import sys
import os
# everything is resolved relative to this script's directory
BASEDIR = dirname(abspath(__file__))
# temporary Robot output consumed by create_jsdata() and removed at the end
OUTPUT = join(BASEDIR, 'output.xml')
# make the in-tree robot package importable
sys.path.insert(0, join(BASEDIR, '..', '..', '..', '..', 'src'))
import robot
from robot.conf.settings import RebotSettings
from robot.reporting.resultwriter import Results
from robot.reporting.jswriter import JsResultWriter, JsonWriter
def create(testdata, target, split_log=False):
    """Run Robot on *testdata* and dump the result as JS data in *target*.

    The generated window.output variable is renamed to
    window.<targetBaseName>Output so several data files can coexist on
    one page.
    """
    testdata = join(BASEDIR, testdata)
    # e.g. 'Suite.js' -> 'suiteOutput' (strip '.js', lower first letter)
    output_name = target[0].lower() + target[1:-3] + 'Output'
    target = join(BASEDIR, target)
    run_robot(testdata)
    create_jsdata(target, split_log)
    inplace_replace_all(target, 'window.output', 'window.' + output_name)
def run_robot(testdata, output=OUTPUT):
    """Execute the tests, producing only the XML output (no log/report)."""
    robot.run(testdata, log='NONE', report='NONE', output=output)
def create_jsdata(target, split_log, outxml=OUTPUT):
    """Convert the Robot XML output into a window.output JS data file.

    With split_log=True the split keyword/string chunks are appended as
    window.outputKeywords<N> / window.outputStrings<N> variables.
    """
    result = Results(RebotSettings({'splitlog': split_log}), outxml).js_result
    config = {'logURL': 'log.html', 'reportURL': 'report.html', 'background': {'fail': 'DeepPink'}}
    with open(target, 'w') as output:
        JsResultWriter(output, start_block='', end_block='\n').write(result, config)
        writer = JsonWriter(output)
        for index, (keywords, strings) in enumerate(result.split_results):
            writer.write_json('window.outputKeywords%d = ' % index, keywords)
            writer.write_json('window.outputStrings%d = ' % index, strings)
def inplace_replace_all(file, search, replace):
    """Replace every occurrence of ``search`` with ``replace`` in *file*.

    fileinput's inplace mode redirects stdout into the file, so writing
    each (possibly modified) line rewrites it in place.
    """
    for line in fileinput.input(file, inplace=1):
        updated = line.replace(search, replace)
        sys.stdout.write(updated)
if __name__ == '__main__':
    # regenerate every golden JS data file, then drop the temporary XML
    create('Suite.txt', 'Suite.js')
    create('SetupsAndTeardowns.txt', 'SetupsAndTeardowns.js')
    create('Messages.txt', 'Messages.js')
    create('teardownFailure', 'TeardownFailure.js')
    create(join('teardownFailure', 'PassingFailing.txt'), 'PassingFailing.js')
    create('TestsAndKeywords.txt', 'TestsAndKeywords.js')
    create('.', 'allData.js')
    create('.', 'splitting.js', split_log=True)
    os.remove(OUTPUT)
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.