code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
# Copyright 2012 Google Inc. All Rights Reserved.
"""Library to make BigQuery v2 client requests."""
__author__ = 'kbrisbin@google.com (Kathryn Hurley)'
import cgi
import errors
import logging
from apiclient.discovery import build
from apiclient.errors import HttpError
TIMEOUT_MS = 1000
BIGQUERY_API_VERSION = 'v2'
class BigQueryClient(object):
"""BigQuery version 2 client."""
def __init__(self, project_id, api_version=BIGQUERY_API_VERSION):
"""Creates the BigQuery client connection.
Args:
project_id: either the numeric ID or your registered ID.
This defines the project to receive the bill for query usage.
api_version: version of BigQuery API to construct.
"""
self.service = build('bigquery', api_version)
self.project_id = project_id
def query(self, authorized_http, query):
"""Issues an synchronous query to bigquery v2.
Args:
authorized_http: the authorized Http instance.
query: string SQL query to run.
Returns:
The string job reference.
Raises:
QueryError if the query fails.
"""
logging.info(query)
job_collection = self.service.jobs()
job_data = {
'projectId': self.project_id,
'configuration': {
'query': {
'query': query
}
}
}
request = job_collection.insert(
projectId=self.project_id,
body=job_data)
try:
response = request.execute(authorized_http)
except HttpError:
raise errors.QueryError
return response['jobReference']['jobId']
def poll(self, authorized_http, job_id, timeout_ms=TIMEOUT_MS):
"""Polls the job to get results.
Args:
authorized_http: the authorized Http instance.
job_id: the running job.
timeout_ms: the number of milliseconds to wait for results.
Returns:
The job results.
Raises:
PollError when the poll fails.
"""
job_collection = self.service.jobs()
request = job_collection.getQueryResults(
projectId=self.project_id,
jobId=job_id,
timeoutMs=timeout_ms)
try:
response = request.execute(authorized_http)
except HttpError, err:
logging.error(cgi.escape(err._get_reason()))
raise errors.PollError
if 'jobComplete' in response:
complete = response['jobComplete']
if complete:
rows = response['rows']
schema = response['schema']
converter = self.Converter(schema)
formatted_rows = []
for row in rows:
formatted_rows.append(converter.convert_row(row))
response['formattedRows'] = formatted_rows
return response
class Converter(object):
"""Does schema-based type conversion of result data."""
def __init__(self, schema_row):
"""Sets up the schema converter.
Args:
schema_row: a dict containing BigQuery schema definitions.
"""
self.schema = []
for field in schema_row['fields']:
self.schema.append(field['type'])
def convert_row(self, row):
"""Converts a row of data into a tuple with type conversion applied.
Args:
row: a row of BigQuery data.
Returns:
A tuple with the converted data values for the row.
"""
i = 0
data = []
for entry in row['f']:
data.append(self.convert(entry['v'], self.schema[i]))
i += 1
return tuple(data)
def convert(self, entry, schema_type):
"""Converts an entry based on the schema type given.
Args:
entry: the data entry to convert.
schema_type: appropriate type for the entry.
Returns:
The data entry, either as passed in, or converted to the given type.
"""
if schema_type == u'FLOAT' and entry is not None:
return float(entry)
elif schema_type == u'INTEGER' and entry is not None:
return int(entry)
else:
return entry
| Python |
#!/usr/bin/python2.6
#
# Simple http server to emulate api.playfoursquare.com
import logging
import shutil
import sys
import urlparse
import SimpleHTTPServer
import BaseHTTPServer
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  """Handle playfoursquare.com requests, for testing.

  Serves canned capture files for the API paths the client exercises;
  any other path gets a 404.
  """

  # Request path -> capture file holding the canned response body.
  _RESPONSE_FILES = {
      '/v1/venue': '../captures/api/v1/venue.xml',
      '/v1/addvenue': '../captures/api/v1/venue.xml',
      '/v1/venues': '../captures/api/v1/venues.xml',
      '/v1/user': '../captures/api/v1/user.xml',
      '/v1/checkcity': '../captures/api/v1/checkcity.xml',
      '/v1/checkins': '../captures/api/v1/checkins.xml',
      '/v1/cities': '../captures/api/v1/cities.xml',
      '/v1/switchcity': '../captures/api/v1/switchcity.xml',
      '/v1/tips': '../captures/api/v1/tips.xml',
      '/v1/checkin': '../captures/api/v1/checkin.xml',
      '/history/12345.rss': '../captures/api/v1/feed.xml',
  }

  def do_GET(self):
    logging.warn('do_GET: %s, %s', self.command, self.path)
    url = urlparse.urlparse(self.path)
    logging.warn('do_GET: %s', url)
    response = self.handle_url(url)
    if response is not None:
      self.send_200()
      try:
        shutil.copyfileobj(response, self.wfile)
      finally:
        # Close the capture file; the original leaked the handle.
        response.close()
      self.wfile.close()

  # POST requests are served exactly like GETs.
  do_POST = do_GET

  def handle_url(self, url):
    """Return an open capture file for url's path, or None after a 404."""
    path = self._RESPONSE_FILES.get(url.path)
    if path is None:
      self.send_error(404)
      return None
    logging.warn('Using: %s' % path)
    return open(path)

  def send_200(self):
    """Send a 200 response with a text/xml content type."""
    self.send_response(200)
    self.send_header('Content-type', 'text/xml')
    self.end_headers()
def main():
if len(sys.argv) > 1:
port = int(sys.argv[1])
else:
port = 8080
server_address = ('0.0.0.0', port)
httpd = BaseHTTPServer.HTTPServer(server_address, RequestHandler)
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
httpd.serve_forever()
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python
import datetime
import sys
import textwrap
import common
from xml.dom import pulldom
PARSER = """\
/**
* Copyright 2009 Joe LaPenna
*/
package com.joelapenna.foursquare.parsers;
import com.joelapenna.foursquare.Foursquare;
import com.joelapenna.foursquare.error.FoursquareError;
import com.joelapenna.foursquare.error.FoursquareParseException;
import com.joelapenna.foursquare.types.%(type_name)s;
import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Auto-generated: %(timestamp)s
*
* @author Joe LaPenna (joe@joelapenna.com)
* @param <T>
*/
public class %(type_name)sParser extends AbstractParser<%(type_name)s> {
private static final Logger LOG = Logger.getLogger(%(type_name)sParser.class.getCanonicalName());
private static final boolean DEBUG = Foursquare.PARSER_DEBUG;
@Override
public %(type_name)s parseInner(XmlPullParser parser) throws XmlPullParserException, IOException,
FoursquareError, FoursquareParseException {
parser.require(XmlPullParser.START_TAG, null, null);
%(type_name)s %(top_node_name)s = new %(type_name)s();
while (parser.nextTag() == XmlPullParser.START_TAG) {
String name = parser.getName();
%(stanzas)s
} else {
// Consume something we don't understand.
if (DEBUG) LOG.log(Level.FINE, "Found tag that we don't recognize: " + name);
skipSubTree(parser);
}
}
return %(top_node_name)s;
}
}"""
BOOLEAN_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(Boolean.valueOf(parser.nextText()));
"""
GROUP_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new GroupParser(new %(sub_parser_camel_case)s()).parse(parser));
"""
COMPLEX_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new %(parser_name)s().parse(parser));
"""
STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(parser.nextText());
"""
def main():
  """Generate a parser class from the XML type description named in argv[1]."""
  parsed = common.WalkNodesForAttributes(sys.argv[1])
  type_name, top_node_name, attributes = parsed
  GenerateClass(type_name, top_node_name, attributes)
def GenerateClass(type_name, top_node_name, attributes):
"""generate it.
type_name: the type of object the parser returns
top_node_name: the name of the object the parser returns.
per common.WalkNodsForAttributes
"""
stanzas = []
for name in sorted(attributes):
typ, children = attributes[name]
replacements = Replacements(top_node_name, name, typ, children)
if typ == common.BOOLEAN:
stanzas.append(BOOLEAN_STANZA % replacements)
elif typ == common.GROUP:
stanzas.append(GROUP_STANZA % replacements)
elif typ in common.COMPLEX:
stanzas.append(COMPLEX_STANZA % replacements)
else:
stanzas.append(STANZA % replacements)
if stanzas:
# pop off the extranious } else for the first conditional stanza.
stanzas[0] = stanzas[0].replace('} else ', '', 1)
replacements = Replacements(top_node_name, name, typ, [None])
replacements['stanzas'] = '\n'.join(stanzas).strip()
print PARSER % replacements
def Replacements(top_node_name, name, typ, children):
  """Build the template substitution dict for one attribute.

  Args:
    top_node_name: underscore name of the top-level xml node.
    name: underscore name of the attribute being generated.
    typ: schema type of the attribute (e.g. 'String', 'Group', 'boolean').
    children: one-element list holding the child type name, or a falsy
        placeholder when there is none.

  Returns:
    Dict of the named values consumed by PARSER and the *_STANZA templates.
  """
  def underscores_to_camel(word):
    return ''.join(part.capitalize() for part in word.split('_'))

  type_name = underscores_to_camel(top_node_name)
  camel_name = underscores_to_camel(name)
  # NOTE(review): despite the original "camelCaseLocalName" comment, this
  # reproduces the historical value: everything lower-cased except the
  # first letter (e.g. 'CityName' -> 'Cityname') -- confirm before "fixing".
  attribute_name = camel_name.lower().capitalize()
  child = children[0]
  if child:
    sub_parser_camel_case = child + 'Parser'
  else:
    # Heuristic: drop the trailing character (usually a plural 's') to
    # name the per-element parser.
    sub_parser_camel_case = camel_name[:-1] + 'Parser'
  return {
      'type_name': type_name,
      'name': name,
      'top_node_name': top_node_name,
      'camel_name': camel_name,
      'parser_name': typ + 'Parser',
      'attribute_name': attribute_name,
      'field_name': 'm' + camel_name,
      'typ': typ,
      'timestamp': datetime.datetime.now(),
      'sub_parser_camel_case': sub_parser_camel_case,
      'sub_type': child
  }
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python
"""
Pull a oAuth protected page from foursquare.
Expects ~/.oget to contain (one on each line):
CONSUMER_KEY
CONSUMER_KEY_SECRET
USERNAME
PASSWORD
Don't forget to chmod 600 the file!
"""
import httplib
import os
import re
import sys
import urllib
import urllib2
import urlparse
import user
from xml.dom import pulldom
from xml.dom import minidom
import oauth
"""From: http://groups.google.com/group/foursquare-api/web/oauth
@consumer = OAuth::Consumer.new("consumer_token","consumer_secret", {
:site => "http://foursquare.com",
:scheme => :header,
:http_method => :post,
:request_token_path => "/oauth/request_token",
:access_token_path => "/oauth/access_token",
:authorize_path => "/oauth/authorize"
})
"""
# Foursquare API endpoint, host:port form for httplib.HTTPConnection.
SERVER = 'api.foursquare.com:80'
# Header required when POSTing form-encoded OAuth request bodies.
CONTENT_TYPE_HEADER = {'Content-Type' :'application/x-www-form-urlencoded'}
# HMAC-SHA1 request signing, per the foursquare OAuth notes quoted above.
SIGNATURE_METHOD = oauth.OAuthSignatureMethod_HMAC_SHA1()
# Endpoint that exchanges a username/password for an OAuth access token.
AUTHEXCHANGE_URL = 'http://api.foursquare.com/v1/authexchange'
def parse_auth_response(auth_response):
  """Extract (oauth_token, oauth_token_secret) from an authexchange XML body."""
  token_match = re.search('<oauth_token>(.*)</oauth_token>', auth_response)
  secret_match = re.search('<oauth_token_secret>(.*)</oauth_token_secret>',
                           auth_response)
  return (token_match.groups()[0], secret_match.groups()[0])
def create_signed_oauth_request(username, password, consumer):
  """Build and HMAC-SHA1-sign a POST OAuthRequest for the authexchange endpoint."""
  params = dict(fs_username=username, fs_password=password)
  request = oauth.OAuthRequest.from_consumer_and_token(
      consumer, http_method='POST', http_url=AUTHEXCHANGE_URL,
      parameters=params)
  request.sign_request(SIGNATURE_METHOD, consumer, None)
  return request
def main():
url = urlparse.urlparse(sys.argv[1])
# Nevermind that the query can have repeated keys.
parameters = dict(urlparse.parse_qsl(url.query))
password_file = open(os.path.join(user.home, '.oget'))
lines = [line.strip() for line in password_file.readlines()]
if len(lines) == 4:
cons_key, cons_key_secret, username, password = lines
access_token = None
else:
cons_key, cons_key_secret, username, password, token, secret = lines
access_token = oauth.OAuthToken(token, secret)
consumer = oauth.OAuthConsumer(cons_key, cons_key_secret)
if not access_token:
oauth_request = create_signed_oauth_request(username, password, consumer)
connection = httplib.HTTPConnection(SERVER)
headers = {'Content-Type' :'application/x-www-form-urlencoded'}
connection.request(oauth_request.http_method, AUTHEXCHANGE_URL,
body=oauth_request.to_postdata(), headers=headers)
auth_response = connection.getresponse().read()
token = parse_auth_response(auth_response)
access_token = oauth.OAuthToken(*token)
open(os.path.join(user.home, '.oget'), 'w').write('\n'.join((
cons_key, cons_key_secret, username, password, token[0], token[1])))
oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer,
access_token, http_method='POST', http_url=url.geturl(),
parameters=parameters)
oauth_request.sign_request(SIGNATURE_METHOD, consumer, access_token)
connection = httplib.HTTPConnection(SERVER)
connection.request(oauth_request.http_method, oauth_request.to_url(),
body=oauth_request.to_postdata(), headers=CONTENT_TYPE_HEADER)
print connection.getresponse().read()
#print minidom.parse(connection.getresponse()).toprettyxml(indent=' ')
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python
import os
import subprocess
import sys
BASEDIR = '../main/src/com/joelapenna/foursquare'
TYPESDIR = '../captures/types/v1'
captures = sys.argv[1:]
if not captures:
captures = os.listdir(TYPESDIR)
for f in captures:
basename = f.split('.')[0]
javaname = ''.join([c.capitalize() for c in basename.split('_')])
fullpath = os.path.join(TYPESDIR, f)
typepath = os.path.join(BASEDIR, 'types', javaname + '.java')
parserpath = os.path.join(BASEDIR, 'parsers', javaname + 'Parser.java')
cmd = 'python gen_class.py %s > %s' % (fullpath, typepath)
print cmd
subprocess.call(cmd, stdout=sys.stdout, shell=True)
cmd = 'python gen_parser.py %s > %s' % (fullpath, parserpath)
print cmd
subprocess.call(cmd, stdout=sys.stdout, shell=True)
| Python |
#!/usr/bin/python
import logging
from xml.dom import minidom
from xml.dom import pulldom
# Schema type markers used by the capture XML "type" attributes.
BOOLEAN = "boolean"
STRING = "String"
GROUP = "Group"

# Interfaces that all FoursquareTypes implement.
DEFAULT_INTERFACES = ['FoursquareType']

# Interfaces that specific FoursquareTypes implement.
INTERFACES = {
}

DEFAULT_CLASS_IMPORTS = [
]

CLASS_IMPORTS = {
    # 'Checkin': DEFAULT_CLASS_IMPORTS + [
    #     'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
    # 'Venue': DEFAULT_CLASS_IMPORTS + [
    #     'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
    # 'Tip': DEFAULT_CLASS_IMPORTS + [
    #     'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
}

# Complex (nested object) types; their XML subtrees are not walked.
COMPLEX = [
    'Group',
    'Badge',
    'Beenhere',
    'Checkin',
    'CheckinResponse',
    'City',
    'Credentials',
    'Data',
    'Mayor',
    'Rank',
    'Score',
    'Scoring',
    'Settings',
    'Stats',
    'Tags',
    'Tip',
    'User',
    'Venue',
]

# All recognized types; anything else is treated as a String.
TYPES = COMPLEX + ['boolean']


def WalkNodesForAttributes(path):
  """Parse the xml file getting all attributes.

  <venue>
    <attribute>value</attribute>
  </venue>

  Args:
    path: filename of the capture XML to parse.

  Returns:
    type_name - The java-style name the top node will have. "Venue"
    top_node_name - unadulterated name of the xml stanza, probably the type
        of java class we're creating. "venue"
    attributes - dict of tag name -> (type, [child type]) for each
        first-level element, e.g. {'attribute': ('String', [''])}
  """
  doc = pulldom.parse(path)
  type_name = None
  top_node_name = None
  attributes = {}
  # Depth counter used to skip over the subtrees of complex children.
  level = 0
  for event, node in doc:
    if level > 0:
      # Currently inside a complex child's subtree: track depth until we
      # emerge, ignoring everything in between.
      if event == pulldom.END_ELEMENT:
        level -= 1
        logging.warn('(%d) Skip end: %s', level, node)
      elif event == pulldom.START_ELEMENT:
        logging.warn('(%d) Skipping: %s', level, node)
        level += 1
      continue
    if event != pulldom.START_ELEMENT:
      continue
    logging.warn('Parsing: %s', node.tagName)
    if type_name is None:
      # First element is the document root; derive both names from it.
      type_name = ''.join(
          [word.capitalize() for word in node.tagName.split('_')])
      top_node_name = node.tagName
      logging.warn('Found Top Node Name: %s', top_node_name)
      continue
    typ = node.getAttribute('type')
    child = node.getAttribute('child')
    if typ in COMPLEX:
      # We don't want to walk complex types: record the attribute below,
      # but skip everything inside its subtree.
      logging.warn('Found Complex: %s', node.tagName)
      level = 1
    elif typ not in TYPES:
      logging.warn('Found String: %s', typ)
      typ = STRING
    else:
      logging.warn('Found Type: %s', typ)
    logging.warn('Adding: %s', str((node, typ)))
    # First occurrence wins if a tag repeats.
    attributes.setdefault(node.tagName, (typ, [child]))
  logging.warn('Attr: %s', str((type_name, top_node_name, attributes)))
  return type_name, top_node_name, attributes
| Python |
#!/usr/bin/env python2.5
# Freecell4Maemo, Copyright 2008, Roy Wood
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# To Do:
# - on-screen toggle for smart move mode
# - intelligent use of empty stacks when moving columns
# - smart-move a column?
# - save game state
# - smart-mode, just set targetStack and fall through?
"""
Freecell4Maemo is an implementation of the classic Freecell cardgame for the Nokia "Maemo" platform.
The code is pretty small, and I have tried to comment it effectively throughout, so you should have be able to
figure things out pretty easily.
Some of the more significant pieces are as follows:
class Rect - a rectangle; important members are top, left, width, height
class Card - a playing card; important members are cardnum(0-51), screen location/size, pixbuf
class CardStack - a stack of Card objects; important members are screen location/size, cards, "empty stack" pixbuf, stack suit
class Freecell - the main class for the app; uses the other classes as needed
Some significant points about the main "Freecell" class are:
- the __init__ method creates all the basic object members, loads all the card images, creates the GUI
- the GUI is a single window containing a GTK DrawingArea
- all drawing is done in an offscreen PixMap
- the offscreen PixMap is blitted to the DrawingArea in response to expose events
- the offscreen PixMap is created in the configure event handler, not __init__
- all GraphicContext objects are created in the configure event handler
- the configure handler also triggers a call to set the rects of the CardStacks (important for switching between fullscreen and smallscreen)
- the real game logic is in the button_press_event handler (and yes, it gets a little messy)
"""
ABOUT_TEXT = """
Freecell for Maemo
(c) 2008 Roy Wood
roy.wood@gmail.com
http://code.google.com/p/freecell4maemo/
This game is an implementation of
the classic Freecell card game for
the Nokia Maemo platform.
To move a card, click once to select
the card, then click in the
destination location.
Click the return key (square button
in the center of the directional
keypad) to auto-move cards to the
ace stacks.
Click the escape key (swoopy arrow
keypad button) to undo a move.
This program is free software: you
can redistribute it and/or modify
it under the terms of the GNU
General Public License as published
by the Free Software Foundation,
either version 3 of the License, or
(at your option) any later version.
This program is distributed in the
hope that it will be useful, but
WITHOUT ANY WARRANTY; without
even the implied warranty of
MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU
General Public License for more
details.
You should have received a copy of
the GNU General Public License
along with this program. If not,
a copy may be found here:
<http://www.gnu.org/licenses/>
"""
import gtk
import pygtk
import time
import random
import logging
import math
# Detect the Maemo/Hildon environment: if the hildon and osso bindings import
# and an osso context can be created, run with the Hildon UI; otherwise fall
# back to plain GTK.
try:
    import hildon
    import osso
    osso_c = osso.Context("com.nokia.freecell4maemo", "1.0.0", False)
    hildonMode = True
except:
    # NOTE(review): bare except deliberately covers both a missing import and
    # a failing osso.Context() on non-Maemo systems; consider narrowing to the
    # specific exception types once the osso failure modes are confirmed.
    hildonMode = False
# Size of the inset border for the window
FULLSCREEN_BORDER_WIDTH = 10
SMALLSCREEN_BORDER_WIDTH = 2
# Border between upper/lower sets of cards
VERT_SEPARATOR_WIDTH = 10
# Suit IDs; this ordering must match the CLUBS_BACK..HEARTS_BACK pixbuf
# indices below.
CLUBS = 0
DIAMONDS = 1
SPADES = 2
HEARTS = 3
SUITNAMES = [ "Clubs", "Diamonds", "Spades", "Hearts" ]
# Suit colours (suit id % 2: clubs/spades are black, diamonds/hearts red)
BLACK = 0
RED = 1
# Number of cards per suit
CARDS_PER_SUIT = 13
# Card pixbufs 0-51 are the regular cards,
NUMCARDS = 52
# Cards 52-55 are the suit-back cards (aces in top right of screen)
CLUBS_BACK = 52
DIAMONDS_BACK = 53
SPADES_BACK = 54
HEARTS_BACK = 55
# Card 56 is the blank-back card (used to draw stacks with no cards)
BLANK_BACK = 56
# Card 57 is the fancy-back card (not currently used)
FANCY_BACK = 57
# Total number of card images; range(TOTALNUMCARDS) loads images 00..56,
# deliberately excluding the unused fancy back.
TOTALNUMCARDS = FANCY_BACK
# Number of card columns
NUMCOLUMNS = 8
# Number of "free cells"
NUMFREECELLS = 4
# Number of ace cards
NUMACES = 4
# Types of cards
FREECELL_TYPE = 0
ACE_TYPE = 1
REGULAR_TYPE = 2
# Folder containing the card images
CARDFOLDER = "/usr/share/freecell4maemo/card_images"
# Response constants for the "Move card or column" dialog (OK/CANCEL are the constants that the hildon.Note dialog returns)
MOVE_CARD_ID = gtk.RESPONSE_CANCEL
MOVE_COLUMN_ID = gtk.RESPONSE_OK
class Rect(object):
    """An axis-aligned rectangle: integer left/top corner plus width/height."""

    def __init__(self, left = 0, top = 0, width = 0, height = 0):
        self.setRect(left, top, width, height)

    def setRect(self, left = 0, top = 0, width = 0, height = 0):
        # Coordinates are truncated to ints, matching pixel positions.
        self.left = int(left)
        self.top = int(top)
        self.width = int(width)
        self.height = int(height)

    def enclosesXY(self, x, y):
        # Point-in-rect test; the right and bottom edges are exclusive.
        inside_x = self.left <= x < self.left + self.width
        inside_y = self.top <= y < self.top + self.height
        return inside_x and inside_y

    def getLeftTop(self):
        return (self.left, self.top)

    def getLeftTopWidthHeight(self):
        return (self.left, self.top, self.width, self.height)

    def unionWith(self, otherRect):
        # Grow this Rect in place so it also covers otherRect.
        right = max(self.left + self.width, otherRect.left + otherRect.width)
        bottom = max(self.top + self.height, otherRect.top + otherRect.height)
        self.left = min(self.left, otherRect.left)
        self.top = min(self.top, otherRect.top)
        self.width = right - self.left
        self.height = bottom - self.top
class Card(object):
    """A single playing card: number (0-51), on-screen rect, and pixbuf.

    Within each suit the cards are ordered A,2,...,10,J,Q,K; suits follow
    the module suit constants (Clubs, Diamonds, Spades, Hearts).
    """

    def __init__(self, cardNum, left = 0, top = 0, width = 0, height = 0, pixBuf = None):
        self.cardNum = cardNum
        self.rect = Rect(left, top, width, height)
        self.pixBuf = pixBuf

    def getSuit(self):
        return self.cardNum // CARDS_PER_SUIT

    def getSuitColour(self):
        # Suit ids alternate black/red, so colour is just suit parity.
        return self.getSuit() % 2

    def getValue(self):
        return self.cardNum % CARDS_PER_SUIT

    def setRect(self, left = 0, top = 0, width = 0, height = 0):
        self.rect.setRect(left, top, width, height)

    def enclosesXY(self, x, y):
        # Hit-test the card's on-screen rect.
        return self.rect.enclosesXY(x, y)

    def drawCard(self, drawable, gc, xyPt = None):
        # Draw at an explicit point if one is given, else at the card's rect.
        if xyPt is not None:
            left, top = xyPt
        else:
            left, top = self.rect.getLeftTop()
        drawable.draw_pixbuf(gc, self.pixBuf, 0, 0, left, top)

    def getLeftTop(self):
        return self.rect.getLeftTop()

    def getLeftTopWidthHeight(self):
        return self.rect.getLeftTopWidthHeight()

    def getRect(self):
        # Return a copy so callers can't mutate our rect.
        return Rect(*self.rect.getLeftTopWidthHeight())
class CardStack(object):
    # An object representing a stack of cards
    # The CardStack contains a list of Card objects, possesses an onscreen location
    # The CardStack can draw itself; if there are no Cards, then the emptyStackPixBuf is displayed
    # The CardStack's yOffset controls the vertical offset of cards in the stack
    def __init__(self, left, top, emptyStackPixBuf, stackSuit, yOffset = 0):
        # emptyStackPixBuf also defines the card width/height used for layout.
        # stackSuit is the suit id for ace stacks, -1 for freecells/columns.
        # yOffset = 0 means cards overlap fully (only the top one shows).
        self.left = int(left)
        self.top = int(top)
        self.emptyStackPixBuf = emptyStackPixBuf
        self.yOffset = yOffset
        self.cardWidth = emptyStackPixBuf.get_width()
        self.cardHeight = emptyStackPixBuf.get_height()
        # Bounding rect; grows/shrinks as cards are pushed and popped.
        self.rect = Rect(self.left, self.top, self.cardWidth, self.cardHeight)
        self.stackSuit = stackSuit
        self.cards = [ ]
    def getNumCards(self):
        # Number of cards currently on the stack.
        return len(self.cards)
    def clearStack(self):
        # Remove all cards from the stack.
        self.cards = [ ]
    def getRect(self):
        # Return a copy of the stack's bounding rect.
        left, top, w, h = self.rect.getLeftTopWidthHeight()
        return Rect(left, top, w, h)
    def getLeftTopWidthHeight(self):
        return self.rect.getLeftTopWidthHeight()
    def setLeftTop(self, left, top):
        # Move the stack (e.g. on screen-size change) and re-lay-out every
        # card at its yOffset position.
        self.left = left
        self.top = top
        self.rect = Rect(self.left, self.top, self.cardWidth, self.cardHeight + self.yOffset * len(self.cards))
        for i in range(len(self.cards)):
            self.cards[i].setRect(self.left, self.top + self.yOffset * i, self.cardWidth, self.cardHeight)
    def pushCard(self, card):
        # Place card at the next offset position and grow the bounding rect.
        card.setRect(self.left, self.top + self.yOffset * len(self.cards), self.cardWidth, self.cardHeight)
        self.cards.append(card)
        self.rect = Rect(self.left, self.top, self.cardWidth, self.cardHeight + self.yOffset * len(self.cards))
    def getCardValueSuitColour(self, cardIndex):
        # Get the card value, suit, and colour of a card on the CardStack; negative cardIndex values work the expected way (e.g. -1 is last/top card); if a bad index value is supplied, return the stack suit (i.e. ace stack suit)
        if (cardIndex >= len(self.cards) or abs(cardIndex) > len(self.cards)):
            # Out of range (including any index on an empty stack): value -1
            # plus the stack's own suit and its parity as the colour.
            return -1, self.stackSuit, self.stackSuit % 2
        else:
            card = self.cards[cardIndex]
            return card.getValue(), card.getSuit(), card.getSuitColour()
    def getTopCardRect(self):
        # Get the rect of top card on the CardStack; return bare rect if there are no cards
        if (len(self.cards) > 0):
            return self.cards[-1].getRect()
        else:
            left, top, w, h = self.rect.getLeftTopWidthHeight()
            return Rect(left, top, w, h)
    def getNextTopCardLeftTop(self):
        # Get the top/left of the next card location on the stack (useful for animation)
        return (self.left, self.top + self.yOffset * len(self.cards))
    def popCard(self):
        # Remove the top card on the CardStack; return the popped Card or None
        if (len(self.cards) > 0):
            card = self.cards[-1]
            del self.cards[-1]
            # Shrink the bounding rect to match the shorter stack.
            self.rect.setRect(self.left, self.top, self.cardWidth, self.cardHeight + self.yOffset * len(self.cards))
            return card
        else:
            return None
    def enclosesXY(self, x, y):
        # Determine if a point lies within the CardStack
        return self.rect.enclosesXY(x, y)
    def drawStack(self, drawable, gc):
        # Draw the stack (or the "empty stack" image) in the given drawable, using the supplied GC
        if (len(self.cards) <= 0):
            left, top = self.rect.getLeftTop()
            drawable.draw_pixbuf(gc, self.emptyStackPixBuf, 0, 0, left, top)
        elif (self.yOffset == 0):
            # Fully-overlapped stack: only the top card is ever visible.
            self.cards[-1].drawCard(drawable, gc)
        else:
            for c in self.cards:
                c.drawCard(drawable, gc)
    def drawTopCard(self, drawable, gc):
        # Draw the top card (or the "empty stack" image) in the given drawable, using the supplied GC
        if (len(self.cards) <= 0):
            left, top = self.rect.getLeftTop()
            drawable.draw_pixbuf(gc, self.emptyStackPixBuf, 0, 0, left, top)
        else:
            self.cards[-1].drawCard(drawable, gc)
class FreeCell(object):
# The real application....
    def __init__(self):
        """Load the card images, build the stacks, menus, windows and dialogs."""
        # Init the rendering objects to None for now; they will be properly populated during the expose_event handling
        self.offscreenPixmap = None
        self.offscreenGC = None
        self.greenColour = None
        self.redColour = None
        self.blackColour = None
        self.whiteColour = None
        self.tmpPixmap = None
        self.tmpGC = None
        # Load the cards
        self.cardPixbufs = [ gtk.gdk.pixbuf_new_from_file("%s/%02d.gif" % (CARDFOLDER, i)) for i in range(TOTALNUMCARDS) ]
        # Load the "smart mode" image
        self.smartModePixbuf = gtk.gdk.pixbuf_new_from_file("%s/lightning.gif" % (CARDFOLDER))
        self.smartModeRect = Rect()
        # All cards are supposed to be the same height and width
        self.cardHeight = self.cardPixbufs[0].get_height()
        self.cardWidth = self.cardPixbufs[0].get_width()
        # Each group of cards (freecells, aces, columns) is stored in a list of CardStacks
        # We also keep track of a bounding rect for each group and use this rect when doing hit-testing of mouse clicks
        # Set up the "free cells" (4 cells in top left of screen)
        self.freecellStacks = [ CardStack(0, 0, self.cardPixbufs[BLANK_BACK], -1, 0) for i in range(NUMFREECELLS) ]
        self.freeCellsRect = None
        # Set up the "aces" (4 cells in top right of screen); order is important!
        self.acesStacks = [ CardStack(0, 0, self.cardPixbufs[CLUBS_BACK + i], i, 0) for i in range(NUMACES) ]
        self.acesRect = None
        # Set up the columns; cards fan downward by a fifth of a card height
        self.mainCardStacks = [ CardStack(0, 0, self.cardPixbufs[BLANK_BACK], -1, self.cardHeight // 5) for i in range(NUMCOLUMNS) ]
        self.mainCardsRects = None
        # Keep track of all card stack moves so we can undo moves
        self.undoStack = [ ]
        # Initialize the cards
        self.startCardOrder = []
        self.setupCards()
        # Default to manual play mode
        self.smartPlayMode = False
        # These get set properly during the configure event handler
        self.windowWidth = 0
        self.windowHeight = 0
        self.windowFullscreen = False
        # Create menus
        self.menu = gtk.Menu()
        menuItem = gtk.MenuItem("_New Game")
        menuItem.connect("activate", self.new_game_menu_cb)
        self.menu.append(menuItem)
        menuItem.show()
        menuItem = gtk.MenuItem("_Restart Game")
        menuItem.connect("activate", self.restart_game_menu_cb)
        self.menu.append(menuItem)
        menuItem.show()
        menuItem = gtk.MenuItem("_About...")
        menuItem.connect("activate", self.about_menu_cb)
        self.menu.append(menuItem)
        menuItem.show()
        menuItem = gtk.MenuItem("E_xit")
        menuItem.connect("activate", self.exit_menu_cb)
        self.menu.append(menuItem)
        menuItem.show()
        # Main part of window is a DrawingArea
        self.drawingArea = gtk.DrawingArea()
        global hildonMode
        if (hildonMode):
            # Main window contains a single DrawingArea; menu is attached to Hildon window
            self.app = hildon.Program()
            self.mainWindow = hildon.Window()
            self.mainWindow.set_title("Freecell")
            self.app.add_window(self.mainWindow)
            self.mainWindow.add(self.drawingArea)
            self.mainWindow.set_menu(self.menu)
            # Hildon dialogs are different than regular Gtk dialogs
            self.cardOrColumnDialog = hildon.Note("confirmation", (self.mainWindow, "Move column or card?", gtk.STOCK_DIALOG_QUESTION))
            self.cardOrColumnDialog.set_button_texts ("Column", "Card")
            self.youWinDialog = hildon.Note("information", (self.mainWindow, "You won!", gtk.STOCK_DIALOG_INFO))
        else:
            # Main window contains a VBox with a MenuBar and a DrawingArea
            self.mainWindow = gtk.Window(gtk.WINDOW_TOPLEVEL)
            #self.mainWindow.set_default_size(800,600)
            self.drawingArea.set_size_request(800, 480)
            fileMenu = gtk.MenuItem("_File")
            fileMenu.set_submenu(self.menu)
            menuBar = gtk.MenuBar()
            menuBar.append(fileMenu)
            vbox = gtk.VBox()
            vbox.pack_start(menuBar, False, False, 2)
            vbox.pack_end(self.drawingArea, True, True, 2)
            self.mainWindow.add(vbox)
            # Create the dialogs in advance and then reuse later
            self.cardOrColumnDialog = gtk.Dialog(parent = self.mainWindow, flags = gtk.DIALOG_MODAL, buttons=("Column", MOVE_COLUMN_ID, "Card", MOVE_CARD_ID))
            self.cardOrColumnLabel = gtk.Label("Move column or card?")
            self.cardOrColumnDialog.vbox.pack_start(self.cardOrColumnLabel)
            self.cardOrColumnLabel.show()
            self.youWinDialog = gtk.MessageDialog(self.mainWindow, gtk.DIALOG_MODAL, gtk.MESSAGE_INFO, gtk.BUTTONS_OK, "You won!")
        # Wire up the event callbacks
        self.mainWindow.connect("delete_event", self.delete_event_cb)
        self.mainWindow.connect("destroy", self.destroy_cb)
        self.mainWindow.connect("key-press-event", self.key_press_cb)
        self.mainWindow.connect("window-state-event", self.window_state_change_cb)
        self.drawingArea.connect("expose_event", self.expose_event_cb)
        self.drawingArea.connect("configure_event", self.configure_event_cb)
        self.drawingArea.connect("button_press_event", self.button_press_event_cb)
        self.drawingArea.set_events(gtk.gdk.EXPOSURE_MASK | gtk.gdk.BUTTON_PRESS_MASK)
        # Create the "About" dialog
        self.aboutDialog = gtk.Dialog(parent = self.mainWindow, flags = gtk.DIALOG_MODAL, buttons=(gtk.STOCK_OK, gtk.RESPONSE_ACCEPT))
        #self.aboutDialog.set_geometry_hints(self.mainWindow, min_width=400, min_height=200)
        self.aboutDialog.set_default_size(480,300)
        self.aboutScrolledWin = gtk.ScrolledWindow()
        self.aboutScrolledWin.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
        self.aboutTextView = gtk.TextView()
        self.aboutTextView.set_editable(False)
        self.aboutTextView.get_buffer().set_text(ABOUT_TEXT)
        self.aboutScrolledWin.add(self.aboutTextView)
        self.aboutDialog.vbox.pack_start(self.aboutScrolledWin)
        # Behold!
        self.mainWindow.show_all()
        # Track the currently selected card
        self.selectedCardRect = Rect()
        self.selectedCardStack = None
        self.selectedCardType = None
        self.debugMode = False
    def exit_menu_cb(self, widget):
        # "Exit" menu item: quit the GTK main loop, ending the app.
        gtk.main_quit()
    def restart_game_menu_cb(self, widget):
        # "Restart Game" menu item: re-deal via setupCards(False) -- presumably
        # reusing the saved starting order (setupCards isn't shown here; confirm)
        # -- then re-lay-out and repaint everything.
        self.setupCards(False)
        self.setCardRects()
        self.redrawOffscreen()
        self.updateRect(None)
def about_menu_cb(self, widget):
    # "About..." menu item: run the pre-built modal About dialog.
    self.aboutDialog.show_all()
    self.aboutDialog.run()
    self.aboutDialog.hide()
def new_game_menu_cb(self, widget):
    # "New Game" menu item: shuffle a fresh deal (setupCards() default
    # shuffles and remembers the order for "Restart"), then redraw.
    self.setupCards()
    self.setCardRects()
    self.redrawOffscreen()
    # None = invalidate the whole window
    self.updateRect(None)
def key_press_cb(self, widget, event, *args):
    # Keyboard handler for the main window:
    #   F6        - toggle fullscreen (Maemo fullscreen hardware key)
    #   Up/Down   - turn "smart play" mode off/on and redraw its icon
    #   Left/Right- logged only, no action
    #   Escape    - undo the last move
    #   Return    - auto-move eligible cards to the ace stacks
    #   F7/F8     - clear/set debug mode (unrestricted card moves);
    #               NOTE(review): the prints say "Zoom +/-" -- presumably
    #               the Maemo zoom keys report as F7/F8; confirm on device.
    if (event.keyval == gtk.keysyms.F6):
        if (self.windowFullscreen):
            self.mainWindow.unfullscreen()
        else:
            self.mainWindow.fullscreen()
    elif (event.keyval == gtk.keysyms.Up):
        print "Up!"
        self.smartPlayMode = False
        self.redrawSmartModeIcon()
        self.updateRect(self.smartModeRect)
    elif (event.keyval == gtk.keysyms.Down):
        print "Down!"
        self.smartPlayMode = True
        self.redrawSmartModeIcon()
        self.updateRect(self.smartModeRect)
    elif (event.keyval == gtk.keysyms.Left):
        print "Left!"
    elif (event.keyval == gtk.keysyms.Right):
        print "Right!"
    elif (event.keyval == gtk.keysyms.Escape):
        print "Escape!"
        self.undoMove()
    elif (event.keyval == gtk.keysyms.Return):
        print "Return!"
        self.autoMoveCardsHome()
    elif (event.keyval == gtk.keysyms.F7):
        print "Zoom +!"
        self.debugMode = False
    elif (event.keyval == gtk.keysyms.F8):
        print "Zoom -!"
        self.debugMode = True
def autoMoveCardsHome(self):
    # Repeatedly sweep the free cells and columns, sending any top card
    # that is next in sequence to its ace stack; stop once a full sweep
    # moves nothing.
    candidateStacks = self.freecellStacks + self.mainCardStacks
    keepScanning = True
    while keepScanning:
        keepScanning = False
        for stack in candidateStacks:
            value, suit, colour = stack.getCardValueSuitColour(-1)
            if (suit < 0):
                # Empty stack (sentinel suit is -1); nothing to move
                continue
            homeStack = self.acesStacks[suit]
            homeValue = homeStack.getCardValueSuitColour(-1)[0]
            if (value == homeValue + 1):
                # Card is the next one for its suit: flash it, send it home
                self.flashRect(stack.getTopCardRect())
                self.moveCard(stack, homeStack)
                keepScanning = True
def checkGameOver(self):
    # The game is won once every ace stack's top card is the king
    # (value CARDS_PER_SUIT - 1); show the "You won!" dialog then.
    fullStacks = [stack for stack in self.acesStacks
                  if stack.getCardValueSuitColour(-1)[0] == CARDS_PER_SUIT - 1]
    if (len(fullStacks) == NUMACES):
        self.youWinDialog.show()
        self.youWinDialog.run()
        self.youWinDialog.hide()
def undoMove(self):
    # Reverse the most recent move, if there is one.
    if not self.undoStack:
        return
    srcStack, dstStack = self.undoStack[-1]
    # Replay the move backwards.  moveCard() records this reversal as a
    # new move on the undo stack, so afterwards we must drop BOTH the
    # original entry and the one moveCard() just appended.
    self.moveCard(dstStack, srcStack)
    del self.undoStack[-2:]
    self.clearCardSelection()
def window_state_change_cb(self, widget, event, *args):
    # Track whether the window is currently fullscreen so F6 can toggle it.
    logging.info("window_state_change_cb")
    fullscreenBit = event.new_window_state & gtk.gdk.WINDOW_STATE_FULLSCREEN
    self.windowFullscreen = bool(fullscreenBit)
def setupCards(self, doShuffle = True):
    # Reset the ace and freecell stacks and deal the deck into the columns.
    # doShuffle True: generate a fresh random deal and remember it;
    # doShuffle False: replay the remembered deal (the "Restart" case).
    self.undoStack = [ ]
    self.acesStacks = [ CardStack(0, 0, self.cardPixbufs[CLUBS_BACK + suit], suit, 0)
                        for suit in range(NUMACES) ]
    self.freecellStacks = [ CardStack(0, 0, self.cardPixbufs[BLANK_BACK], -1, 0)
                            for cell in range(NUMFREECELLS) ]
    if (doShuffle):
        deal = list(range(NUMCARDS))
        random.shuffle(deal)
        self.startCardOrder = deal
    else:
        deal = self.startCardOrder
    for stack in self.mainCardStacks:
        stack.clearStack()
    # Deal round-robin across the columns, left to right
    for position, cardNum in enumerate(deal):
        column = position % NUMCOLUMNS
        newCard = Card(cardNum, pixBuf = self.cardPixbufs[cardNum])
        self.mainCardStacks[column].pushCard(newCard)
def getStackListEnclosingRect(self, cardStackList):
    # Compute a single rect covering every CardStack in the list
    # (used to build the click hit-test regions).
    enclosing = cardStackList[0].getRect()
    for stack in cardStackList[1:]:
        enclosing.unionWith(stack.getRect())
    return enclosing
def setCardRects(self):
    """Recompute the on-screen position of every card stack.

    Called from the configure handler whenever the drawing area is
    (re)sized, e.g. when toggling fullscreen.
    """
    # Columns: 8 evenly spaced slots across the full window width
    cardHorizSpacing = self.windowWidth / 8.0
    for i in range(NUMCOLUMNS):
        x = int(i * cardHorizSpacing + (cardHorizSpacing - self.cardWidth) // 2)
        self.mainCardStacks[i].setLeftTop(x, VERT_SEPARATOR_WIDTH + self.cardHeight + VERT_SEPARATOR_WIDTH)
    # Free cells (top-left) and aces (top-right): tighter spacing with a
    # half-slot gap between the two groups
    cardHorizSpacing = self.windowWidth / 8.5
    for i in range(NUMFREECELLS):
        x = i * cardHorizSpacing + (cardHorizSpacing - self.cardWidth) // 2
        self.freecellStacks[i].setLeftTop(x, VERT_SEPARATOR_WIDTH)
        x = int((i + NUMFREECELLS + 0.5) * cardHorizSpacing + (cardHorizSpacing - self.cardWidth) // 2)
        self.acesStacks[i].setLeftTop(x, VERT_SEPARATOR_WIDTH)
    # Cache the enclosing rects used for click hit-testing.
    # BUG FIX: mainCardsRects was previously computed from self.acesStacks,
    # so it never reflected the column area; use the column stacks.
    self.mainCardsRects = self.getStackListEnclosingRect(self.mainCardStacks)
    self.freeCellsRect = self.getStackListEnclosingRect(self.freecellStacks)
    self.acesRect = self.getStackListEnclosingRect(self.acesStacks)
def delete_event_cb(self, widget, event, data=None):
    # Window close requested; returning False lets the delete proceed
    # (the "destroy" handler then quits the main loop).
    return False
def destroy_cb(self, widget, data=None):
    # Window destroyed: stop the gtk main loop.
    gtk.main_quit()
def flashRect(self, rect, repeats = 3):
    # Draw attention to an onscreen rect by XOR-inverting it `repeats`
    # times with a short pause between flips; no-op for a None rect.
    if rect is None:
        return
    for _ in range(repeats):
        self.invertRect(rect)
        # Force the inversion onto the screen before sleeping
        gtk.gdk.window_process_all_updates()
        time.sleep(0.125)
def updateRect(self, rect):
    # Queue a redraw of an onscreen rect; None means the whole window.
    if (rect == None):
        x, y, w, h = 0, 0, self.windowWidth, self.windowHeight
    else:
        x, y, w, h = rect.getLeftTopWidthHeight()
    # +1 on each dimension to cover the rect's inclusive far edges
    self.drawingArea.queue_draw_area(x, y, w + 1, h + 1)
def invertRect(self, rect):
    # Invert a rect's pixels onscreen by filling it with white in XOR
    # mode (used to highlight the selected card); no-op for None.
    if (rect == None):
        return
    x, y, w, h = rect.getLeftTopWidthHeight()
    self.drawingAreaGC.set_foreground(self.whiteColour)
    self.drawingAreaGC.set_function(gtk.gdk.XOR)
    self.drawingArea.window.draw_rectangle(self.drawingAreaGC, True, x, y, w, h)
    # Restore the GC to normal copy mode for subsequent drawing
    self.drawingAreaGC.set_function(gtk.gdk.COPY)
def redrawSmartModeIcon(self):
    # Redraw the "smart-mode" indicator (top/middle of the board) into
    # the offscreen buffer: the lightning icon when smart mode is on,
    # otherwise a patch of the green background to erase it.
    left, top, width, height = self.smartModeRect.getLeftTopWidthHeight()
    if (self.smartPlayMode):
        self.offscreenPixmap.draw_pixbuf(self.offscreenGC, self.smartModePixbuf, 0, 0, left, top)
    else:
        self.offscreenGC.set_foreground(self.greenColour)
        self.offscreenPixmap.draw_rectangle(self.offscreenGC, True, left, top, width, height)
def redrawOffscreen(self):
    # Repaint the entire offscreen buffer: green felt background first,
    # then every stack group, then the smart-mode indicator on top.
    width, height = self.offscreenPixmap.get_size()
    self.offscreenGC.set_foreground(self.greenColour)
    self.offscreenPixmap.draw_rectangle(self.offscreenGC, True, 0, 0, width, height)
    for group in (self.acesStacks, self.freecellStacks, self.mainCardStacks):
        for stack in group:
            stack.drawStack(self.offscreenPixmap, self.offscreenGC)
    self.redrawSmartModeIcon()
def configure_event_cb(self, widget, event):
    # Handle the drawing area's configure event (startup and any resize,
    # including fullscreen toggles): (re)allocate the offscreen drawing
    # surfaces and GCs, note the new geometry, lay out the stacks, redraw.
    logging.info("configure_event_cb")
    # The offscreen pixmap is the double-buffer for the whole board
    x, y, width, height = widget.get_allocation()
    self.offscreenPixmap = gtk.gdk.Pixmap(widget.window, width, height)
    self.offscreenGC = self.offscreenPixmap.new_gc()
    # Pre-allocate the handful of colours used for drawing
    self.greenColour = self.offscreenGC.get_colormap().alloc_color(0x0000, 0x8000, 0x0000)
    self.redColour = self.offscreenGC.get_colormap().alloc_color(0xFFFF, 0x0000, 0x0000)
    self.blackColour = self.offscreenGC.get_colormap().alloc_color(0x0000, 0x0000, 0x0000)
    self.whiteColour = self.offscreenGC.get_colormap().alloc_color(0xFFFF, 0xFFFF, 0xFFFF)
    self.drawingAreaGC = self.drawingArea.window.new_gc()
    # Scratch pixmap used by the card-move animation
    self.tmpPixmap = gtk.gdk.Pixmap(widget.window, width, height)
    self.tmpGC = self.tmpPixmap.new_gc()
    # Screen geometry has changed, so note the new size
    self.windowWidth = width
    self.windowHeight = height
    logging.debug("configure_event_cb: self.windowWidth = %d, self.windowHeight = %d" % (self.windowWidth, self.windowHeight))
    # Reposition every card stack for the new size
    self.setCardRects()
    # Centre the smart-mode icon near the top of the window
    left = (self.windowWidth - self.smartModePixbuf.get_width()) // 2
    top = 2 * VERT_SEPARATOR_WIDTH
    self.smartModeRect = Rect(left, top, self.smartModePixbuf.get_width(), self.smartModePixbuf.get_height())
    # Redraw everything into the new offscreen buffer
    self.redrawOffscreen()
    return True
def expose_event_cb(self, widget, event):
    # Repaint the exposed area by blitting from the offscreen pixmap.
    # Gtk is apparently double-buffered now, so this may be redundant.
    x, y, width, height = event.area
    logging.debug("expose_event_cb: x=%d, y=%d, w=%d, h=%d" % (x, y, width, height))
    # The pixmap is None until the first configure event has run
    if (self.offscreenPixmap != None):
        widget.window.draw_drawable(widget.get_style().fg_gc[gtk.STATE_NORMAL], self.offscreenPixmap, x, y, x, y, width, height)
    return False
def clearCardSelection(self):
    # Drop the current card selection and queue a redraw of the area
    # where it was drawn inverted, so the highlight disappears.
    if (self.selectedCardRect != None):
        self.updateRect(self.selectedCardRect)
    self.selectedCardRect = None
    self.selectedCardType = None
    self.selectedCardStack = None
def setCardSelection(self, stackType, cardStack, cardRect):
    # Record the selected card's stack/type/rect and draw it highlighted
    # (inverted) onscreen.
    self.invertRect(cardRect)
    self.selectedCardRect = cardRect
    self.selectedCardType = stackType
    self.selectedCardStack = cardStack
def animateCardMove(self, card, toX, toY):
    """Animate `card` sliding from its current position to (toX, toY).

    Each frame is composed in self.tmpPixmap (background from the
    offscreen buffer, then the card) and blitted to the window; no-op
    when the card is already at the destination.
    """
    fromX, fromY, cardWidth, cardHeight = card.getLeftTopWidthHeight()
    if (fromX == toX and fromY == toY):
        return
    deltaX, deltaY = float(toX - fromX), float(toY - fromY)
    dist = math.sqrt(deltaX * deltaX + deltaY * deltaY)
    # Pixels moved per animation step
    speed = 10.0
    # BUG FIX: for moves shorter than `speed` pixels int(dist / speed)
    # is 0, which made the divisions below raise ZeroDivisionError;
    # clamp to at least one step.
    numSteps = max(1, int(dist / speed))
    vx, vy = deltaX / numSteps, deltaY / numSteps
    # The dirty region each frame covers the card plus one step of travel
    updateWidth, updateHeight = cardWidth + int(abs(vx) + 0.5) + 1, cardHeight + int(abs(vy) + 0.5) + 1
    prevX, prevY = fromX, fromY
    for i in range(numSteps + 1):
        if (i == numSteps):
            # Land exactly on the destination to avoid rounding drift
            x, y = int(toX), int(toY)
        else:
            x, y = int(fromX + vx * i), int(fromY + vy * i)
        left, top = min(x, prevX), min(y, prevY)
        # Restore the background over the card's previous position...
        self.tmpPixmap.draw_drawable(self.tmpGC, self.offscreenPixmap, left, top, left, top, updateWidth, updateHeight)
        # ...draw the card at its new position, then blit to the screen
        card.drawCard(self.tmpPixmap, self.tmpGC, (x, y))
        self.drawingArea.window.draw_drawable(self.drawingArea.get_style().fg_gc[gtk.STATE_NORMAL], self.tmpPixmap, left, top, left, top, updateWidth, updateHeight)
        # Forces the screen update; without this the animation never shows
        gtk.gdk.window_process_all_updates()
        prevX, prevY = x, y
def moveCard(self, srcStack, dstStack):
    # Move the top card of srcStack onto dstStack with animation, record
    # the move for undo, repaint the vacated spot, and check for a win.
    # No legality checks are done here -- callers (and debug mode) rely
    # on that.
    if (srcStack == dstStack):
        return
    srcCardVal, srcSuit, srcSuitColour = srcStack.getCardValueSuitColour(-1)
    dstCardVal, dstSuit, dstSuitColour = dstStack.getCardValueSuitColour(-1)
    logging.info("moveCard: move %s %d to %s %d" % (SUITNAMES[srcSuit], srcCardVal, SUITNAMES[dstSuit], dstCardVal))
    # Record for undo; undoMove() compensates for the extra entry that
    # its own reversal call to moveCard() appends here
    self.undoStack.append((srcStack, dstStack))
    # Erase the card's old spot in the offscreen buffer with background
    x, y, w, h = srcStack.getTopCardRect().getLeftTopWidthHeight()
    self.offscreenGC.set_foreground(self.greenColour)
    self.offscreenPixmap.draw_rectangle(self.offscreenGC, True, x, y, w, h)
    fromX, fromY = x, y
    toX, toY = dstStack.getNextTopCardLeftTop()
    card = srcStack.popCard()
    # Redraw what the source stack now shows under the departing card
    srcStack.drawTopCard(self.offscreenPixmap, self.offscreenGC)
    self.animateCardMove(card, toX, toY)
    dstStack.pushCard(card)
    dstStack.drawTopCard(self.offscreenPixmap, self.offscreenGC)
    self.clearCardSelection()
    self.checkGameOver()
def xyToCardStackInfo(self, x, y):
    # Hit-test a click at (x, y) against the three stack groups.
    # Returns (stackType, rect, stack), or (None, None, None) on a miss.
    # Note: for freecell/ace stacks the rect is the whole stack's rect,
    # while for columns it is the top card's rect.
    if (self.freeCellsRect.enclosesXY(x, y)):
        for stack in self.freecellStacks:
            if (stack.enclosesXY(x, y)):
                return (FREECELL_TYPE, stack.getRect(), stack)
    elif (self.acesRect.enclosesXY(x, y)):
        for stack in self.acesStacks:
            if (stack.enclosesXY(x, y)):
                return (ACE_TYPE, stack.getRect(), stack)
    else:
        for stack in self.mainCardStacks:
            if (stack.enclosesXY(x, y)):
                return (REGULAR_TYPE, stack.getTopCardRect(), stack)
    return (None, None, None)
def button_press_event_cb(self, widget, event):
    # The big one: all gameplay rules live in this click handler.
    # Broad structure: (1) resolve the click target; (2) if no current
    # selection, either select it or (smart mode) auto-move it; (3) with
    # a selection, validate and perform the move, including multi-card
    # "run" moves via free cells.
    x, y = event.x, event.y
    dstType, dstRect, dstStack = self.xyToCardStackInfo(x, y)
    if (dstStack == None or dstStack == self.selectedCardStack):
        # Didn't click a valid target (or re-clicked the selection):
        # clear any previous selection and bail
        self.clearCardSelection()
        return True
    if (self.selectedCardStack == None and dstStack.getNumCards() > 0 and not self.smartPlayMode):
        # No previous selection, smart mode off: just select the target
        self.setCardSelection(dstType, dstStack, dstRect)
        return True
    if (self.selectedCardStack == None and dstStack.getNumCards() > 0 and self.smartPlayMode):
        # No previous selection, smart mode on: try to auto-move the
        # clicked card to an ace stack, a matching column, or a free cell
        origDstType, origDstStack, origDstRect = dstType, dstStack, dstRect
        # Call it srcStack from here on to keep the code readable
        srcStack = dstStack
        srcCardVal, srcCardSuit, srcCardSuitColour = srcStack.getCardValueSuitColour(-1)
        # 1st choice: the card's ace stack
        dstStack = self.acesStacks[srcCardSuit]
        dstCardVal, dstCardSuit, dstCardSuitColour = dstStack.getCardValueSuitColour(-1)
        if (dstCardVal == srcCardVal - 1):
            self.moveCard(srcStack, dstStack)
            return True
        # 2nd choice: a non-empty column with the next-higher card of the
        # opposite colour
        for dstStack in self.mainCardStacks:
            dstCardVal, dstCardSuit, dstCardSuitColour = dstStack.getCardValueSuitColour(-1)
            if (dstCardVal >= 0 and dstCardVal == srcCardVal + 1 and dstCardSuitColour != srcCardSuitColour):
                self.moveCard(srcStack, dstStack)
                return True
        # 3rd choice: any empty column, then any empty free cell
        tmpStacks = self.mainCardStacks + self.freecellStacks
        for dstStack in tmpStacks:
            if (dstStack.getNumCards() <= 0):
                self.moveCard(srcStack, dstStack)
                return True
        # No auto-move possible: fall back to plain selection
        self.setCardSelection(dstType, origDstStack, dstRect)
        return True
    # Everything below needs a selected card; bail if there is none
    if (self.selectedCardStack == None):
        return True
    if (self.debugMode):
        # Debug mode: move a card anywhere we like, bwahahahahaha....
        self.moveCard(self.selectedCardStack, dstStack)
        return True
    if (dstType == ACE_TYPE):
        # Ace stack destination: same suit, next value up
        srcStack = self.selectedCardStack
        srcCardVal, srcCardSuit, srcCardSuitColour = srcStack.getCardValueSuitColour(-1)
        dstCardVal, dstCardSuit, dstCardSuitColour = dstStack.getCardValueSuitColour(-1)
        if (srcCardSuit == dstCardSuit and srcCardVal == dstCardVal + 1):
            self.moveCard(srcStack, dstStack)
        return True
    if (dstType == FREECELL_TYPE):
        # Freecell destination: any single card, but only if it is empty
        if (dstStack.getNumCards() <= 0):
            self.moveCard(self.selectedCardStack, dstStack)
        return True
    # Column destination: figure out whether we move one card or a run.
    srcStack = self.selectedCardStack
    srcNumCards = srcStack.getNumCards()
    srcCardVal, srcSuit, srcSuitColour = srcStack.getCardValueSuitColour(-1)
    dstNumCards = dstStack.getNumCards()
    dstCardVal, dstCardSuit, dstCardSuitColour = dstStack.getCardValueSuitColour(-1)
    # How far below the destination's top card the selected card sits
    dstSrcDelta = dstCardVal - srcCardVal
    logging.debug("srcSuit = %d, srcSuitColour = %d, srcCardVal = %d, srcNumCards = %d" % (srcSuit, srcSuitColour, srcCardVal, srcNumCards))
    logging.debug("dstCardSuit = %d, dstCardSuitColour = %d, dstCardVal = %d" % (dstCardSuit, dstCardSuitColour, dstCardVal))
    # Count empty free cells (each one lets us move one more card)
    numFreeCells = 0
    for cardStack in self.freecellStacks:
        if (cardStack.getNumCards() <= 0):
            numFreeCells += 1
    # Length of the valid run (descending, alternating colours) at the
    # top of the source stack
    runLength = 1
    for i in range(1, srcNumCards):
        cardVal, cardSuit, cardSuitColour = srcStack.getCardValueSuitColour(srcNumCards - i - 1)
        logging.debug("card #%d: cardVal = %d, cardSuit = %d, cardSuitColour = %d" % (srcNumCards - i - 1, cardVal, cardSuit, cardSuitColour))
        if (cardVal == srcCardVal + i and cardSuitColour == (srcSuitColour + i) % 2):
            runLength += 1
        else:
            break
    suitColoursWork = (srcSuitColour == (dstCardSuitColour + dstSrcDelta) % 2)
    srcRunMeetsDst = dstSrcDelta > 0 and runLength >= dstSrcDelta
    logging.debug("dstSrcDelta = %d, numFreeCells = %d, runLength = %d, suitColoursWork = %s, srcRunMeetsDst = %s" % (dstSrcDelta, numFreeCells, runLength, suitColoursWork, str(srcRunMeetsDst)))
    if (dstNumCards <= 0 and runLength > 1 and numFreeCells > 0):
        # Ambiguous: ask whether to move just the card or the whole column
        self.cardOrColumnDialog.show()
        dialogResult = self.cardOrColumnDialog.run()
        self.cardOrColumnDialog.hide()
        # Repaint the mess made by the dialog box
        self.updateRect(None)
        x, y, w, h = 0, 0, self.windowWidth, self.windowHeight
        self.drawingArea.window.draw_drawable(self.drawingArea.get_style().fg_gc[gtk.STATE_NORMAL], self.offscreenPixmap, x, y, x, y, w, h)
        gtk.gdk.window_process_all_updates()
        if (dialogResult == MOVE_CARD_ID):
            runLength = 1
    if (dstNumCards <= 0):
        # Moving onto an empty column: free cells cap the run length
        print "Move a run of cards onto an empty stack, numFreeCells=",numFreeCells," runLength=",runLength
        runLength = min(numFreeCells + 1, runLength)
    elif (srcRunMeetsDst and suitColoursWork and numFreeCells >= dstSrcDelta - 1):
        # Moving a run onto a non-empty column: move exactly the cards
        # that connect to the destination's top card
        runLength = dstSrcDelta
    else:
        # Move request is illegal; assume the user is changing selection
        self.clearCardSelection()
        self.setCardSelection(dstType, dstStack, dstRect)
        return True
    logging.debug("Column move...")
    # Park all but the last card of the run in free cells...
    tempStacks = [ ]
    for i in range(runLength - 1):
        for j in range(NUMFREECELLS):
            if (self.freecellStacks[j].getNumCards() <= 0):
                self.moveCard(srcStack, self.freecellStacks[j])
                tempStacks.insert(0, self.freecellStacks[j])
                break
    # ...move the bottom run card, then bring the parked ones back down
    self.moveCard(srcStack, dstStack)
    for s in tempStacks:
        self.moveCard(s, dstStack)
    return True
if __name__ == "__main__":
    # Script entry point: configure logging, build the game, run gtk.
    logging.basicConfig(level=logging.INFO)
    freeCell = FreeCell()
    gtk.main()
| Python |
#!/usr/bin/env python2.5
# Freecell4Maemo, Copyright 2008, Roy Wood
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# To Do:
# - on-screen toggle for smart move mode
# - intelligent use of empty stacks when moving columns
# - smart-move a column?
# - save game state
# - smart-mode, just set targetStack and fall through?
"""
Freecell4Maemo is an implementation of the classic Freecell cardgame for the Nokia "Maemo" platform.
The code is pretty small, and I have tried to comment it effectively throughout, so you should be able to
figure things out pretty easily.
Some of the more significant pieces are as follows:
class Rect - a rectangle; important members are top, left, width, height
class Card - a playing card; important members are cardnum(0-51), screen location/size, pixbuf
class CardStack - a stack of Card objects; important members are screen location/size, cards, "empty stack" pixbuf, stack suit
class Freecell - the main class for the app; uses the other classes as needed
Some significant points about the main "Freecell" class are:
- the __init__ method creates all the basic object members, loads all the card images, creates the GUI
- the GUI is a single window containing a GTK DrawingArea
- all drawing is done in an offscreen PixMap
- the offscreen PixMap is blitted to the DrawingArea in response to expose events
- the offscreen PixMap is created in the configure event handler, not __init__
- all GraphicContext objects are created in the configure event handler
- the configure handler also triggers a call to set the rects of the CardStacks (important for switching between fullscreen and smallscreen)
- the real game logic is in the button_press_event handler (and yes, it gets a little messy)
"""
ABOUT_TEXT = """
Freecell for Maemo
(c) 2008 Roy Wood
roy.wood@gmail.com
http://code.google.com/p/freecell4maemo/
This game is an implementation of
the classic Freecell card game for
the Nokia Maemo platform.
To move a card, click once to select
the card, then click in the
destination location.
Click the return key (square button
in the center of the directional
keypad) to auto-move cards to the
ace stacks.
Click the escape key (swoopy arrow
keypad button) to undo a move.
This program is free software: you
can redistribute it and/or modify
it under the terms of the GNU
General Public License as published
by the Free Software Foundation,
either version 3 of the License, or
(at your option) any later version.
This program is distributed in the
hope that it will be useful, but
WITHOUT ANY WARRANTY; without
even the implied warranty of
MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU
General Public License for more
details.
You should have received a copy of
the GNU General Public License
along with this program. If not,
a copy may be found here:
<http://www.gnu.org/licenses/>
"""
import gtk
import pygtk
import time
import random
import logging
import math
# Optional Maemo (Hildon) support: when the hildon/osso modules import and
# the osso context can be created, run with native Maemo widgets;
# otherwise fall back to plain GTK.
try:
    import hildon
    import osso
    osso_c = osso.Context("com.nokia.freecell4maemo", "1.0.0", False)
    hildonMode = True
except Exception:
    # FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt.  Exception still covers both ImportError and any
    # failure from osso.Context() on non-Maemo systems.
    hildonMode = False
# Size of the inset border for the window
FULLSCREEN_BORDER_WIDTH = 10
SMALLSCREEN_BORDER_WIDTH = 2
# Border between upper/lower sets of cards
VERT_SEPARATOR_WIDTH = 10
# Suit IDs (indices into SUITNAMES and the ace stacks)
CLUBS = 0
DIAMONDS = 1
SPADES = 2
HEARTS = 3
SUITNAMES = [ "Clubs", "Diamonds", "Spades", "Hearts" ]
# Suit colours; with the suit order above, suitID % 2 gives the colour
BLACK = 0
RED = 1
# Number of cards per suit
CARDS_PER_SUIT = 13
# Card pixbufs 0-51 are the regular cards,
NUMCARDS = 52
# Cards 52-55 are the suit-back cards (aces in top right of screen)
CLUBS_BACK = 52
DIAMONDS_BACK = 53
SPADES_BACK = 54
HEARTS_BACK = 55
# Card 56 is the blank-back card (used to draw stacks with no cards)
BLANK_BACK = 56
# Card 57 is the fancy-back card (not currently used)
FANCY_BACK = 57
# Total number of card images
TOTALNUMCARDS = FANCY_BACK
# Number of card columns
NUMCOLUMNS = 8
# Number of "free cells"
NUMFREECELLS = 4
# Number of ace cards
NUMACES = 4
# Types of cards (hit-test results from xyToCardStackInfo)
FREECELL_TYPE = 0
ACE_TYPE = 1
REGULAR_TYPE = 2
# Folder containing the card images
CARDFOLDER = "/usr/share/freecell4maemo/card_images"
# Response constants for the "Move card or column" dialog (OK/CANCEL are the constants that the hildon.Note dialog returns)
MOVE_CARD_ID = gtk.RESPONSE_CANCEL
MOVE_COLUMN_ID = gtk.RESPONSE_OK
class Rect(object):
    """An axis-aligned rectangle stored as integer left/top/width/height."""

    def __init__(self, left = 0, top = 0, width = 0, height = 0):
        self.setRect(left, top, width, height)

    def setRect(self, left = 0, top = 0, width = 0, height = 0):
        # Coordinates are always coerced to ints
        self.left, self.top = int(left), int(top)
        self.width, self.height = int(width), int(height)

    def enclosesXY(self, x, y):
        # True if (x, y) lies inside; right/bottom edges are exclusive
        withinX = self.left <= x < self.left + self.width
        withinY = self.top <= y < self.top + self.height
        return (withinX and withinY)

    def getLeftTop(self):
        return (self.left, self.top)

    def getLeftTopWidthHeight(self):
        return (self.left, self.top, self.width, self.height)

    def unionWith(self, otherRect):
        # Grow this rect in place so it also covers otherRect
        right = max(self.left + self.width, otherRect.left + otherRect.width)
        bottom = max(self.top + self.height, otherRect.top + otherRect.height)
        self.left = min(self.left, otherRect.left)
        self.top = min(self.top, otherRect.top)
        self.width = right - self.left
        self.height = bottom - self.top
class Card(object):
    """A single playing card: a card number (0-51), a screen rect, a pixbuf.

    Cards are numbered A,2,...,10,J,Q,K within each suit; suits follow the
    SUITNAMES order (Clubs, Diamonds, Spades, Hearts), so consecutive suit
    indices alternate colour.
    """

    def __init__(self, cardNum, left = 0, top = 0, width = 0, height = 0, pixBuf = None):
        self.cardNum = cardNum
        self.rect = Rect(left, top, width, height)
        self.pixBuf = pixBuf

    def getSuit(self):
        # Suit index (see CLUBS/DIAMONDS/SPADES/HEARTS)
        return self.cardNum // CARDS_PER_SUIT

    def getSuitColour(self):
        # BLACK (0) or RED (1); suit indices alternate colour
        return self.getSuit() % 2

    def getValue(self):
        # 0 = ace ... 12 = king
        return self.cardNum % CARDS_PER_SUIT

    def setRect(self, left = 0, top = 0, width = 0, height = 0):
        self.rect.setRect(left, top, width, height)

    def enclosesXY(self, x, y):
        # Determine if a point lies within the Card
        return self.rect.enclosesXY(x, y)

    def drawCard(self, drawable, gc, xyPt = None):
        # Draw at xyPt if given, otherwise at the card's own position
        left, top = xyPt if (xyPt != None) else self.rect.getLeftTop()
        drawable.draw_pixbuf(gc, self.pixBuf, 0, 0, left, top)

    def getLeftTop(self):
        return self.rect.getLeftTop()

    def getLeftTopWidthHeight(self):
        return self.rect.getLeftTopWidthHeight()

    def getRect(self):
        # Return a copy so callers may mutate it freely
        return Rect(*self.rect.getLeftTopWidthHeight())
class CardStack(object):
    """A stack of Card objects at a fixed screen position.

    The stack draws itself; when empty, emptyStackPixBuf is shown instead.
    yOffset is the vertical spacing between successive cards: 0 makes the
    stack draw as a single card (free cells, aces), a positive value fans
    the cards out downwards (the main columns).  self.rect always covers
    the empty-stack image plus the fanned-out cards.
    """
    def __init__(self, left, top, emptyStackPixBuf, stackSuit, yOffset = 0):
        # stackSuit is the ace-stack suit, or -1 for suitless stacks
        self.left = int(left)
        self.top = int(top)
        self.emptyStackPixBuf = emptyStackPixBuf
        self.yOffset = yOffset
        # All cards share the empty-stack image's dimensions
        self.cardWidth = emptyStackPixBuf.get_width()
        self.cardHeight = emptyStackPixBuf.get_height()
        self.rect = Rect(self.left, self.top, self.cardWidth, self.cardHeight)
        self.stackSuit = stackSuit
        self.cards = [ ]
    def getNumCards(self):
        return len(self.cards)
    def clearStack(self):
        # Remove all cards (note: does not shrink self.rect)
        self.cards = [ ]
    def getRect(self):
        # Return a copy so callers may mutate it freely
        left, top, w, h = self.rect.getLeftTopWidthHeight()
        return Rect(left, top, w, h)
    def getLeftTopWidthHeight(self):
        return self.rect.getLeftTopWidthHeight()
    def setLeftTop(self, left, top):
        # Reposition the stack and every card in it, and rebuild the
        # enclosing rect for the new location
        self.left = left
        self.top = top
        self.rect = Rect(self.left, self.top, self.cardWidth, self.cardHeight + self.yOffset * len(self.cards))
        for i in range(len(self.cards)):
            self.cards[i].setRect(self.left, self.top + self.yOffset * i, self.cardWidth, self.cardHeight)
    def pushCard(self, card):
        # Place the card at the next fan position and grow the rect
        card.setRect(self.left, self.top + self.yOffset * len(self.cards), self.cardWidth, self.cardHeight)
        self.cards.append(card)
        self.rect = Rect(self.left, self.top, self.cardWidth, self.cardHeight + self.yOffset * len(self.cards))
    def getCardValueSuitColour(self, cardIndex):
        # Get the card value, suit, and colour of a card on the CardStack;
        # negative cardIndex values work the expected way (e.g. -1 is the
        # last/top card).  For an out-of-range index (including any index
        # on an empty stack), return value -1 plus the stack's own suit
        # (i.e. the ace stack suit) -- callers rely on this sentinel.
        if (cardIndex >= len(self.cards) or abs(cardIndex) > len(self.cards)):
            return -1, self.stackSuit, self.stackSuit % 2
        else:
            card = self.cards[cardIndex]
            return card.getValue(), card.getSuit(), card.getSuitColour()
    def getTopCardRect(self):
        # Rect of the top card, or the bare stack rect when empty
        if (len(self.cards) > 0):
            return self.cards[-1].getRect()
        else:
            left, top, w, h = self.rect.getLeftTopWidthHeight()
            return Rect(left, top, w, h)
    def getNextTopCardLeftTop(self):
        # Where the NEXT pushed card would land (used by the animation)
        return (self.left, self.top + self.yOffset * len(self.cards))
    def popCard(self):
        # Remove and return the top card, shrinking the rect; None if empty
        if (len(self.cards) > 0):
            card = self.cards[-1]
            del self.cards[-1]
            self.rect.setRect(self.left, self.top, self.cardWidth, self.cardHeight + self.yOffset * len(self.cards))
            return card
        else:
            return None
    def enclosesXY(self, x, y):
        # Determine if a point lies within the CardStack's current rect
        return self.rect.enclosesXY(x, y)
    def drawStack(self, drawable, gc):
        # Draw the whole stack (or the "empty stack" image); with a zero
        # yOffset only the top card is visible, so draw just that one
        if (len(self.cards) <= 0):
            left, top = self.rect.getLeftTop()
            drawable.draw_pixbuf(gc, self.emptyStackPixBuf, 0, 0, left, top)
        elif (self.yOffset == 0):
            self.cards[-1].drawCard(drawable, gc)
        else:
            for c in self.cards:
                c.drawCard(drawable, gc)
    def drawTopCard(self, drawable, gc):
        # Draw only the top card (or the "empty stack" image)
        if (len(self.cards) <= 0):
            left, top = self.rect.getLeftTop()
            drawable.draw_pixbuf(gc, self.emptyStackPixBuf, 0, 0, left, top)
        else:
            self.cards[-1].drawCard(drawable, gc)
class FreeCell(object):
# The real application....
def __init__(self):
# Init the rendering objects to None for now; they will be properly populated during the expose_event handling
self.offscreenPixmap = None
self.offscreenGC = None
self.greenColour = None
self.redColour = None
self.blackColour = None
self.whiteColour = None
self.tmpPixmap = None
self.tmpGC = None
# Load the cards
self.cardPixbufs = [ gtk.gdk.pixbuf_new_from_file("%s/%02d.gif" % (CARDFOLDER, i)) for i in range(TOTALNUMCARDS) ]
# Load the "smart mode" image
self.smartModePixbuf = gtk.gdk.pixbuf_new_from_file("%s/lightning.gif" % (CARDFOLDER))
self.smartModeRect = Rect()
# All cards are supposed to be the same height and width
self.cardHeight = self.cardPixbufs[0].get_height()
self.cardWidth = self.cardPixbufs[0].get_width()
# Each group of cards (freecells, aces, columns) is stored in a list of CardStacks
# We also keep track of a bounding rect for each group and use this rect when doing hit-testing of mouse clicks
# Set up the "free cells" (4 cells in top left of screen)
self.freecellStacks = [ CardStack(0, 0, self.cardPixbufs[BLANK_BACK], -1, 0) for i in range(NUMFREECELLS) ]
self.freeCellsRect = None
# Set up the "aces" (4 cells in top right of screen); order is important!
self.acesStacks = [ CardStack(0, 0, self.cardPixbufs[CLUBS_BACK + i], i, 0) for i in range(NUMACES) ]
self.acesRect = None
# Set up the columns
self.mainCardStacks = [ CardStack(0, 0, self.cardPixbufs[BLANK_BACK], -1, self.cardHeight // 5) for i in range(NUMCOLUMNS) ]
self.mainCardsRects = None
# Keep track of all card stack moves so we can undo moves
self.undoStack = [ ]
# Initialize the cards
self.startCardOrder = []
self.setupCards()
# Default to manual play mode
self.smartPlayMode = False
# These get set properly during the configure event handler
self.windowWidth = 0
self.windowHeight = 0
self.windowFullscreen = False
# Create menus
self.menu = gtk.Menu()
menuItem = gtk.MenuItem("_New Game")
menuItem.connect("activate", self.new_game_menu_cb)
self.menu.append(menuItem)
menuItem.show()
menuItem = gtk.MenuItem("_Restart Game")
menuItem.connect("activate", self.restart_game_menu_cb)
self.menu.append(menuItem)
menuItem.show()
menuItem = gtk.MenuItem("_About...")
menuItem.connect("activate", self.about_menu_cb)
self.menu.append(menuItem)
menuItem.show()
menuItem = gtk.MenuItem("E_xit")
menuItem.connect("activate", self.exit_menu_cb)
self.menu.append(menuItem)
menuItem.show()
# Main part of window is a DrawingArea
self.drawingArea = gtk.DrawingArea()
global hildonMode
if (hildonMode):
# Main window contains a single DrawingArea; menu is attached to Hildon window
self.app = hildon.Program()
self.mainWindow = hildon.Window()
self.mainWindow.set_title("Freecell")
self.app.add_window(self.mainWindow)
self.mainWindow.add(self.drawingArea)
self.mainWindow.set_menu(self.menu)
# Hildon dialogs are different than regular Gtk dialogs
self.cardOrColumnDialog = hildon.Note("confirmation", (self.mainWindow, "Move column or card?", gtk.STOCK_DIALOG_QUESTION))
self.cardOrColumnDialog.set_button_texts ("Column", "Card")
self.youWinDialog = hildon.Note("information", (self.mainWindow, "You won!", gtk.STOCK_DIALOG_INFO))
else:
# Main window contains a VBox with a MenuBar and a DrawingArea
self.mainWindow = gtk.Window(gtk.WINDOW_TOPLEVEL)
#self.mainWindow.set_default_size(800,600)
self.drawingArea.set_size_request(800, 480)
fileMenu = gtk.MenuItem("_File")
fileMenu.set_submenu(self.menu)
menuBar = gtk.MenuBar()
menuBar.append(fileMenu)
vbox = gtk.VBox()
vbox.pack_start(menuBar, False, False, 2)
vbox.pack_end(self.drawingArea, True, True, 2)
self.mainWindow.add(vbox)
# Create the dialogs in advance and then reuse later
self.cardOrColumnDialog = gtk.Dialog(parent = self.mainWindow, flags = gtk.DIALOG_MODAL, buttons=("Column", MOVE_COLUMN_ID, "Card", MOVE_CARD_ID))
self.cardOrColumnLabel = gtk.Label("Move column or card?")
self.cardOrColumnDialog.vbox.pack_start(self.cardOrColumnLabel)
self.cardOrColumnLabel.show()
self.youWinDialog = gtk.MessageDialog(self.mainWindow, gtk.DIALOG_MODAL, gtk.MESSAGE_INFO, gtk.BUTTONS_OK, "You won!")
# Wire up the event callbacks
self.mainWindow.connect("delete_event", self.delete_event_cb)
self.mainWindow.connect("destroy", self.destroy_cb)
self.mainWindow.connect("key-press-event", self.key_press_cb)
self.mainWindow.connect("window-state-event", self.window_state_change_cb)
self.drawingArea.connect("expose_event", self.expose_event_cb)
self.drawingArea.connect("configure_event", self.configure_event_cb)
self.drawingArea.connect("button_press_event", self.button_press_event_cb)
self.drawingArea.set_events(gtk.gdk.EXPOSURE_MASK | gtk.gdk.BUTTON_PRESS_MASK)
# Create the "About" dialog
self.aboutDialog = gtk.Dialog(parent = self.mainWindow, flags = gtk.DIALOG_MODAL, buttons=(gtk.STOCK_OK, gtk.RESPONSE_ACCEPT))
#self.aboutDialog.set_geometry_hints(self.mainWindow, min_width=400, min_height=200)
self.aboutDialog.set_default_size(480,300)
self.aboutScrolledWin = gtk.ScrolledWindow()
self.aboutScrolledWin.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS)
self.aboutTextView = gtk.TextView()
self.aboutTextView.set_editable(False)
self.aboutTextView.get_buffer().set_text(ABOUT_TEXT)
self.aboutScrolledWin.add(self.aboutTextView)
self.aboutDialog.vbox.pack_start(self.aboutScrolledWin)
# Behold!
self.mainWindow.show_all()
# Track the currently selected card
self.selectedCardRect = Rect()
self.selectedCardStack = None
self.selectedCardType = None
self.debugMode = False
def exit_menu_cb(self, widget):
    # "Exit" menu handler: stop the GTK main loop, ending the program.
    gtk.main_quit()
def restart_game_menu_cb(self, widget):
    # "Restart Game" menu handler: re-deal the SAME shuffle
    # (setupCards(False) replays self.startCardOrder), then reposition
    # and repaint everything.
    self.setupCards(False)
    self.setCardRects()
    self.redrawOffscreen()
    self.updateRect(None)
def about_menu_cb(self, widget):
    # "About..." menu handler: run the pre-built modal about dialog,
    # hiding (not destroying) it afterwards so it can be reused.
    self.aboutDialog.show_all()
    self.aboutDialog.run()
    self.aboutDialog.hide()
def new_game_menu_cb(self, widget):
    # "New Game" menu handler: shuffle a fresh deal (setupCards default),
    # then reposition and repaint everything.
    self.setupCards()
    self.setCardRects()
    self.redrawOffscreen()
    self.updateRect(None)
def key_press_cb(self, widget, event, *args):
    # Keyboard handler.
    #   F6     - toggle fullscreen (Maemo hardware key convention)
    #   Up     - turn smart-play mode off
    #   Down   - turn smart-play mode on
    #   Escape - undo last move
    #   Return - auto-move eligible cards to the ace stacks
    #   F7/F8  - (zoom keys on Maemo) clear/set debug mode
    # Left/Right currently only print a trace message.
    if (event.keyval == gtk.keysyms.F6):
        if (self.windowFullscreen):
            self.mainWindow.unfullscreen()
        else:
            self.mainWindow.fullscreen()
    elif (event.keyval == gtk.keysyms.Up):
        print "Up!"
        self.smartPlayMode = False
        self.redrawSmartModeIcon()
        self.updateRect(self.smartModeRect)
    elif (event.keyval == gtk.keysyms.Down):
        print "Down!"
        self.smartPlayMode = True
        self.redrawSmartModeIcon()
        self.updateRect(self.smartModeRect)
    elif (event.keyval == gtk.keysyms.Left):
        print "Left!"
    elif (event.keyval == gtk.keysyms.Right):
        print "Right!"
    elif (event.keyval == gtk.keysyms.Escape):
        print "Escape!"
        self.undoMove()
    elif (event.keyval == gtk.keysyms.Return):
        print "Return!"
        self.autoMoveCardsHome()
    elif (event.keyval == gtk.keysyms.F7):
        print "Zoom +!"
        self.debugMode = False
    elif (event.keyval == gtk.keysyms.F8):
        print "Zoom -!"
        self.debugMode = True
def autoMoveCardsHome(self):
    # Move cards to the ace stacks, where possible.
    # Repeatedly sweeps all free cells and columns, moving any top card
    # that is exactly one higher than its suit's ace-stack top card.
    # Loops until a full sweep moves nothing (moves can enable new moves).
    cardStacks = self.freecellStacks + self.mainCardStacks
    while (True):
        movedACard = False
        for srcStack in cardStacks:
            srcCardValue, srcCardSuit, srcCardSuitColour = srcStack.getCardValueSuitColour(-1)
            if (srcCardSuit >= 0):  # suit < 0 means the stack is empty
                aceCardValue, aceCardSuit, aceCardSuitColour = self.acesStacks[srcCardSuit].getCardValueSuitColour(-1)
                if (srcCardValue == aceCardValue + 1):
                    tempRect = srcStack.getTopCardRect()
                    self.flashRect(tempRect)
                    self.moveCard(srcStack, self.acesStacks[srcCardSuit])
                    movedACard = True
        if (movedACard != True):
            break
def checkGameOver(self):
    # Game over?  Won when every ace stack is topped by its king
    # (value CARDS_PER_SUIT - 1); if so, show the reusable win dialog.
    numFullAceStacks = 0
    for stack in self.acesStacks:
        cardVal, cardSuit, cardColour = stack.getCardValueSuitColour(-1)
        if (cardVal == CARDS_PER_SUIT - 1):
            numFullAceStacks += 1
    if (numFullAceStacks == NUMACES):
        self.youWinDialog.show()
        self.youWinDialog.run()
        self.youWinDialog.hide()
def undoMove(self):
    # Undo a move by replaying the last (src, dst) pair in reverse.
    if (len(self.undoStack) > 0):
        srcStack, dstStack = self.undoStack[-1]
        self.moveCard(dstStack, srcStack)
        # The call to moveCard actually records the undo as a move, so we need to pop the last TWO entries in the stack
        del self.undoStack[-1]
        del self.undoStack[-1]
    # Clear selection even if there was nothing to undo.
    self.clearCardSelection()
def window_state_change_cb(self, widget, event, *args):
    # Track whether the window is currently fullscreen (toggled with F6);
    # derived directly from the fullscreen bit of the new window state.
    logging.info("window_state_change_cb")
    self.windowFullscreen = bool(event.new_window_state & gtk.gdk.WINDOW_STATE_FULLSCREEN)
def setupCards(self, doShuffle = True):
    # Shuffle deck, distribute cards into the columns.
    # doShuffle=True deals a new random game (and remembers the order);
    # doShuffle=False re-deals startCardOrder, implementing "Restart Game".
    # Also resets the undo history and empties the ace/free-cell stacks.
    self.undoStack = [ ]
    self.acesStacks = [ CardStack(0, 0, self.cardPixbufs[CLUBS_BACK + i], i, 0) for i in range(NUMACES) ]
    self.freecellStacks = [ CardStack(0, 0, self.cardPixbufs[BLANK_BACK], -1, 0) for i in range(NUMFREECELLS) ]
    if (doShuffle):
        cards = [i for i in range(NUMCARDS)]
        random.shuffle(cards)
        self.startCardOrder = cards
    else:
        cards = self.startCardOrder
    for i in range(NUMCOLUMNS):
        self.mainCardStacks[i].clearStack()
    # Deal round-robin across the columns, left to right.
    for i in range(NUMCARDS):
        cardNum = cards[i]
        cardCol = i % NUMCOLUMNS
        newCard = Card(cardNum, pixBuf = self.cardPixbufs[cardNum])
        self.mainCardStacks[cardCol].pushCard(newCard)
def getStackListEnclosingRect(self, cardStackList):
    # Compute a rect enclosing every stack in the list: start from the
    # first stack's rect and grow it with each remaining stack's rect.
    # (Note: grows the rect object returned by the first stack in place.)
    rect = cardStackList[0].getRect()
    for stack in cardStackList[1:]:
        rect.unionWith(stack.getRect())
    return rect
def setCardRects(self):
    # Set the position of all card stacks; this is done in response to a
    # configure event (startup or fullscreen toggle).
    # Set location of main stacks of cards: 8 evenly spaced columns below
    # the free-cell/ace row.
    cardHorizSpacing = self.windowWidth / 8.0
    for i in range(NUMCOLUMNS):
        x = int(i * cardHorizSpacing + (cardHorizSpacing - self.cardWidth) // 2)
        self.mainCardStacks[i].setLeftTop(x, VERT_SEPARATOR_WIDTH + self.cardHeight + VERT_SEPARATOR_WIDTH)
    # Set location of free cells (left half) and aces (right half) on the
    # top row, using a slightly tighter spacing.
    cardHorizSpacing = self.windowWidth / 8.5
    for i in range(NUMFREECELLS):
        x = i * cardHorizSpacing + (cardHorizSpacing - self.cardWidth) // 2
        self.freecellStacks[i].setLeftTop(x, VERT_SEPARATOR_WIDTH)
        x = int((i + NUMFREECELLS + 0.5) * cardHorizSpacing + (cardHorizSpacing - self.cardWidth) // 2)
        self.acesStacks[i].setLeftTop(x, VERT_SEPARATOR_WIDTH)
    # Get the enclosing rects for click-testing.
    # BUG FIX: mainCardsRects was previously computed from acesStacks
    # (copy/paste error); it must enclose the main columns.
    self.mainCardsRects = self.getStackListEnclosingRect(self.mainCardStacks)
    self.freeCellsRect = self.getStackListEnclosingRect(self.freecellStacks)
    self.acesRect = self.getStackListEnclosingRect(self.acesStacks)
def delete_event_cb(self, widget, event, data=None):
    # Window-manager close request.  Returning False lets the delete
    # proceed (the "destroy" handler then quits the main loop).
    return False
def destroy_cb(self, widget, data=None):
    # Window destroyed: tell gtk to quit the main loop.
    gtk.main_quit()
def flashRect(self, rect, repeats = 3):
    # Flash a rect onscreen by XOR-inverting it `repeats` times with a
    # 125ms pause between inversions.  An odd `repeats` leaves the rect
    # inverted; callers repaint afterwards.  No-op for a None rect.
    if (rect == None):
        return
    for i in range(repeats):
        self.invertRect(rect)
        # Force the server to actually paint before sleeping.
        gtk.gdk.window_process_all_updates()
        time.sleep(0.125)
def updateRect(self, rect):
    # Queue a redraw of an onscreen rect; None means the whole window.
    if (rect == None):
        x, y, w, h = 0, 0, self.windowWidth, self.windowHeight
    else:
        x, y, w, h = rect.getLeftTopWidthHeight()
    #logging.info("updateRect: (%d,%d) %dx%d" % (x, y, w + 1, h + 1))
    # +1 so the redraw covers the rect's inclusive right/bottom edge.
    self.drawingArea.queue_draw_area(x, y, w + 1, h + 1)
def invertRect(self, rect):
    # Invert a rect onscreen by drawing white in XOR mode, then restore
    # the GC to normal COPY mode.  No-op for a None rect.
    if (rect == None):
        return
    x, y, w, h = rect.getLeftTopWidthHeight()
    self.drawingAreaGC.set_foreground(self.whiteColour)
    self.drawingAreaGC.set_function(gtk.gdk.XOR)
    self.drawingArea.window.draw_rectangle(self.drawingAreaGC, True, x, y, w, h)
    self.drawingAreaGC.set_function(gtk.gdk.COPY)
def redrawSmartModeIcon(self):
    # Redraw the "smart-mode" icon area in the offscreen buffer: the icon
    # pixbuf when smart mode is on, or a green (background) rectangle to
    # erase it when off.  Callers must queue the onscreen update.
    if (self.smartPlayMode):
        left, top, width, height = self.smartModeRect.getLeftTopWidthHeight()
        self.offscreenPixmap.draw_pixbuf(self.offscreenGC, self.smartModePixbuf, 0, 0, left, top)
    else:
        self.offscreenGC.set_foreground(self.greenColour)
        left, top, width, height = self.smartModeRect.getLeftTopWidthHeight()
        self.offscreenPixmap.draw_rectangle(self.offscreenGC, True, left, top, width, height)
def redrawOffscreen(self):
    # Repaint the whole offscreen buffer: green background first, then
    # every stack (aces, free cells, main columns), then the smart-mode
    # icon.  Callers queue the onscreen update separately.
    self.offscreenGC.set_foreground(self.greenColour)
    width, height = self.offscreenPixmap.get_size()
    self.offscreenPixmap.draw_rectangle(self.offscreenGC, True, 0, 0, width, height)
    for cardStack in (self.acesStacks + self.freecellStacks + self.mainCardStacks):
        cardStack.drawStack(self.offscreenPixmap, self.offscreenGC)
    self.redrawSmartModeIcon()
def configure_event_cb(self, widget, event):
    # Handle the window configuration event at startup or when changing to/from fullscreen.
    # Reallocates the offscreen and temporary pixmaps at the new size,
    # allocates drawing colours, records the new dimensions, repositions
    # the card stacks and smart-mode icon, and repaints everything.
    logging.info("configure_event_cb")
    # Allocate a Pixbuf to serve as the offscreen buffer for drawing of the game board
    x, y, width, height = widget.get_allocation()
    self.offscreenPixmap = gtk.gdk.Pixmap(widget.window, width, height)
    self.offscreenGC = self.offscreenPixmap.new_gc()
    self.greenColour = self.offscreenGC.get_colormap().alloc_color(0x0000, 0x8000, 0x0000)
    self.redColour = self.offscreenGC.get_colormap().alloc_color(0xFFFF, 0x0000, 0x0000)
    self.blackColour = self.offscreenGC.get_colormap().alloc_color(0x0000, 0x0000, 0x0000)
    self.whiteColour = self.offscreenGC.get_colormap().alloc_color(0xFFFF, 0xFFFF, 0xFFFF)
    self.drawingAreaGC = self.drawingArea.window.new_gc()
    # tmpPixmap is the scratch buffer used by animateCardMove.
    self.tmpPixmap = gtk.gdk.Pixmap(widget.window, width, height)
    self.tmpGC = self.tmpPixmap.new_gc()
    # Screen geometry has changed, so note new size, set CardStack locations, redraw screen
    self.windowWidth = width
    self.windowHeight = height
    logging.debug("configure_event_cb: self.windowWidth = %d, self.windowHeight = %d" % (self.windowWidth, self.windowHeight))
    # Resize has occurred, so set the card rects
    self.setCardRects()
    # Set the smart-mode icon rect (horizontally centered near the top)
    left = (self.windowWidth - self.smartModePixbuf.get_width()) // 2
    top = 2 * VERT_SEPARATOR_WIDTH
    self.smartModeRect = Rect(left, top, self.smartModePixbuf.get_width(), self.smartModePixbuf.get_height())
    # Redraw everything
    self.redrawOffscreen()
    return True
def expose_event_cb(self, widget, event):
    # Draw game board by copying the exposed area from the offscreen
    # Pixmap to the onscreen window.
    # Gtk is apparently now double-buffered, so this is probably unnecessary
    x , y, width, height = event.area
    logging.debug("expose_event_cb: x=%d, y=%d, w=%d, h=%d" % (x, y, width, height))
    if (self.offscreenPixmap != None):
        widget.window.draw_drawable(widget.get_style().fg_gc[gtk.STATE_NORMAL], self.offscreenPixmap, x, y, x, y, width, height)
    return False
def clearCardSelection(self):
    # Drop the current card selection, queueing a repaint of the
    # previously highlighted (inverted) rect if there was one.
    if (self.selectedCardRect != None):
        self.updateRect(self.selectedCardRect)
    self.selectedCardRect = self.selectedCardType = self.selectedCardStack = None
def setCardSelection(self, stackType, cardStack, cardRect):
    # Highlight the newly selected card (inverted) and remember the
    # selection's rect, stack type, and stack for the next click.
    self.invertRect(cardRect)
    self.selectedCardRect, self.selectedCardType, self.selectedCardStack = (
        cardRect, stackType, cardStack)
def animateCardMove(self, card, toX,toY):
    # Cutesy animation showing movement of a card from its current
    # location to a new location.  Each frame: copy the background from
    # the offscreen pixmap into tmpPixmap, draw the card at its
    # interpolated position, then blit the dirty region to the window.
    fromX,fromY,cardWidth,cardHeight = card.getLeftTopWidthHeight()
    if (fromX == toX and fromY == toY):
        return
    deltaX, deltaY = float(toX - fromX), float(toY - fromY)
    dist = math.sqrt(deltaX*deltaX + deltaY*deltaY)
    speed = 10.0  # pixels per animation step
    # BUG FIX: clamp to at least one step.  A move shorter than `speed`
    # pixels used to give numSteps == 0 and a ZeroDivisionError below.
    numSteps = max(1, int(dist / speed))
    vx, vy = deltaX / numSteps, deltaY / numSteps
    # Dirty region must cover the card at both its old and new positions.
    updateWidth, updateHeight = cardWidth + int(abs(vx) + 0.5) + 1, cardHeight + int(abs(vy) + 0.5) + 1
    prevX, prevY = fromX, fromY
    for i in range(numSteps + 1):
        if (i == numSteps):
            # Avoid rounding issues
            x, y = int(toX), int(toY)
        else:
            x, y = int(fromX + vx * i), int(fromY + vy * i)
        left, top = min(x, prevX), min(y, prevY)
        self.tmpPixmap.draw_drawable(self.tmpGC, self.offscreenPixmap, left, top, left, top, updateWidth, updateHeight)
        card.drawCard(self.tmpPixmap, self.tmpGC, (x,y))
        self.drawingArea.window.draw_drawable(self.drawingArea.get_style().fg_gc[gtk.STATE_NORMAL], self.tmpPixmap, left, top, left, top, updateWidth, updateHeight)
        # Took me a long time to figure out that this forces screen updates and makes the animation work
        gtk.gdk.window_process_all_updates()
        prevX, prevY = x, y
        #time.sleep(0.1)
def moveCard(self, srcStack, dstStack):
    # Move the top card from one stack to another, with animation.
    # Records the move on the undo stack, erases the card's old spot in
    # the offscreen buffer, animates the card, pushes it onto dstStack,
    # clears any selection and checks for a win.  No-op when src == dst.
    if (srcStack == dstStack):
        return
    srcCardVal, srcSuit, srcSuitColour = srcStack.getCardValueSuitColour(-1)
    dstCardVal, dstSuit, dstSuitColour = dstStack.getCardValueSuitColour(-1)
    logging.info("moveCard: move %s %d to %s %d" % (SUITNAMES[srcSuit], srcCardVal, SUITNAMES[dstSuit], dstCardVal))
    # NOTE: undo moves are recorded too; undoMove pops both entries.
    self.undoStack.append((srcStack, dstStack))
    # Erase the card at its source position in the offscreen buffer.
    x, y, w, h = srcStack.getTopCardRect().getLeftTopWidthHeight()
    self.offscreenGC.set_foreground(self.greenColour)
    self.offscreenPixmap.draw_rectangle(self.offscreenGC, True, x, y, w, h)
    fromX, fromY = x, y
    toX, toY = dstStack.getNextTopCardLeftTop()
    card = srcStack.popCard()
    srcStack.drawTopCard(self.offscreenPixmap, self.offscreenGC)
    self.animateCardMove(card, toX, toY)
    dstStack.pushCard(card)
    dstStack.drawTopCard(self.offscreenPixmap, self.offscreenGC)
    self.clearCardSelection()
    self.checkGameOver()
def xyToCardStackInfo(self, x, y):
    # Hit-test a screen coordinate against the free cells, ace stacks,
    # and main columns (in that order).  Returns (type, rect, stack) for
    # the stack under (x, y), or (None, None, None) if nothing was hit.
    hitType, hitRect, hitStack = None, None, None
    if (self.freeCellsRect.enclosesXY(x, y)):
        for stack in self.freecellStacks:
            if (stack.enclosesXY(x, y)):
                hitType, hitRect, hitStack = FREECELL_TYPE, stack.getRect(), stack
                break
    elif (self.acesRect.enclosesXY(x, y)):
        for stack in self.acesStacks:
            if (stack.enclosesXY(x, y)):
                hitType, hitRect, hitStack = ACE_TYPE, stack.getRect(), stack
                break
    else:
        # Main columns: only the top card's rect is the click target.
        for stack in self.mainCardStacks:
            if (stack.enclosesXY(x, y)):
                hitType, hitRect, hitStack = REGULAR_TYPE, stack.getTopCardRect(), stack
                break
    return (hitType, hitRect, hitStack)
def button_press_event_cb(self, widget, event):
    # This is the big, ugly one-- all the gameplay rules are implemented here...
    # Click handling: first click selects a source card/stack, second
    # click attempts a move to the clicked destination.  In smart-play
    # mode a single click tries ace stack, then main stacks, then free
    # cells automatically.  Returns True (handled) on every path.
    x, y = event.x, event.y
    dstType, dstRect, dstStack = self.xyToCardStackInfo(x, y)
    if (dstStack == None or dstStack == self.selectedCardStack):
        # Didn't click on a valid target, so clear the previous click selection and bail
        self.clearCardSelection()
        return True
    if (self.selectedCardStack == None and dstStack.getNumCards() > 0 and not self.smartPlayMode):
        # There was no previous selection, and smart-mode is off, so select target and bail
        self.setCardSelection(dstType, dstStack, dstRect)
        return True
    if (self.selectedCardStack == None and dstStack.getNumCards() > 0 and self.smartPlayMode):
        # No previous selection, player has clicked a valid stack, and smart-mode is on; try to move the card to an ace stack, main stack, or free cell stack
        origDstType, origDstStack, origDstRect = dstType, dstStack, dstRect
        # Call it srcStack to make the code clearer
        srcStack = dstStack
        srcCardVal, srcCardSuit, srcCardSuitColour = srcStack.getCardValueSuitColour(-1)
        # Try the aces stack first
        dstStack = self.acesStacks[srcCardSuit]
        dstCardVal, dstCardSuit, dstCardSuitColour = dstStack.getCardValueSuitColour(-1)
        if (dstCardVal == srcCardVal - 1):
            self.moveCard(srcStack, dstStack)
            return True
        # Try a non-empty main stack
        for dstStack in self.mainCardStacks:
            dstCardVal, dstCardSuit, dstCardSuitColour = dstStack.getCardValueSuitColour(-1)
            if (dstCardVal >= 0 and dstCardVal == srcCardVal + 1 and dstCardSuitColour != srcCardSuitColour):
                self.moveCard(srcStack, dstStack)
                return True
        # Try an empty main stack or a freecell stack
        tmpStacks = self.mainCardStacks + self.freecellStacks
        for dstStack in tmpStacks:
            if (dstStack.getNumCards() <= 0):
                self.moveCard(srcStack, dstStack)
                return True
        # No joy, so just select the card and bail
        self.setCardSelection(dstType, origDstStack, dstRect)
        return True
    # From here on out, we need a selected card, so bail if there is none
    if (self.selectedCardStack == None):
        return True
    if (self.debugMode):
        # Debug mode allows us to move a card anywhere we like, bwahahahahaha....
        self.moveCard(self.selectedCardStack, dstStack)
        return True
    if (dstType == ACE_TYPE):
        # Ace stack destination is special, so handle that here
        srcStack = self.selectedCardStack
        srcCardVal, srcCardSuit, srcCardSuitColour = srcStack.getCardValueSuitColour(-1)
        dstCardVal, dstCardSuit, dstCardSuitColour = dstStack.getCardValueSuitColour(-1)
        if (srcCardSuit == dstCardSuit and srcCardVal == dstCardVal + 1):
            # Move selected card to an ace stack
            self.moveCard(srcStack, dstStack)
        return True
    if (dstType == FREECELL_TYPE):
        # Freecell stack destination is also special, so handle that here
        if (dstStack.getNumCards() <= 0):
            self.moveCard(self.selectedCardStack, dstStack)
        return True
    # This is where things get complicated as we figure out whether we are moving a single card or a run
    srcStack = self.selectedCardStack
    srcNumCards = srcStack.getNumCards()
    srcCardVal, srcSuit, srcSuitColour = srcStack.getCardValueSuitColour(-1)
    dstNumCards = dstStack.getNumCards()
    dstCardVal, dstCardSuit, dstCardSuitColour = dstStack.getCardValueSuitColour(-1)
    dstSrcDelta = dstCardVal - srcCardVal
    logging.debug("srcSuit = %d, srcSuitColour = %d, srcCardVal = %d, srcNumCards = %d" % (srcSuit, srcSuitColour, srcCardVal, srcNumCards))
    logging.debug("dstCardSuit = %d, dstCardSuitColour = %d, dstCardVal = %d" % (dstCardSuit, dstCardSuitColour, dstCardVal))
    # Count empty free cells: each one lets us move one more card in a run.
    numFreeCells = 0
    for cardStack in self.freecellStacks:
        if (cardStack.getNumCards() <= 0):
            numFreeCells += 1
    # Measure the run of descending, colour-alternating cards at the top of src.
    runLength = 1
    for i in range(1, srcNumCards):
        cardVal, cardSuit, cardSuitColour = srcStack.getCardValueSuitColour(srcNumCards - i - 1)
        logging.debug("card #%d: cardVal = %d, cardSuit = %d, cardSuitColour = %d" % (srcNumCards - i - 1, cardVal, cardSuit, cardSuitColour))
        if (cardVal == srcCardVal + i and cardSuitColour == (srcSuitColour + i) % 2):
            runLength += 1
        else:
            break
    suitColoursWork = (srcSuitColour == (dstCardSuitColour + dstSrcDelta) % 2)
    srcRunMeetsDst = dstSrcDelta > 0 and runLength >= dstSrcDelta
    logging.debug("dstSrcDelta = %d, numFreeCells = %d, runLength = %d, suitColoursWork = %s, srcRunMeetsDst = %s" % (dstSrcDelta, numFreeCells, runLength, suitColoursWork, str(srcRunMeetsDst)))
    if (dstNumCards <= 0 and runLength > 1 and numFreeCells > 0):
        # Move a card or a column to an empty stack?
        self.cardOrColumnDialog.show()
        dialogResult = self.cardOrColumnDialog.run()
        self.cardOrColumnDialog.hide()
        # Repaint the mess made by the dialog box
        self.updateRect(None)
        x, y, w, h = 0, 0, self.windowWidth, self.windowHeight
        self.drawingArea.window.draw_drawable(self.drawingArea.get_style().fg_gc[gtk.STATE_NORMAL], self.offscreenPixmap, x, y, x, y, w, h)
        gtk.gdk.window_process_all_updates()
        if (dialogResult == MOVE_CARD_ID):
            runLength = 1
    if (dstNumCards <= 0):
        # Move a run of cards onto an empty stack
        print "Move a run of cards onto an empty stack, numFreeCells=",numFreeCells," runLength=",runLength
        runLength = min(numFreeCells + 1, runLength)
    elif (srcRunMeetsDst and suitColoursWork and numFreeCells >= dstSrcDelta - 1):
        # Moving a run onto a non-empty stack
        runLength = dstSrcDelta
    else:
        # Move request is illegal, so assume user is just changing the active selection
        self.clearCardSelection()
        self.setCardSelection(dstType, dstStack, dstRect)
        return True
    logging.debug("Column move...")
    # Park runLength-1 cards in free cells, move the bottom card of the
    # run, then pull the parked cards back on top in order.
    tempStacks = [ ]
    for i in range(runLength - 1):
        for j in range(NUMFREECELLS):
            if (self.freecellStacks[j].getNumCards() <= 0):
                self.moveCard(srcStack, self.freecellStacks[j])
                tempStacks.insert(0, self.freecellStacks[j])
                break
    self.moveCard(srcStack, dstStack)
    for s in tempStacks:
        self.moveCard(s, dstStack)
    return True
if __name__ == "__main__":
    # Script entry point: configure logging, build the game, run GTK loop.
    logging.basicConfig(level=logging.INFO)
    freeCell = FreeCell()
    gtk.main()
| Python |
#====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ====================================================================
#
# This software consists of voluntary contributions made by many
# individuals on behalf of the Apache Software Foundation. For more
# information on the Apache Software Foundation, please see
# <http://www.apache.org/>.
#
import os
import re
import tempfile
import shutil
# Directories to skip while recursing (VCS and build output).
ignore_pattern = re.compile('^(.svn|target|bin|classes)')
# Java source files to process.
java_pattern = re.compile('^.*\.java')
# Import lines to rewrite.
annot_pattern = re.compile('import org\.apache\.http\.annotation\.')

def process_dir(dir):
    # Recursively walk `dir`, rewriting every .java file found and
    # skipping ignored directories.
    for entry in os.listdir(dir):
        path = os.path.join(dir, entry)
        if os.path.isdir(path):
            if ignore_pattern.match(entry):
                continue
            process_dir(path)
        elif java_pattern.match(entry):
            process_source(path)
def process_source(filename):
    # Rewrite one .java file, replacing org.apache.http.annotation
    # imports with net.jcip.annotations equivalents.  The rewritten text
    # goes to a temp file which replaces the original only if a change
    # was actually made; otherwise the temp file is removed.
    tmpfd, tmpfile = tempfile.mkstemp()
    try:
        changed = False
        dst = os.fdopen(tmpfd, 'w')
        try:
            src = open(filename)
            try:
                for line in src:
                    if annot_pattern.match(line):
                        changed = True
                        line = line.replace('import org.apache.http.annotation.', 'import net.jcip.annotations.')
                    dst.write(line)
            finally:
                src.close()
        finally:
            dst.close()
        if changed:
            shutil.move(tmpfile, filename)
        else:
            os.remove(tmpfile)
    except:
        # BUG FIX: the original bare except removed the temp file but
        # swallowed the error, silently skipping failed files.  Clean up,
        # then re-raise so the caller sees the failure.
        os.remove(tmpfile)
        raise
process_dir('.')
| Python |
'''
Check for mismatches between GNIS/Geonames name and the Freebase topic name.
Created on Jan 23, 2010
@author: tfmorris
'''
import csv
from fileinput import FileInput
import urllib
import zipfile
from FreebaseSession import FreebaseSession, getSessionFromArgs
baseurl = 'http://geonames.usgs.gov/docs/stategaz/'
filename = 'POP_PLACES_20091002.zip'
workdir = ''
def loadGnis():
    # Build a {feature_id: place_name} dict from the first member of the
    # GNIS populated-places zip archive (pipe-delimited, one header row).
    zfile = zipfile.ZipFile(filename, mode='r')
    member = zfile.open(zfile.infolist()[0])
    reader = csv.reader(member, delimiter='|')
    reader.next()  # discard the header row
    places = dict((row[0], row[1]) for row in reader)
    member.close()
    zfile.close()
    return places
def normalizeName(name):
    # Normalize a place name for comparison: expand St./Ste. to
    # Saint/Sainte, drop apostrophes, lowercase, remove the
    # "(historical)" marker and trim surrounding whitespace.
    for old, new in (('St.', 'Saint'), ('Ste.', 'Sainte'), ("'", '')):
        name = name.replace(old, new)
    return name.lower().replace('(historical)', '').strip()
def main():
    # Compare every Freebase /location/citytown topic carrying a GNIS
    # feature id against the GNIS populated-places table, reporting
    # whitespace problems, non-integer ids, missing GNIS entries, and
    # name mismatches.
    # url = baseUrl + filename
    # urllib.urlretrieve(url, workDir + filename)
    places = loadGnis()
    session = getSessionFromArgs()
    q = {"t:type":"/location/citytown",
         "type":"/location/location",
         "gnis_feature_id" : [],
         "name":None,
         "id":None,
         }
    count = 0
    mismatches = 0
    for r in session.mqlreaditer([q]):
        count += 1
        name = r['name']
        if name != name.strip():
            print 'Name "%s" has leading/trailing whitespace - %s' % (name,r['id'])
        name = normalizeName(name)
        ids = r['gnis_feature_id']
        if len(ids) > 1:
            print 'Multiple GNIS feature IDs for id %s' % r['id']
        for id in ids:
            # GNIS keys are canonical integer strings; detect padded ids.
            i = str(int(id))
            if i != id:
                print 'ID %s not integer - %s' % (id,r['id'])
            if not i in places:
                print 'No GNIS entry for id # %s %s %s' % (i,r['id'],r['name'])
            # NOTE(review): this inline normalization omits the St./Ste.
            # expansion that normalizeName applies - confirm intended.
            elif places[i].lower().replace('(historical)','').replace("'","").strip() != name:
                mismatches += 1
                print '%d/%d Name mismatch -\t%s\t%s\t%s' % (mismatches, count, r['id'],r['name'],places[i])

if __name__ == '__main__':
    main()
'''
Program to count votes
for a time period. We start with the desired interval and sub-divide if
necessary due to timeouts during the counting process.
@author: Tom Morris <tfmorris@gmail.com>
@license: EPL v1
'''
import sys
from datetime import datetime, timedelta
from optparse import OptionParser
import getpass
import logging
from freebase.api import HTTPMetawebSession,MetawebError
def count(session, query, start_time, end_time):
    # Count votes in [start_time, end_time].  Issues the MQL count query
    # for the whole interval; on an MQL timeout the interval is split
    # into 4 slices and the partial counts are summed recursively.
    # Note: `query`'s timestamp bounds are overwritten in place.
    query["timestamp>="] = start_time.isoformat()
    query["timestamp<="] = end_time.isoformat()
    try:
        result = session.mqlread(query)
        # Uncomment the following line to see how small the interval got before
        # the query succeeded
        # print "\t".join(["",start_time.isoformat(),end_time.isoformat(),str(result)])
        return result
    except MetawebError as e:
        # Only a timeout triggers subdivision; anything else propagates.
        if e.message.find('/api/status/error/mql/timeout') < 0:
            raise
    # TODO We should really check for runaway recursion in pathological cases
    total = 0
    slices = 4
    interval = (end_time - start_time) / slices
    # BUG FIX: the original looped range(0, slices-1), covering only 3 of
    # the 4 slices and silently dropping the last quarter of the interval.
    for i in range(slices):
        t1 = start_time + i * interval
        t2 = t1 + interval
        total += count(session, query, t1, t2)
    return total
def main():
    # Dump every /pipeline/vote as tab-separated (timestamp, creator,
    # vote value) rows, reading from the host given with -s/--host.
    parser = OptionParser()
    parser.add_option("-s", "--host", dest="host", help="service host", default = 'api.freebase.com')
    (options, args) = parser.parse_args()
    host = options.host
    print 'Host: %s' % host
    session = HTTPMetawebSession(host)
    # NOTE(review): "timestamp" appears twice in this dict literal; the
    # second occurrence wins (both are None, so it is harmless).
    q = {
         "type": "/pipeline/vote",
         # "timestamp>=": "2008-11",
         "timestamp": None,
         "vote_value": None,
         "timestamp":None,
         "creator":None,
         "limit":500
         }
    # NOTE(review): q1 (a date-bounded variant filtering by vote name) is
    # defined but never used below.
    q1 = {
          "type": "/pipeline/vote",
          "timestamp>=": "2008-11",
          "timestamp<=": "2009-01",
          "timestamp": None,
          "vote_value": {
                         "name|=": [
                                    "delete",
                                    "keep",
                                    "merge",
                                    "skip",
                                    "don't merge",
                                    "left wins",
                                    "right wins"
                                    ],
                         "optional": "forbidden"
                         },
          "v:vote_value": None,
          "timestamp":None,
          "creator":None
          }
    for r in session.mqlreaditer([q]):
        vote = '<null>'
        if r.vote_value:
            vote = r.vote_value
        print "\t".join([r.timestamp, r.creator, vote])

if __name__ == '__main__':
    main()
'''
Look for duplicate publishers
'''
from freebase.api import HTTPMetawebSession, MetawebError
def main():
    # Scan /business/company topics whose names end in a corporate suffix
    # and, for each, query for other companies whose names differ only in
    # suffix/punctuation, printing sets of likely duplicates.
    session = HTTPMetawebSession('api.freebase.com')
    query = {
             "type":"/business/company",
             'name':None,
             "id":None
             }
    # suffixes = ['inc.', 'inc', 'incorporated', 'co.', 'co', 'company']
    # TODO prefix AB, suffixes Ltd, Limited, LLC
    # TODO make this data driven from someplace?
    suffixes = ['inc', 'incorporated', 'co', 'company', 'corp', 'corporation']
    count = 0
    dupes = 0
    start = 8298 # start index to speed up restart after failure
    for suffix in suffixes:
        # MQL pattern: names ending in " <suffix>" (dots escaped).
        query['name~='] = '* ' + suffix.replace('.','\\.') + '$'
        response = session.mqlreaditer([query])
        for r in response:
            count += 1
            if count < start:
                continue
            name = r.name
            if not name:
                print 'Null or missing name for %s' % (r.id)
                last = ''
            else:
                # Normalize: lowercase, strip trailing dot, drop the
                # suffix and any trailing comma before it.
                n1 = name.lower().strip()
                if n1[-1] == '.':
                    n1 = n1[:-1]
                if not n1.endswith(suffix):
                    print 'No suffix match %s %s' % (name,suffix)
                else:
                    n1 = n1[:-len(suffix)]
                    n1 = n1.strip()
                    if n1[-1] == ',':
                        n1 = n1[:-1]
                    # Look up the base name with every suffix variant.
                    rdup = session.mqlread([{'type':'/business/company',
                                             'name|=':[n1 + x for x in ['',
                                                                        ' inc',
                                                                        ', inc',
                                                                        ' inc.',
                                                                        ', inc.',
                                                                        ' incorporated',
                                                                        ', incorporated',
                                                                        ' co',
                                                                        ', co',
                                                                        ' co.',
                                                                        ', co.',
                                                                        ' corp',
                                                                        ', corp',
                                                                        ' corp.',
                                                                        ', corp.',
                                                                        ' corporation',
                                                                        ' company',
                                                                        ', company']],
                                             'name':None,
                                             'id':None}])
                    if not rdup:
                        print 'ERROR: no name match %s %s' % (n1, name)
                    else:
                        if len(rdup)>1:
                            print 'Set of dupes:'
                            for rd in rdup:
                                dupes += 1
                                # print ' %d %d %s http://www.freebase.com/tools/explore%s' % (dupes, count, rd.name, rd.id)
                                print '\t'.join([str(dupes), str(count), rd.name, rd.id])
                        else:
                            pass
                            # print 'Unique name %s %s' % (n1,name)

if __name__ == '__main__':
    main()
| Python |
'''
Simple demo of the geosearch API.
Find the 10 closest National Register of Historic Places listings which are
of National significance and within 50 km, using the most recently added
listing as a starting point for the search.
Requires a version of freebase-api greater than 1.0.3
@author: Tom Morris <tfmorris@gmail.com>
@license: EPL v1
'''
from freebase.api import HTTPMetawebSession
def main():
    # Geosearch demo: find the most recently added NRHP listing that has
    # a geolocation, then list the 10 closest nationally significant
    # listings within 50 km of it.
    session = HTTPMetawebSession('api.freebase.com')
    # Query to find the most recent site with a geolocation
    q = [{
          "t:type": "/base/usnris/nris_listing",
          "type": "/location/location",
          "id": None,
          "name": None,
          # A nested request for longitude makes this non-optional
          "geolocation":{'longitude':None},
          "timestamp": None,
          "sort": "-timestamp",
          "limit": 1
          }]
    result = session.mqlread(q)
    r = result[0]
    print "Using %s %s as the base location" % (r.id,r.name)
    # Restrict geosearch hits to nationally significant NRHP listings.
    mql_filter = [{"type" : "/base/usnris/nris_listing",
                   "significance_level":"National"
                   }]
    response = session.geosearch(location=r.id,
                                 mql_filter = mql_filter,
                                 within=50.0,order_by='distance', limit=10)
    for r in response.result.features:
        p = r.properties
        print "%f km id: %s name: %s" % (p['geo:distance'],p.id,p.name)

if __name__ == '__main__':
    main()
'''
Identify duplicate publisher topics.
Simplistic analysis currently - only finds identical name matches
(230 of 10,500 publishers)
'''
import codecs
from freebase.api import HTTPMetawebSession, MetawebError
def normalize_publisher_name(name):
    # Placeholder: currently returns the name unchanged.
    # Intended normalizations:
    # remove parenthetical expressions
    # remove company identifiers (Inc., Co., Ltd, Group, etc)
    # remove Publishing, Publications, Press, Books, Editions,
    # normalize abbreviations (also '&' vs 'and')
    # split place off <publishing place> : <publisher>
    return name
def main():
    # Identify duplicate /book/publishing_company topics by scanning all
    # publishers sorted by name and flagging consecutive identical names.
    session = HTTPMetawebSession('api.freebase.com')
    query = [{"t:type":"/book/publishing_company",
              "type":[],
              "timestamp":None,
              "sort":"name",
              'id':None,
              'name':None
              }]
    response = session.mqlreaditer(query)
    last = ''
    count = 0
    dupes = 0
    for r in response:
        count += 1
        name = r.name
        if not name:
            print 'Null or missing name for %s' % (r.id)
            last = ''
        else:
            name = name.strip()
            # Sorted input means exact duplicates are adjacent.
            if name == last:
                dupes += 1
                print '%d\t%d\tDuplicate\t%s\thttp://www.freebase.com/tools/explore%s' % (dupes, count, codecs.encode(name,'utf-8'), r.id)
            else:
                print '%d\t%d\t\t%s\thttp://www.freebase.com/tools/explore%s' % (dupes, count, codecs.encode(name,'utf-8'), r.id)
            last = name

if __name__ == '__main__':
    main()
'''
Find all locations contained in French departements which which have the
departement name appended to the commune name and fix the names to match
the Freebase.com naming standards.
Freebase's convention is to store disambiguating information in structured
information, rather than as part of the name like Wikipedia does.
Although we can construct an MQL query which gets us all the candidates,
doing the final comparison and stripping of the departement name suffix
requires a little Python.
Created on October 3, 2009
@author: Tom Morris
'''
import FreebaseSession
write = False
def main():
    # Find locations inside French departements whose English name has
    # the ", <departement>" suffix appended (Wikipedia convention) and
    # strip it, per Freebase naming standards.  Writes only happen when
    # the module-level `write` flag is True; otherwise this is a dry run.
    session = FreebaseSession.getSessionFromArgs();
    if write:
        session.login()
    query = {
             "type": "/location/location",
             "containedby": [{
                              "type": "/location/fr_department",
                              "name": None
                              }],
             "name": [{
                       "value~=": "*\\,*",
                       "value": None,
                       "lang": "/lang/en"}],
             "id": None
             }
    response = session.mqlreaditer([query])
    changes = 0
    skips = 0
    for r in response:
        depts = r.containedby
        if (len(depts) != 1):
            print 'Multiple departments' + repr(depts)
        dept_name = depts[0].name
        name = r.name[0].value
        if not name.endswith(dept_name): # and not name.endswith(' France'):
            print '\t'.join([r.id, name, 'Skipped - doesnt end with ' + dept_name])
            skips +=1
        else:
            # Keep everything before ", <departement>".
            name = name.split(', '+dept_name)[0]
            if not name:
                print '\t'.join([r.id, name, 'Bad split name'])
            else:
                q = {"id":r.id,
                     "name" : {"value" : name,
                               "lang" : "/lang/en",
                               "type" : "/type/text",
                               "connect" : "update"}
                     }
                if write:
                    status = session.mqlwrite(q)
                    if (status.name.connect != 'updated'):
                        print '\t'.join([r.id, name, 'Update Failed: ' + repr(status)])
                    else:
                        print '\t'.join([r.id, name, status.name.connect])
                        changes += 1
    print ' '.join(('Changes: ',str(changes), 'Skips:', str(skips)))

if __name__ == '__main__':
    main()
'''
Fix up bad author names from Open Library load.
@author: Tom Morris <tfmorris@gmail.com>
@license: EPL v1
'''
import sys
from bisect import bisect_left
import codecs
from datetime import datetime, timedelta
from optparse import OptionParser
import getpass
import logging
from freebase.api import HTTPMetawebSession,MetawebError
# Suffix/credential tokens that should never BEGIN a personal name;
# a name starting with one of these is rejected by badname().
badprefix = ['jr','sr','inc','phd','ph.d','m.d','iii']
# Honorifics that may legitimately start a name; badname() strips them
# before checking the remaining words.
onlyprefix = ['mr','mrs','dr','sir','dame','rev']
# Words that indicate a corporate/organizational (non-person) name.
# NOTE: badname() looks these up with bisect, which requires a sorted
# list -- main() sorts this list before any lookups. Keep entries lowercase.
badwords = [
    'agency',
    'america',
    'american',
    'assembly',
    'association',
    'associates',
    'australia',
    'australian',
    'bank',
    'board',
    'britain',
    'british',
    'center',
    'centre',
    'church',
#    'citizen',
    'club',
    'collection',
    'commission',
    'committee',
    'commonwealth',
    'common-wealth',
    'council',
    'department',
    'dept.',
    'editors',
    'federation',
#    'friend',
    'foundation',
    'galleries',
    'gallery',
    'great britain',
    'institut.',
    'institute',
    'libraries',
    'laboratory',
    'library',
    'limited',
    'ltd.',
    'magazine',
    'mission',
    'member', # whitelist things which start 'member of' to catch oldie worldy anon. authors
    'museum',
#    'New York',
    'office',
    'program',
    'project',
    'publications',
    'research',
    'school',
    'schools',
    'secretariat',
    'service',
    'services',
    'society', # Not 100% because of "member of ___ society"
    'staff',
    'trust', # more for stock holders than authors
    'university',
    'and',
    '&',
    'author',
    'none',
    'on',
    'pseud',
    'unk',
    'unknown',
    'with',
    'by',
    'gmbh',
    'tbd',
    'the'
    ]
# Cached list length; used by badname() as the bisect upper bound.
badwordsmax = len(badwords)
def badname(s):
    """Return True if *s* looks like a bad (non-personal) author name.

    Heuristics: reject names starting with a suffix/credential token
    (badprefix); skip a leading honorific (onlyprefix); reject non-initial
    words ending in a period; reject any word found in the sorted
    ``badwords`` list (main() must sort it before calling, since lookup
    uses bisect).
    """
    # TODO separate flags for compound name, corporate name, illegal characters, etc
    # TODO check for bad punctuation, particularly reverse parens x) yyy (z
    word = s.split()
    if not word:
        # Blank/whitespace-only name: treat as bad instead of raising
        # IndexError on word[0] below.
        return True
    # Check first word for things which should only be last word
    f = word[0].strip().lower()
    if f[-1] == '.':
        f = f[:-1]
    if f in badprefix:
        return True
    if f in onlyprefix:
        del word[0]
    for w in word:
        # Keep any trailing '.' in the lookup key so entries like 'dept.'
        # and 'ltd.' in badwords still match.
        lw = w.lower()
        if w[-1] == '.':
            w = w[:-1]
            # Look for non-initials which end in period
            # (allow multiple initials without spaces e.g. p.g. whodehouse)
            # Fixed: compare case-insensitively -- 'Ph.D.' used to slip past
            # the lowercase 'ph.d' exclusion and incorrectly trip this rule.
            wl = w.lower()
            if len(w) > 2 and w[1] != '.' and wl != 'ph.d' and wl != 'ste':
                return True
        b = bisect_left(badwords, lw)
        # Fixed off-by-one: the old bound (badwordsmax - 1) could never
        # match the alphabetically last entry of the sorted badwords list.
        if b < badwordsmax and badwords[b] == lw:
            # TODO add secondary check for ambiguous words which can be personal names e.g. Service
            return True
    # Check against name list and kick low probability names?
    # check for all caps
    # check for place names
    return False
def main():
    """Scan all /book/author people on the given host and report names
    that badname() flags as bogus, as tab-separated audit lines."""
    parser = OptionParser()
    parser.add_option('-s', '--host', dest='host', help='service host', default = 'www.freebase.com')
    (options, args) = parser.parse_args()
    host = options.host
    # badname() uses bisect, which requires the word list to be sorted.
    badwords.sort()
    print 'Host: %s' % host
    session = HTTPMetawebSession(host)
    q = {'t1:type':'/book/author',
         't2:type':'/people/person',
         'id' : None,
         'name' : None,
         'creator' : None,
#         'timestamp<=' : '2009-05-01',
#         'timestamp>=' : '2009-05-01',
#         'timestamp<=' : '2009-07-30',
#         'timestamp>=' : '2009-06-30',
#         'creator':[{'type':'/type/attribution',
#                     'creator':'/user/book_bot'}]
         }
    total = 0
    bad = 0
    for r in session.mqlreaditer([q]):
        total += 1
        if r.name and badname(r.name):
            bad += 1
            print '\t'.join([str(bad), str(total), r.id,r.name,r.creator]).encode('utf-8')
# Script entry point: time-stamped run of the author-name audit.
if __name__ == '__main__':
    print 'Starting at %s' % str(datetime.now())
    main()
    print 'Done at %s' % str(datetime.now())
| Python |
import uno
import unohelper
from json import JSONEncoder
from com.freebase.util.JsonEncode import XJsonEncode
# JsonEncode OOo Calc Add-in implementation.
# Based on example by jan@biochemfusion.com April 2009.
class JsonEncodeImpl( unohelper.Base, XJsonEncode ):
    """UNO component implementing the XJsonEncode Calc add-in interface."""

    def __init__( self, ctx ):
        # ctx is the UNO component context supplied by the loader.
        self.ctx = ctx
        self.enc = JSONEncoder()

    def jsonEncode( self, s):
        """Return *s* encoded as a JSON string."""
        return self.enc.encode(s)

    def fbKeyEncode( self, s):
        '''Perform the special encoding needed to create a Freebase key'''
        # Spaces become underscores, then quotekey() (defined later in this
        # module) escapes characters that are not legal in an MQL key.
        return quotekey(s.replace(' ','_'))
def createInstance(ctx):
    """Factory entry point used by the UNO implementation helper."""
    return JsonEncodeImpl(ctx)
# Register the component factory with OpenOffice as a Calc add-in.
g_ImplementationHelper = unohelper.ImplementationHelper()
g_ImplementationHelper.addImplementation( \
    createInstance,"com.freebase.util.JsonEncode.python.JsonEncodeImpl",
    ("com.sun.star.sheet.AddIn",),)
# From mqlkey.py
# ========================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ========================================================================
import string
import re
def quotekey(ustr):
    """
    quote a unicode string to turn it into a valid namespace key

    Letters and digits pass through; '_' and '-' are allowed only in the
    interior; every other character becomes a '$XXXX' 4-hex-digit escape.
    NOTE: this is a copy of quotekey() from mqlkey.py -- keep in sync.
    """
    valid_always = string.ascii_letters + string.digits
    valid_interior_only = valid_always + '_-'
    if isinstance(ustr, str):
        s = unicode(ustr,'utf-8')
    elif isinstance(ustr, unicode):
        s = ustr
    else:
        raise ValueError, 'quotekey() expects utf-8 string or unicode'
    output = []
    # First character uses the strict set (no '_'/'-').
    if s[0] in valid_always:
        output.append(s[0])
    else:
        output.append('$%04X' % ord(s[0]))
    # Interior characters may also be '_' or '-'.
    for c in s[1:-1]:
        if c in valid_interior_only:
            output.append(c)
        else:
            output.append('$%04X' % ord(c))
    # Last character treated like the first (strict set); skipped for
    # single-character input, which was already emitted above.
    if len(s) > 1:
        if s[-1] in valid_always:
            output.append(s[-1])
        else:
            output.append('$%04X' % ord(s[-1]))
    return str(''.join(output))
| Python |
# ========================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ========================================================================
import string
import re
def quotekey(ustr):
    """
    quote a unicode string to turn it into a valid namespace key

    Letters and digits pass through; '_' and '-' are allowed only in the
    interior; every other character becomes a '$XXXX' 4-hex-digit escape.
    NOTE: duplicated in jsonencode.py -- keep the copies in sync.
    """
    valid_always = string.ascii_letters + string.digits
    valid_interior_only = valid_always + '_-'
    if isinstance(ustr, str):
        s = unicode(ustr,'utf-8')
    elif isinstance(ustr, unicode):
        s = ustr
    else:
        raise ValueError, 'quotekey() expects utf-8 string or unicode'
    output = []
    # First character uses the strict set (no '_'/'-').
    if s[0] in valid_always:
        output.append(s[0])
    else:
        output.append('$%04X' % ord(s[0]))
    # Interior characters may also be '_' or '-'.
    for c in s[1:-1]:
        if c in valid_interior_only:
            output.append(c)
        else:
            output.append('$%04X' % ord(c))
    # Last character treated like the first (strict set); skipped for
    # single-character input, which was already emitted above.
    if len(s) > 1:
        if s[-1] in valid_always:
            output.append(s[-1])
        else:
            output.append('$%04X' % ord(s[-1]))
    return str(''.join(output))
def unquotekey(key, encoding=None):
    """
    unquote a namespace key and turn it into a unicode string

    Reverses quotekey(): letters/digits pass through, '_'/'-' are accepted
    only in the interior of the key, and '$XXXX' escapes decode as 4-digit
    hex character ordinals.

    Args:
        key: the quoted key string.
        encoding: if given, the result is encoded to bytes with it.

    Raises:
        ValueError: on a character that is invalid at its position.
    """
    valid_always = string.ascii_letters + string.digits
    output = []
    i = 0
    while i < len(key):
        if key[i] in valid_always:
            output.append(key[i])
            i += 1
        elif key[i] in '_-' and i != 0 and i != len(key) - 1:
            # Interior-only characters. Fixed: the old test was
            # `i != len(key)`, which is always true inside the loop, so a
            # trailing '_'/'-' was silently accepted even though quotekey()
            # never produces one.
            output.append(key[i])
            i += 1
        elif key[i] == '$' and i + 4 < len(key):
            # may raise ValueError if there are invalid characters
            output.append(unichr(int(key[i + 1:i + 5], 16)))
            i += 5
        else:
            raise ValueError("unquote key saw invalid character '%s' at position %d" % (key[i], i))
    ustr = u''.join(output)
    if encoding is None:
        return ustr
    return ustr.encode(encoding)
# should this also include "'()" into safe?
def urlencode_pathseg(data):
    '''
    urlencode for placement between slashes in an url.

    Accepts a utf-8 str or a unicode string; returns a %-quoted str with
    the characters "~:@$!*,;=&+" left unescaped.
    '''
    # Fixed: this module never imported urllib, so every call raised
    # NameError. Imported locally to keep the fix self-contained.
    import urllib
    if isinstance(data, unicode):
        data = data.encode('utf_8')
    return urllib.quote(data, "~:@$!*,;=&+")
def id_to_urlid(id):
    """
    convert a mql id to an id suitable for embedding in a url path.

    Handles guid ('~...'), hash ('#...') and slash-path ids.  NOTE: the
    asserts guard programmer errors and are stripped under ``python -O``.
    """
    # Fixed: validate before touching the value -- the original called
    # id.split('/') first, so a non-string argument raised AttributeError
    # before this assert could produce its message.
    assert isinstance(id, str) and id != '', 'bad id "%s"' % id
    segs = id.split('/')
    if id[0] == '~':
        assert len(segs) == 1
        # assume valid, should check
        return id
    if id[0] == '#':
        assert len(segs) == 1
        # assume valid, should check
        return '%23' + id[1:]
    if id[0] != '/':
        raise ValueError('unknown id format %s' % id)
    # ok, we have a slash-path
    # requote components as keys and rejoin.
    # urlids do not have leading slashes!!!
    return '/'.join(urlencode_pathseg(unquotekey(seg)) for seg in segs[1:])
| Python |
# XML generation for JsonEncode OOo Calc Add-in
# based on DoobieDoo example by jan@biochemfusion.com April 2009.
# A unique ID for the add-in.
# Extension identity/metadata used in the generated XML files below.
addin_id = "com.freebase.util.JsonEncode"
addin_version = "0.02"
addin_displayname = "A JSON encoder add-in. Provides JSONENCODE() and FBKEYENCODE() functions."
addin_publisher_link = "http://tfmorris.blogspot.com"
addin_publisher_name = "Tom Morris"
# description.xml
#
# Extension descriptor: identifier, version, display name and publisher.
desc_xml = open('description.xml', 'w')
desc_xml.write('<?xml version="1.0" encoding="UTF-8"?>\n')
desc_xml.write('<description xmlns="http://openoffice.org/extensions/description/2006" \n')
desc_xml.write('xmlns:d="http://openoffice.org/extensions/description/2006" \n')
desc_xml.write('xmlns:xlink="http://www.w3.org/1999/xlink"> \n' + '\n')
desc_xml.write('<dependencies> \n')
desc_xml.write(' <OpenOffice.org-minimal-version value="2.4" d:name="OpenOffice.org 2.4"/> \n')
desc_xml.write('</dependencies> \n')
desc_xml.write('\n')
desc_xml.write('<identifier value="' + addin_id + '" /> \n')
desc_xml.write('<version value="' + addin_version + '" />\n')
desc_xml.write('<display-name><name lang="en">' + addin_displayname + '</name></display-name>\n')
desc_xml.write('<publisher><name xlink:href="' + addin_publisher_link + '" lang="en">' + addin_publisher_name + '</name></publisher>\n')
desc_xml.write('\n')
desc_xml.write('</description> \n')
# Fixed: was `desc_xml.close` (attribute access, a no-op) -- the file was
# never explicitly closed/flushed.
desc_xml.close()
def add_manifest_entry(xml_file, file_type, file_name):
    """Write one <manifest:file-entry> element for *file_name* of *file_type*."""
    entry = ('<manifest:file-entry manifest:media-type="application/vnd.sun.star.'
             + file_type + '" \n manifest:full-path="' + file_name + '"/> \n')
    xml_file.write(entry)
# manifest.xml
#
# List of files in package and their types.
# Package manifest: lists every file in the .oxt and its media type.
manifest_xml = open('manifest.xml', 'w')
manifest_xml.write('<manifest:manifest>\n')
add_manifest_entry(manifest_xml, 'uno-typelibrary;type=RDB', 'XJsonEncode.rdb')
add_manifest_entry(manifest_xml, 'configuration-data', 'CalcAddIn.xcu')
add_manifest_entry(manifest_xml, 'uno-component;type=Python', 'jsonencode.py')
manifest_xml.write('</manifest:manifest> \n')
# Fixed: was `manifest_xml.close` (a no-op expression) -- the file was
# never explicitly closed.
manifest_xml.close()
# CalcAddIn.xcu
#
#
# Identity of the UNO component defined in jsonencode.py.
# instance_id references the named UNO component instantiated by Python code (that is my understanding at least).
instance_id = "com.freebase.util.JsonEncode.python.JsonEncodeImpl"
# Name of the corresponding Excel add-in if you want to share documents across OOo and Excel.
excel_addin_name = "JsonEncode.xlam"
def define_function(xml_file, function_name, description, parameters):
    """Write the CalcAddIn.xcu <node> subtree describing one add-in function.

    *parameters* is a sequence of (displayname, description) pairs; a
    display name wrapped in square brackets marks an optional parameter.
    """
    parts = [
        ' <node oor:name="' + function_name + '" oor:op="replace">\n',
        ' <prop oor:name="DisplayName"><value xml:lang="en">' + function_name + '</value></prop>\n',
        ' <prop oor:name="Description"><value xml:lang="en">' + description + '</value></prop>\n',
        ' <prop oor:name="Category"><value>Add-In</value></prop>\n',
        ' <prop oor:name="CompatibilityName"><value xml:lang="en">AutoAddIn.JsonEncode.' + function_name + '</value></prop>\n',
        ' <node oor:name="Parameters">\n',
    ]
    for display_name, param_desc in parameters:
        # Optional parameters will have a displayname enclosed in square brackets.
        bare_name = display_name.strip("[]")
        parts.append(' <node oor:name="' + bare_name + '" oor:op="replace">\n')
        parts.append(' <prop oor:name="DisplayName"><value xml:lang="en">' + bare_name + '</value></prop>\n')
        parts.append(' <prop oor:name="Description"><value xml:lang="en">' + param_desc + '</value></prop>\n')
        parts.append(' </node>\n')
    parts.append(' </node>\n')
    parts.append(' </node>\n')
    xml_file.write(''.join(parts))
#
# CalcAddIn.xcu: registers the add-in functions and their parameters.
calcaddin_xml = open('CalcAddIn.xcu', 'w')
calcaddin_xml.write('<?xml version="1.0" encoding="UTF-8"?>\n')
calcaddin_xml.write('<oor:component-data xmlns:oor="http://openoffice.org/2001/registry" xmlns:xs="http://www.w3.org/2001/XMLSchema" oor:name="CalcAddIns" oor:package="org.openoffice.Office">\n')
calcaddin_xml.write('<node oor:name="AddInInfo">\n')
calcaddin_xml.write('<node oor:name="' + instance_id + '" oor:op="replace">\n')
calcaddin_xml.write('<node oor:name="AddInFunctions">\n')
define_function(calcaddin_xml, \
    'jsonEncode', 'Encode string in JSON format.', \
    [('s1', 'The string to be encoded as JSON.')])
define_function(calcaddin_xml, \
    'fbKeyEncode', 'Encode a Wikipedia article name as a Freebase key.', \
    [('s1', 'The article name to be encoded.')])
calcaddin_xml.write('</node>\n')
calcaddin_xml.write('</node>\n')
calcaddin_xml.write('</node>\n')
calcaddin_xml.write('</oor:component-data>\n')
# Fixed: was `calcaddin_xml.close` (a no-op expression) -- the file was
# never explicitly closed.
calcaddin_xml.close()
# Done
| Python |
# coding: utf-8
'''
Created on Mar 11, 2009
@author: Tom Morris <tfmorris@gmail.com>
@copyright: 2009 Thomas F. Morris
@license: Eclipse Public License v1 http://www.eclipse.org/legal/epl-v10.html
(contact the author for other licensing arrangements)
'''
import codecs
import logging
from math import sqrt
from freebase.api import HTTPMetawebSession, MetawebError
from person import Person
# Minimum local (date/type/name/blurb) score for resolve() to accept a
# unique candidate.
SCORE_THRESHOLD = 1.9
SEARCH_RELEVANCE_THRESHOLD = 8.0 # This may change over time
class FbPerson(Person):
    '''
    A Person subclass that resolves a person against Freebase by combining
    search-API hits with MQL name/alias lookups and local scoring.
    '''
    def __init__(self):
        '''
        Constructor
        '''
        # Resolved Freebase topic id; filled in by resolve() on success.
        self._id = None
        logging.basicConfig(level=logging.DEBUG)
        logging.getLogger().setLevel(logging.WARN) # dial down freebase.api's chatty root logging
        self._log = logging.getLogger('FbPerson')
        self._log.setLevel(logging.DEBUG)
    def name_query(self, name):
        '''
        Construct a query to look up a person in Freebase with the given name
        (or list of name alternatives)
        and return all the info we're interested in. Most things are only used
        for scoring, rather than constraining the lookup, so they're optional.

        Returns a list containing a single decorated MQL query dict.
        '''
        # Match against both the primary name and any alias, English only.
        sub_query = [{'lang' : '/lang/en',
                      'link|=' : ['/type/object/name',
                                  '/common/topic/alias'],
                      'type' : '/type/text'
                      }]
        if '*' in name:
            # Wildcard pattern match.
            sub_query[0]['value~='] = name
        elif isinstance(name,list):
            # Any of several alternative names.
            sub_query[0]['value|='] = name
        else:
            sub_query[0]['value'] = name
        query = {'/type/reflect/any_value' : sub_query,
                 't1:type' : '/people/person',
                 't2:type' : '/common/topic',
                 't3:type' : {'name' : '/people/deceased_person',
                              'optional': True},
                 'id' : None
                 }
        return [self.decorate_query(query)]
def decorate_query(self, query):
query.update({'type' : [],
'name' : None,
'/common/topic/alias': [],
'/people/person/date_of_birth' : None,
'/people/deceased_person/date_of_death' : None,
'/common/topic/article': [{'id': None,'optional' : True}]
})
return query
    def query(self, session, queries):
        '''
        Send a batch of queries to Freebase and return the set of *unique*
        results. Since we're doing lookups on both name and alias, we'll see
        the same topics multiple times.

        NOTE: consumes *queries* destructively (deletes processed slices).
        '''
        results = []
        ids = []  # topic ids already seen, for de-duplication
#        print len(repr(queries)), queries
        # Split queries in chunks to keep URL under length limit
        while len(queries) > 0:
            size = min(4, len(queries))
            query_slice = queries[:size]
            del queries[:size]
            try:
                for query_result in session.mqlreadmulti(query_slice):
                    if query_result:
                        # A list-form MQL query returns a list of matches;
                        # a dict-form query returns a single dict.
                        if isinstance(query_result, list):
                            for result in query_result:
                                if not result['id'] in ids:
                                    ids.append(result['id'])
                                    results.append(result)
                        else:
                            if not query_result['id'] in ids:
                                ids.append(query_result['id'])
                                results.append(query_result)
            except MetawebError, detail:
                self._log.error('MQL read error %s on query: %r' % (detail, query_slice))
        return results
    def fetch_blurbs(self, session, guids):
        """Fetch the article blurb for each guid.

        Returns a list of unicode blurbs parallel to *guids*: empty string
        for a None guid, and an '**Error fetching ...' placeholder (plus a
        logged error) when the fetch fails.
        """
        blurbs = []
        for guid in guids:
            if guid:
                try:
                    blurb = session.trans(guid)
                    blurbs.append(codecs.decode(blurb,'utf-8'))
                except MetawebError, detail:
                    self._log.error('Failed to fetch blurb for guid %s - %r' % (guid, detail) )
                    blurbs.append('**Error fetching %s' % guid)
            else:
                blurbs.append('')
        return blurbs
    def score(self, results, types, blurbs):
        """Score each candidate result against this person's known facts.

        Points come from birth/death date matches (exact dates score higher
        than bare years; mismatches score -1), matching types, given names /
        nickname found in name, alias or blurb, and the full name appearing
        in the blurb. Returns a list of scores parallel to *results*.
        """
        scores = []
        for r,blurb in zip(results,blurbs):
            score = 0
            if self._birth_date:
                dob = r['/people/person/date_of_birth']
                if dob:
                    # Prefix comparison lets a bare year match a full date.
                    if self._birth_date == dob[:len(self._birth_date)]:
                        if len(self._birth_date) > 4:
                            score += 2.5
                        else:
                            score += 1
                    else:
                        score += -1
                else:
#                    print codecs.encode(blurb[:60], 'utf-8')
                    # No structured date: look for it in the blurb text.
                    if blurb[:120].find(self._birth_date) > 0:
                        score += 1
            if self._death_date:
                dod = r['/people/deceased_person/date_of_death']
                if dod:
                    if self._death_date == dod[:len(self._death_date)]:
                        if len(self._death_date) > 4:
                            score += 3
                        else:
                            score += 1
                    else:
                        score += -1
                elif blurb[:120].find(self._death_date) > 0:
                    score += 1
            for t in types:
                if t in r['type']:
                    score += 1
            # Look for given & nick names in name, alias, or blurb
            names = list(self._given_names)
            if self._nickname:
                names.append(self._nickname)
            for n in names:
                if r['name'] and r['name'].find(n) >= 0:
                    score += 0.5
                elif r['/common/topic/alias']:
                    for a in r['/common/topic/alias']:
                        if a.find(n) >= 0:
                            score += 0.5
                else:
                    if blurb[:60].find(n) >= 0:
                        score += 0.5
            b = self.normalize_quotes(blurb[:60])
            if b.find(self.format_full_name()) >= 0:
                score += 2 # big bonus!
            scores.append(score)
        return scores
def mean(self, values):
if not values or len(values) == 0:
return 0
total = 0
for v in values:
total += v
return total / len(values)
def stddev(self, values):
if not values or len(values) == 0:
return 0
mean = self.mean(values)
squared_diffs = 0
for v in values:
x = v - mean
squared_diffs += (x * x)
return sqrt(squared_diffs/len(values))
    def score_search(self, results):
        """Pick the best search-API hit above SEARCH_RELEVANCE_THRESHOLD.

        Returns (score, id, name); id and name are None when no result
        qualifies. (NOTE: the local `id` shadows the builtin.)
        """
        id = None
        score = 0
        name = None
        print '  Search found %d results for %s - above threshold:' % (len(results), self.format_name_with_dates().encode('utf-8','ignore'))
        for r in results:
            s = r['relevance:score']
            if s > SEARCH_RELEVANCE_THRESHOLD and s > score:
                print '    search result %f %s %s' % (r['relevance:score'], r['id'], r['name'].encode('utf-8','ignore'))
                score = r['relevance:score']
                id = r['id']
                name = r['name']
        return score, id, name
def resolve(self, session, types=[]):
if self._id:
return 1
types = ['/people/person', '/government/politician']
if self._death_date:
types.append('/people/deceased_person')
dob = ''
if self._birth_date:
dob = self._birth_date
search_string = (self.format_full_name() + " " + dob).encode('utf8')
search_results = session.search(search_string, type=types)
search_score, search_id, search_name = self.score_search(search_results)
if not search_id:
# Search only indexes anchor text, so if birth year wasn't in an
# anchor, it can cause search to return no results - try again with it
search_string = self.format_full_name().encode('utf8')
search_results = session.search(search_string, type=types)
search_score, search_id, search_name = self.score_search(search_results)
all_names = self.format_all_names()
removes = []
for n in all_names:
if len(n.strip().split()) == 1:
removes.append(n) # don't search on one word names
for n in removes:
all_names.remove(n)
if not self._name_suffix:
full_name = self.format_full_name()
first_mi_name = self.format_name(["first", "mi"])
for s in [', Sr.',', Jr.',' Sr.',' Jr.',' Sr',' Jr',' III',' IV']:
all_names.add(full_name + s)
if first_mi_name != full_name:
all_names.add(first_mi_name + s)
# query = [self.name_query(n) for n in all_names]
query = self.name_query([n for n in all_names])
results = self.query(session, [query])
if search_id:
search_result_found = False
for r in results:
if r['id'] == search_id:
search_result_found = True
break
if not search_result_found:
print '***First search result (%s) not in our result set, adding it' % search_id
results.extend(self.query(session, [self.decorate_query({'id':search_id})] ))
blurbs = self.fetch_blurbs(session, [(r['/common/topic/article'][0]['id'] if r['/common/topic/article'] else None) for r in results ])
if len(results) == 1:
score = self.score(results, types, blurbs)
id = results[0]['id']
if search_id:
if id != search_id:
print ('*WARNING - Search result (%f %s %s) does not match our unique (%f %s) for %s' % (search_score, search_id, search_name, score[0], id, self.format_name_with_dates())).encode('utf8')
# return 0
else:
print ('*WARNING - Search failed - our algorithm found unique (%f %s) for %s' % (score[0], id, self.format_name_with_dates())).encode('utf8')
if score > SCORE_THRESHOLD:
self._id = id
return 1
else:
print ("**Got unique match with low score %f %s for %s" % (score, id, self.format_name_with_dates())).encode('utf8')
return 0
else:
scores = self.score(results, types, blurbs)
max_score = second_score = 0
result = None
for i in range(0,len(scores)):
r = results[i]
n = ''
if r['name']:
n = codecs.encode(r['name'], 'utf-8')
print ' ', scores[i], n, r['/people/person/date_of_birth'],r['id']
if scores[i] >= max_score:
second_score = max_score
max_score = scores[i]
result = r
elif scores[i] >= second_score:
second_score = scores[i]
# Compute mean and std deviation without our top score (hopefully it's an outlier)
mean = stddev = 0.0
if scores:
if max_score in scores:
scores.remove(max_score)
mean = self.mean(scores)
stddev = self.stddev(scores)
# If top score is more than one std dev from mean, accept the guess
if result:
threshold = max(0.5, stddev)
# if max_score - mean > threshold:
if max_score - second_score > threshold: # Used to check against mean, but let's be more conservative
if search_id and result['id'] != search_id:
print ('*WARNING - Search result (%f %s %s) and our algorithm (%s) do not match for %s' % (search_score, search_id, search_name, result['id'], self.format_name_with_dates())).encode('utf8')
# else:
self._id = result['id']
print ' Selected from %d ' % len(results), 'high: %.1f' % max_score, 'second: %.1f' % second_score, 'mean: %.2f ' % mean, 'stddev: %.3f ' % stddev, result['id'], ' for ', self.format_name_with_dates().encode('utf-8', 'ignore')
return 1
else:
id = result['id']
print '*ERROR - No score above threshold. Best score: %.1f ' % max_score, 'second: %.1f' % second_score, 'mean: %.2f ' % mean, 'stddev: %.3f ' % stddev, id, ' for ', self.format_name_with_dates().encode('utf8')
# print results
return len(results)
def main():
    """Ad-hoc test driver: resolve a list of sample names (either a bare
    name string or a [name, birth_date] pair) against the sandbox."""
    names = [['Clarence J. Brown, Jr.', '1927-06-18'],
             ['William Venroe Chappell', '1922'],
#             ['William Vollie "Bill" Alexander','1934'],
#             ['James Allison', '1772'],
#             ['Henry Brush', '1778'],
#             ['Judah Philip Benjamin', '1811'],
#             ['James Alexander', '1789'],
#             ['James Lusk Alcorn', '1816'],
#             ['James Franklin Aldrich', '1853'],
#             ['Thomas Adams', '1730'],
#             ['George Everett Adams', '1840'],
#             ['Charles Francis Adams', '1807'],
#             ['Robert Adams', '1849'],
#             'Aníbal Acevedo-Vilá',
#             'William Czar Bradley',
#             'Hazel Hempel Abel',
#             'Hazel Abel',
#             'James "Whitey" Bulger',
#             'Brockman "Brock" ADAMS',
#             'W. Todd Akin',
#             'Charles Francis ADAMS',
#             'George BAER',
             ]
    session = HTTPMetawebSession('api.sandbox-freebase.com')
    for n in names:
        p = FbPerson()
        if isinstance(n, str):
            p.parse(n)
        else:
            p.parse(n[0])
            p._birth_date = n[1]
        found = p.resolve(session)
        print n, '-', p.format_full_name(), found, repr(p._id)
        pass
if __name__ == '__main__':
from freebase.api import HTTPMetawebSession, MetawebError
main() | Python |
'''
Created on Mar 6, 2009
@author: Tom Morris <tfmorris@gmail.com>
@copyright: 2009 Thomas F. Morris
@license: Eclipse Public License v1 http://www.eclipse.org/legal/epl-v10.html
'''
from __future__ import with_statement
import codecs
import csv
from datetime import datetime
from freebase.api import HTTPMetawebSession, MetawebError
from freebase_person import FbPerson
# Target host; switch to the commented value for production writes.
host = 'api.sandbox-freebase.com' #'api.freebase.com'
username = 'tfmorris'
# SECURITY NOTE(review): credential hardcoded in source -- should be read
# from a config file or prompted for (getpass is already imported).
password = 'password'
# Types we would like matched people to carry.
desired_types = ['/people/person',
                 '/people/deceased_person',
                 '/government/politician',
                 '/user/tfmorris/default_domain/us_congressperson'
                 ]
def unicode_csv_reader(unicode_csv_data, dialect=csv.excel, **kwargs):
    """Wrap csv.reader so it accepts and yields unicode rows (the Python 2
    csv module only handles byte strings)."""
    # csv.py doesn't do Unicode; encode temporarily as UTF-8:
    csv_reader = csv.reader(utf_8_encoder(unicode_csv_data),
                            dialect=dialect, **kwargs)
    for row in csv_reader:
        # decode UTF-8 back to Unicode, cell by cell:
        yield [unicode(cell, 'utf-8') for cell in row]
def utf_8_encoder(unicode_csv_data):
    """Lazily encode each line of *unicode_csv_data* as UTF-8."""
    for row in unicode_csv_data:
        yield row.encode('utf-8')
def name_parse(person, name):
    """Parse a bioguide-style 'Last, First (Nick) Middle, Suffix' string
    into the person's name fields: family name is title-cased, a
    parenthesized nickname is extracted, given names are whitespace-split,
    and a third comma-piece becomes the suffix."""
    # TODO handle parentheses
    pieces = [piece.strip() for piece in name.split(',')]
    person._family_name = pieces[0].lower().title()
    given = pieces[1]
    open_paren = given.find('(')
    if open_paren >= 0:
        close_paren = given.find(')', open_paren)
        if close_paren >= 0:
            person._nickname = given[open_paren + 1:close_paren]
            given = given[:open_paren].strip() + ' ' + given[close_paren + 1:].strip()
    person._given_names = given.strip().split()
    if len(pieces) > 2:
        person._suffix = pieces[2]
def write_thomas_id(session, person, thomas_id):
    """Attach the u_s_congressperson type and thomas_id to person._id.

    Returns the mqlwrite status, or None when the write raises (the error
    is printed, not re-raised).
    """
    types = ['/base/uspolitician/u_s_congressperson',
             #'/base/uspolitician/topic', # For Freebase's kludgy base system
             ]#,'/government/politician']
    query = {'id' : person._id,
             'type' : [{'connect' : 'insert', 'id' : t} for t in types],
             '/base/uspolitician/u_s_congressperson/thomas_id'
                 : {'connect' : 'insert',
                    'value' : thomas_id},
             }
    try:
        status = session.mqlwrite(query)
        return status
    except MetawebError,e:
        print '** Error on query', e, query
def main():
    """Read bioguide1.csv and resolve each congressperson to a Freebase
    topic, writing the thomas_id on unique matches; prints running counts
    and final statistics."""
    start_time = datetime.now()
    # Resume point: skip rows already processed in earlier runs.
    start_count = 375 # 11243 + 139 + 238 # 10094 + 150 + 514
    if username:
        session = HTTPMetawebSession(host, username, password)
        session.login()
    else:
        session = HTTPMetawebSession(host)
    unique = 0
    multiple = 0
    zero = 0
    f = codecs.open('bioguide1.csv', 'r','utf-8', 'replace')
    c = unicode_csv_reader(f)
    header = c.next()
    count = 0
    for r in c:
        count += 1
        if count < start_count:
            continue
        # Columns: thomas_id, name, birth date, death date.
        thomas_id = r[0]
        person = FbPerson()
        name_parse(person, r[1])
        person._birth_date = r[2]
        if r[3] != '':
            person._death_date = r[3]
#        found = person.resolve(session, desired_types)
        found = person.resolve(session,['/government/politician'])
        if found == 1:
            unique += 1
            print unique, zero, multiple, " Match", thomas_id, person.format_name_with_dates().encode('utf-8','ignore'), person._id
            result = write_thomas_id(session, person, thomas_id)
        elif found == 0:
            zero += 1
            print unique, zero, multiple, "No match", thomas_id, person.format_name_with_dates().encode('utf-8','ignore'), person._id
        else:
            multiple += 1
            print unique, zero, multiple, ' ', found, "matches", thomas_id, person.format_name_with_dates().encode('utf-8','ignore'), person._id
    f.close()
    total = unique + zero + multiple
    print 'Unique matches: ', unique, '%2.0f%% ' % (unique * 100.0 / total)
    print 'Unable to match: ', zero, '%2.0f%% ' % (zero * 100.0 / total)
    print 'Multiple matches: ', multiple, '%2.0f%% ' % (multiple * 100.0 / total)
    print 'Total records: ', count
    print 'Elapsed time: ', datetime.now() - start_time
if __name__ == '__main__':
main() | Python |
'''
Created on Mar 6, 2009
This data loader works with the GovTrack.us data file people.xml containing
all U.S. Congress people which can be fetched from
http://www.govtrack.us/data/us/<congress>/repstats/people.xml
e.g. http://www.govtrack.us/data/us/111/repstats/people.xml
@author: Tom Morris <tfmorris@gmail.com>
@copyright: 2009 Thomas F. Morris
@license: Eclipse Public License v1 http://www.eclipse.org/legal/epl-v10.html
'''
from __future__ import with_statement
import codecs
from datetime import datetime
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
from freebase.api import HTTPMetawebSession, MetawebError
from freebase_person import FbPerson
host = 'api.sandbox-freebase.com' # 'api.freebase.com'
username = None #'tfmorris'
# NOTE(review): password is commented out but main() references it when
# username is set -- enabling username without defining password will
# raise NameError; confirm before enabling login.
#password = 'password'
# Safety switch: write_thomas_id() is a no-op unless this is True.
write = False
desired_types = ['/people/person',
                 '/people/deceased_person',
                 '/government/politician',
                 '/user/tfmorris/default_domain/us_congressperson'
                 ]
class CongresspersonXmlHandler(ContentHandler):
'''Parse a GovTrack person.xml file'''
def __init__(self):
self.level = 0
self.person_count = 0
self.session = None
self.unique = self.zero = self.multiple = self.id_match = 0
self.current_count = 0
def setSession(self,session):
self.session = session
    def parse_person(self, attrs):
        """Build an FbPerson from a <person> element's attributes; warns
        about any attribute name it does not recognize."""
        p = FbPerson()
#        p._id = attrs['bioguideid'] # Don't fill this in until we check Freebase
#        p._name = attrs['name'] # preferred name form?
        p._family_name = attrs['lastname']
        p._given_names = [attrs['firstname']]
        if 'middlename' in attrs:
            p._given_names.append(attrs['middlename'])
        if 'nickname' in attrs:
            p._nickname = attrs['nickname']
        if 'namemod' in attrs:
            p._name_suffix = attrs['namemod']
        if 'title' in attrs:
            # Title is just "Rep." or "Sen." which adds no value to search - skip it
            # p._title = attrs['title']
            pass
        if 'birthday' in attrs:
            bdate = attrs['birthday']
            # GovTrack uses 0000-00-00 for "unknown".
            if not bdate == '0000-00-00':
                p._birth_date = bdate
        if 'gender' in attrs:
            p._gender = attrs['gender']
#        p._religion = attrs['religion']
        # No death date/year?
        # Surface format changes: warn on unrecognized attributes.
        for a in attrs.keys():
            if not a in ['bioguideid', 'gender',
                         'name', 'lastname', 'firstname', 'middlename',
                         'nickname', 'title', 'namemod', 'birthday',
                         'religion', 'osid', 'metavidid', 'youtubeid', 'id',
                         'state', 'district', # Shouldn't this be part of role?:
                         ]:
                print 'Unknown person attribute ', a
        return p
    def startElement(self, name, attrs):
        """SAX callback: parse <person>, detect current <role> entries, and
        warn about unrecognized elements/attributes."""
#        print ' '[:self.level],'Starting ', name, " - ", repr(attrs)
        self.level += 1
        # TODO push element on stack
        if name == 'people':
            pass
        elif name == 'person':
            self.person_count += 1
            self.current_listing = None
            self.person = self.parse_person(attrs)
            if 'bioguideid' in attrs:
                self.id = attrs['bioguideid']
            else:
                print 'Skipping entry with no BioGuide ID' + attrs['name']
                # person.xml now includes presidents with no Congressional bioguide ID
                return
            # Resolve person against Freebase
            # write ID and other info to freebase
            # print self.person_count, person._id, person.format_name_with_dates()
        elif name == 'role':
            # A role starting after Sept 2008 marks the person as current;
            # endElement only processes people with a current listing.
            if attrs['startdate']:
                d = datetime.strptime(attrs['startdate'],'%Y-%m-%d')
                if d > datetime(2008,9,1):
                    self.current_count += 1
                    self.current_listing = ' '.join(["current", \
                        str(self.current_count), attrs.get('type', '??'), \
                        attrs.get('state', '??'), str(attrs.get('district', '??')),
                        ])
            for a in attrs.keys():
                if not a in ['type', # rep or sen
                             'startdate',
                             'enddate',
                             'party',
                             'state', # 2 letter code
                             'district', # empty for Senate, -1 for pre-district Reps
                             'url',
                             ]:
                    print 'Unknown role attribute ', a
        elif name == 'current-committee-assignment':
            for a in attrs.keys():
                if not a in ['committee',
                             'role',
                             'subcommittee'
                             ]:
                    print 'Unknown current-committee-assignment attribute ', a
        else:
            print '** Unknown element', name
        return
    def characters(self, ch):
        # Character data is unused; all information is in element attributes.
#        self.buffer += ch
        pass
    def endElement(self, name):
        """SAX callback: dispatch a completed current <person>; print the
        final match statistics when </people> closes the document."""
        self.level -=1
        if name == 'person':
            # Only people flagged current by startElement are processed.
            if self.current_listing:
                self.handle_person(self.session, self.person, self.id)
                print self.current_listing, self.id, self.person.format_name_with_dates(), self.person._id
        elif name == 'people':
            total = self.unique + self.zero + self.multiple + self.id_match
            print 'ID matches: ', self.id_match, '%2.0f%% ' % (self.id_match * 100.0 / total)
            print 'Unique matches: ', self.unique, '%2.0f%% ' % (self.unique * 100.0 / total)
            print 'Unable to match: ', self.zero, '%2.0f%% ' % (self.zero * 100.0 / total)
            print 'Multiple matches: ', self.multiple, '%2.0f%% ' % (self.multiple * 100.0 / total)
            print 'Total records: ', self.person_count
def handle_person(self, session, person, thomas_id):
    # Resolve a single parsed person against Freebase and tally the
    # outcome.  A topic already carrying this THOMAS ID wins outright;
    # otherwise fall back to a name-based resolve and count the
    # unique / zero / multiple match cases.
    result = self.query_thomas_id(session, thomas_id)
    if result:
        self.id_match += 1
        id = person._id = result['id']
        # TODO add code to verify against XML file
        # print 'Skipping ', id, person.format_name_with_dates()
        return
    # resolve() returns the number of candidate matches found.
    found = person.resolve(session,['/government/politician'])
    if found == 1:
        self.unique += 1
        print self.person_count, self.unique, self.zero, self.multiple, \
            " Match", thomas_id, \
            person.format_name_with_dates().encode('utf-8','ignore'), person._id
        # result = self.write_thomas_id(session, person, thomas_id)
    elif found == 0:
        self.zero += 1
        print self.person_count, self.unique, self.zero, self.multiple, \
            "No match", thomas_id, \
            person.format_name_with_dates().encode('utf-8','ignore'), person._id
    else:
        self.multiple += 1
        print self.person_count, self.unique, self.zero, self.multiple, ' ', found, "matches", \
            thomas_id, person.format_name_with_dates().encode('utf-8','ignore'), person._id
def query_thomas_id(self, session,thomas_id):
types = ['/base/uspolitician/u_s_congressperson',
#'/base/uspolitician/topic', # For Freebase's kludgy base system
]#,'/government/politician']
query = {'type' : '/base/uspolitician/u_s_congressperson',
'thomas_id' : thomas_id,
'id' : None,
'guid' : None,
'name' : None,
}
try:
status = session.mqlread(query)
return status
except MetawebError,e:
print '** Error on query', e, query
def write_thomas_id(self, session, person, thomas_id):
    # Write the THOMAS ID onto an already-resolved Freebase topic and
    # assert the congressperson type on it.
    # NOTE(review): `write` is a module-level flag defined elsewhere in
    # this file; confirm it is initialized before this method is called.
    if not write:
        return
    types = ['/base/uspolitician/u_s_congressperson',
             #'/base/uspolitician/topic', # For Freebase's kludgy base system
             ]#,'/government/politician']
    query = {'id' : person._id,
             'type' : [{'connect' : 'insert', 'id' : t} for t in types],
             '/base/uspolitician/u_s_congressperson/thomas_id'
             : {'connect' : 'insert',
                'value' : thomas_id},
             }
    try:
        status = session.mqlwrite(query)
        return status
    except MetawebError,e:
        print '** Error on query', e, query
def main():
start_time = datetime.now()
if username:
session = HTTPMetawebSession(host, username, password)
session.login()
else:
session = HTTPMetawebSession(host)
handler = CongresspersonXmlHandler()
handler.setSession(session)
xmlfile = open('people.xml')
parser = make_parser()
parser.setContentHandler(handler)
parser.parse(xmlfile)
xmlfile.close()
print 'Elapsed time: ', datetime.now() - start_time
if __name__ == '__main__':
main()
| Python |
# coding: utf-8
'''
Class to encapsulate handling of attributes associated with a person such
as name(s), birth & death dates, etc.
Includes code to attempt to parse a name into its component pieces.
Created on Mar 7, 2009
@author: Tom Morris <tfmorris@gmail.com>
@copyright: 2009 Thomas F. Morris
@license: Eclipse Public License v1 http://www.eclipse.org/legal/epl-v10.html
'''
import logging
import string
class Person(object):
    '''
    Encapsulates the attributes of a person -- names, birth/death dates,
    title, suffix -- and best-effort parsing of a free-text name string
    into those component pieces.  Parsing is inherently unreliable;
    prefer structured data when available.
    '''
    # Lower-case particles that begin a multi-word surname.
    PREFIX_MULTI_WORD_SURNAME = ['de', 'des', 'del', 'van', 'von', 'te', 'ten', 'ter' ]

    # Class-level defaults, re-bound per instance in __init__ so state is
    # never shared between Person objects.
    _given_names = []     # All given names, in order, with usual punctuation
    _nickname = None      # Often a shortened/familiar form, but not necessarily
    _family_name = None   # May be unused e.g. in patronymic societies
    _birth_date = None
    _death_date = None
    _title = None         # Dr., Rev., Gen., General, etc. TODO - needs more work
    _name_suffix = None   # Generational (Jr., III) or postnominals - TODO split?
    _gender = None        # 'M', 'F', or 'O' (Other covers all intersexed variants)
    _religion = None      # TODO model as more than a simple value

    def __init__(self):
        '''
        Constructor.  Binds fresh per-instance attributes.
        '''
        # BUGFIX: the original passed the *function* logging.debug where a
        # level was expected; use the standard logger factory instead.
        self._log = logging.getLogger('person')
        # BUGFIX: give each instance its own list so parses of one Person
        # can never leak into another through the class-level default.
        self._given_names = []
        self._nickname = None
        self._family_name = None
        self._birth_date = None
        self._death_date = None
        self._title = None
        self._name_suffix = None
        self._gender = None
        self._religion = None

    def parse_multi_word_surname(self, pieces):
        '''If a surname particle (van, de, ...) is present, treat it and
        everything after it as the family name.  Returns True on a match
        so callers can stop parsing.'''
        for p in pieces:
            if p in self.PREFIX_MULTI_WORD_SURNAME:
                idx = pieces.index(p)
                self._family_name = ' '.join(pieces[idx:])
                self._given_names = pieces[:idx]
                # BUGFIX: the original returned None, so parse_usa's
                # check never fired and the surname was overwritten.
                return True
        return False

    def parse_title(self, pieces):
        '''Parse and save anything that looks like a title'''
        # For now it's just any abbreviation that doesn't look like an initial
        if pieces and pieces[0].endswith('.') and len(pieces[0]) > 2:
            self._title = pieces[0]
            pieces.remove(pieces[0])
        #TODO - Compare against table of common titles (fetch from Freebase?)

    def extract_nickname(self, name_string):
        '''Extract and store any nickname and return remainder of string'''
        # Nickname in quotes
        if name_string.count('"') == 2:
            pieces = name_string.split('"')
            name_string = pieces[0] + pieces[2]
            self._nickname = pieces[1]
        # Nickname in parentheses
        p1 = name_string.find('(')
        p2 = name_string.find(')', p1)
        if p1 >= 0 and p2 >= 0:
            self._nickname = name_string[p1 + 1:p2]
            name_string = name_string[:p1].strip() + ' ' + name_string[p2 + 1:].strip()
        return name_string

    def count_case(self, words):
        '''Return a list of counts of words with upper, lower, and title case, in that order'''
        count = [0, 0, 0]
        for p in words:
            if p.isupper():
                count[0] += 1
            if p.islower():
                count[1] += 1
            if p.istitle():
                count[2] += 1
        # TODO - This could be used to identify SURNAMES (but isn't)
        return count

    def guess_culture(self, name):
        # TODO Identify culture / class for name
        return 'usa'

    def parse(self, name_string, culture = 'guess'):
        '''
        Attempt to parse a name string into its component parts.
        NOTE: This is inherently unreliable, so structured data should be used
        when available.
        '''
        if culture == 'guess':
            culture = self.guess_culture(name_string)
        if culture == 'usa':
            self.parse_usa(name_string)
        # elif culture == 'esp':
        #     pass
        else:
            self._log.error('Unknown cultural category for name')

    def parse_usa(self, name_string):
        '''Parse a name assumed to follow US conventions.'''
        commas = name_string.split(',')
        if len(commas) == 2:
            # could be "last, first" or "first last, Jr."
            if commas[1].find('.') > 0:
                self._name_suffix = commas[1].strip()
                name_string = commas[0]
            else:
                self._family_name = commas[0]
                # BUGFIX: store given names as a word list, not a raw
                # string (format_name joins the value, so a string was
                # joined character by character).
                self._given_names = commas[1].split()
                return
        elif len(commas) > 2:
            self._log.error('Can not parse strings with more than one comma')
            return
        name_string = self.extract_nickname(name_string)
        pieces = name_string.split()
        self.normalize_periods(pieces)
        self.parse_title(pieces)
        # counts = self.count_case(pieces)
        if not pieces:
            # Nothing left to parse (e.g. empty input).
            return
        if self.parse_multi_word_surname(pieces):
            return
        self._family_name = pieces[-1]
        self._given_names = pieces[:-1]

    def normalize_periods(self, list):
        '''Split any items which have embedded periods (not at the end)'''
        # Iterate over a snapshot: the original list is mutated in place.
        for item in list[:]:
            if '.' in item:
                i = item.index('.')
                if i >= 0 and i < len(item)-1:
                    pos = list.index(item)
                    new = [s + '.' for s in item.split('.') if s != '']
                    for n in new:
                        list.insert(pos, n)
                        pos += 1
                    list.remove(item)

    def normalize_quotes(self, text):
        ''''
        Normalize all types of quote characters to an ASCII double quote
        (really a string util, but we use it to make sure we can match nicknames)
        '''
        quote_chars = u'\u00ab\u00bb\u2018\u2019\u201a\u201b\u201c\u201d\u201e\u201f\u275b\u275c\u275d\u275e\u301d\u301e\uff02'
        for c in quote_chars:
            text = text.replace(c,u'"') # very inefficient!
        return text

    def format_name(self, options = ()):
        '''
        Return name formatted with the specified components included.
        Recognized options: given, first, fi, mi, middle, nick, suffix, title.
        '''
        result = ''
        if 'given' in options:
            result += " ".join(self._given_names)
        elif 'first' in options or 'fi' in options:
            if len(self._given_names) > 0:
                if 'fi' in options:
                    result += self._given_names[0][0] + '.'
                else:
                    result += self._given_names[0]
            if 'mi' in options and len(self._given_names) > 1:
                result += ' ' + (' '.join([n[0] + '.' for n in self._given_names[1:] ]))
            if 'middle' in options and len(self._given_names) > 1:
                result += ' ' + (' '.join([n for n in self._given_names[1:] ]))
        if self._nickname and 'nick' in options:
            if 'given' in options or 'first' in options or 'fi' in options:
                result += ' "' + self._nickname + '"'
            else:
                result += self._nickname
        if self._family_name:
            result = result + ' ' + self._family_name
        if self._name_suffix and 'suffix' in options:
            result = result + ', ' + self._name_suffix
        if self._title and 'title' in options:
            result = self._title + ' ' + result
        return result.strip()

    def format_full_name(self):
        '''
        Return name in preferred format
        '''
        return self.format_name(['title', 'given','nick', 'suffix' ])

    def format_name_with_dates(self):
        '''Return the full name followed by "(birth-death)" dates.'''
        dob = '?'
        if self._birth_date:
            dob = self._birth_date
        dod = ''
        if self._death_date:
            dod = self._death_date
        return '%s (%s-%s)' % (self.format_full_name(), dob, dod)

    def format_all_names(self):
        '''
        Return a set of all possible name strings in order of preference
        for the culture (not the individual)
        '''
        result = []
        # nickname last
        result.append(self.format_name(['first']))
        if self._nickname:
            result.append(self.format_name(['nick']))
            result.append(self.format_name(['first', 'mi']))
            result.append(self.format_name(['first', 'mi', 'suffix']))
            result.append(self.format_name(['fi', 'mi', 'suffix']))
            result.append(self.format_name(['fi', 'middle', 'suffix']))
            result.append(self.format_name(['middle', 'suffix'])) # TODO - low priority - do in 2nd phase?
            result.append(self.format_name(['title', 'first', 'mi', 'suffix']))
            result.append(self.format_name(['first', 'mi', 'nick']))
            result.append(self.format_name(['first', 'mi', 'nick', 'suffix']))
            result.append(self.format_name(['title', 'first', 'mi', 'nick', 'suffix']))
            result.append(self.format_name(['title', 'given', 'nick', 'suffix']))
        return set(result)
def main():
names = ['Jan van der Welt',
'Jean-Paul de la Rose',
'Thomas F. Morris',
'James "Whitey" Bulger',
'James “Whitey” Bulger',
'Dr. Billy Bob Thornton',
'T. Boone Pickens',
'SMITH, James, Jr.',
'Smith, James',
'John J.T. Thomas',
'Clarence J. Brown, Jr.',
]
for n in names:
p = Person()
n = p.normalize_quotes(n)
p.parse(n)
print n, '-', p._family_name, p._given_names, p._nickname, p._title
if __name__ == '__main__':
main()
| Python |
'''
Utility to check Wikipedia Human name disambiguation pages
against Freebase Person topics.
@author: Tom Morris <tfmorris@gmail.com>
@copyright 2009 Thomas F. Morris
@license Eclipse Public License v1
'''
from bfg_session import BfgSession
import bfg_wputil
from freebase.api import HTTPMetawebSession
from datetime import datetime
import logging
def fetchTopic(fbsession, wpid):
query = [{'id': None,
'name': None,
'type': [],
'key': [{
'namespace': '/wikipedia/en_id',
'value': wpid
}]
}]
topic = fbsession.mqlread(query)
if topic:
t = topic[0]
name = t.name
if not name:
name = ''
id = t.id
print '\t'.join([topic[0].id,name,','.join(t.type)]).encode('utf-8')
return topic
def wpHndisPages(bfgSession, limit):
    '''Yield the Wikipedia page IDs of every human-name disambiguation
    page, however it was coded: either placed in the category directly,
    or pulled in indirectly via a {hndis} / {hndis-cleanup} template call.
    '''
    # Simple case first: articles categorized directly.
    for page_id in bfg_wputil.categoryPages(bfgSession,
                                            'Human name disambiguation pages',
                                            limit):
        yield page_id
    # Indirect case: articles that invoke one of the hndis templates.
    for template_name in ('Hndis', 'Hndis-cleanup'):
        for page_id, _ in bfg_wputil.templatePages(bfgSession, template_name, limit):
            yield page_id
def main():
    """Scan Wikipedia hndis pages and print their Freebase topics."""
    logging.basicConfig(level=logging.DEBUG)
    # Dial down freebase.api's chatty root logging.
    logging.getLogger().setLevel(logging.WARN)
    log = logging.getLogger('language-recon')
    log.setLevel(logging.DEBUG)
    log.info("Beginning scan for Wikipedia disambiguation pages on Freebase at %s" % str(datetime.now()))
    bfgSession = BfgSession()
    fbSession = HTTPMetawebSession('http://api.freebase.com')
    for wpid in wpHndisPages(bfgSession, 30000):
        fetchTopic(fbSession, wpid)
    log.info("Done at %s" % str(datetime.now()))

if __name__ == '__main__':
    main()
| Python |
'''
Utility to extract Math Genealogy ID numbers for mathematicians who are
in both Wikipedia and Freebase
@author: Tom Morris <tfmorris@gmail.com>
@copyright 2009 Thomas F. Morris
@license Eclipse Public License v1
'''
from bfg_session import BfgSession
import bfg_wputil
from freebase.api import HTTPMetawebSession
from datetime import datetime
import logging
def fetchTopic(fbsession, wpid, mgid):
    # Look up the Freebase topic for a Wikipedia article ID and print a
    # tab-separated report line pairing it with its Math Genealogy ID.
    query = [{'id': None,
              'name': None,
              'type': [],
              'key': [{
                  'namespace': '/wikipedia/en_id',
                  'value': wpid
              }],
              '/people/person/profession':[]
              }]
    topic = fbsession.mqlread(query)
    if topic:
        t = topic[0]
        name = t.name
        if not name:
            name = ''
        id = t.id
        # Drop the noise types so the report only shows the interesting
        # ones; flag topics with no types at all.
        if '/common/topic' in t.type:
            t.type.remove('/common/topic')
        if not t.type:
            t.type.append('*NONE*')
        if '/people/person' in t.type:
            t.type.remove('/people/person')
        # http://genealogy.math.ndsu.nodak.edu/id.php?id=nnnn
        print '\t'.join(['http://genealogy.math.ndsu.nodak.edu/id.php?id='+mgid,
                         id,name,','.join(t['/people/person/profession']),','.join(t.type)]).encode('utf-8')
    return topic
def wpMathGenealogy(bfgSession, limit):
    '''Yield (wpid, result) for every page carrying a MathGenealogy
    template, requesting the template-call values in the subquery.'''
    subquery = {'!wex:a/template_call': None,
                'wex:tc/value': None}
    for page_id, row in bfg_wputil.templatePages(bfgSession, 'MathGenealogy',
                                                 limit, subquery=subquery):
        yield page_id, row
def main():
    """Map Wikipedia pages carrying a MathGenealogy template to their
    Math Genealogy IDs, then print the matching Freebase topics."""
    logging.basicConfig(level=logging.DEBUG)
    logging.getLogger().setLevel(logging.WARN) # dial down freebase.api's chatty root logging
    log = logging.getLogger('mathgen')
    log.setLevel(logging.DEBUG)
    log.info("Beginning scan at %s" % str(datetime.now()))
    bfgSession = BfgSession()
    fbSession = HTTPMetawebSession('http://api.freebase.com')
    idmap = {}
    pages = wpMathGenealogy(bfgSession, 30000)
    for wpid, result in pages:
        if 'wex:tc/value' in result:
            # TODO we should really be checking the metadata form {'param':'id'} to distinguish
            # from {'param':'name'} but we'll just check the data instead since we don't get metadata
            # returned from the treequery API
            # BUGFIX: reset per page; previously a stale value could leak
            # from the preceding iteration (or be unbound on the first one
            # when the value list was empty).
            mgid = None
            for param in result['wex:tc/value']:
                try:
                    mgid = str(int(param))
                    break
                except ValueError:
                    mgid = None
            if mgid:
                if wpid in idmap:
                    if idmap[wpid] != mgid:
                        log.error('*error* - WPID %s has two MathGen IDs %s and %s' % (wpid,idmap[wpid],mgid))
                    # else: duplicate entry for the same pair -- ignore
                else:
                    idmap[wpid] = mgid
    log.info('Number of results = %d' % len(idmap))
    for wpid in idmap:
        fetchTopic(fbSession, wpid, idmap[wpid])
    log.info("Done at %s" % str(datetime.now()))

if __name__ == '__main__':
    main()
| Python |
'''
BFG (Big F*ing Graph) Session Management
Created on Aug 14, 2009
@author: Tom Morris <tfmorris@gmail.com>
@copyright 2009 Thomas F. Morris
@copyright 2007-2009 Metaweb Technologies (see notice below)
@license Eclipse Public License v1
'''
# ==================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ====================================================================
__version__ = '0.5'
from freebase.api.httpclients import Httplib2Client, Urllib2Client, UrlfetchClient
from freebase.api.session import Delayed, logformat,HTTPMetawebSession,MetawebError
import logging
try:
from urllib import quote_plus as urlquote_plus
except ImportError:
from urlib_stub import quote_plus as urlquote_plus
try:
import jsonlib2 as json
except ImportError:
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
try:
# appengine provides simplejson at django.utils.simplejson
from django.utils import simplejson as json
except ImportError:
raise Exception("unable to import neither json, simplejson, jsonlib2, or django.utils.simplejson")
# Check for urlfetch first so that urlfetch is used when running the appengine SDK
try:
import google.appengine.api.urlfetch
http_client = UrlfetchClient
except ImportError:
try:
import httplib2
http_client = Httplib2Client
except ImportError:
import urllib2
httplib2 = None
http_client = Urllib2Client
def urlencode_weak(s):
    """Form-encode *s* while leaving ',', '/', ':' and '$' unescaped
    (spaces still become '+')."""
    return urlquote_plus(s, safe=',/:$')
# from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/361668
class attrdict(dict):
    """A dict whose entries are also reachable as attributes.

    >>> d = attrdict(a=1, b=2)
    >>> d.a
    1
    >>> d['c'] = 3
    >>> d.c
    3

    Beware: storing a key such as 'get' shadows the dict method of the
    same name on that instance.
    """
    def __init__(self, *args, **kwargs):
        # Fill the dict normally, then alias the instance __dict__ to the
        # mapping itself so attribute and item access share one store.
        dict.__init__(self, *args, **kwargs)
        self.__dict__ = self
class BfgSession(object):
    '''
    HTTP session against the BFG (Big F*ing Graph) service.  Wraps the
    /bfg/query and /bfg/treequery endpoints, submitting query parameters
    as HTTP form arguments and decoding the JSON responses into attrdicts.
    '''
    def __init__(self, service_url = 'http://data.labs.freebase.com'):
        '''
        Constructor.  service_url is the base URL of the BFG service.
        '''
        self.log = logging.getLogger()
        self.service_url = service_url
        # http_client is chosen at import time (urlfetch/httplib2/urllib2);
        # the second argument is the error callback invoked on bad status.
        self._http_request = http_client(None, self._raise_service_error)
    def treequery(self, query):
        # Experimental tree-structured query endpoint.
        return self._query('/bfg/treequery', query)
    def query(self, query):
        # Flat triple-style query endpoint.
        return self._query('/bfg/query', query)
    def _query(self, service, query):
        """Query the Big F*ing graph. The single argument is a dictionary
        containing query parameters which will be converted to HTTP form
        parameters and submitted.
        Return is a list with a dictionary per result from the query.
        """
        # self.log.info('%s: %s',
        #               service,
        #               Delayed(logformat, query))
        # NOTE(review): `subq` is computed but never used.
        subq = dict(query=query, escape=False)
        # qstr = '&'.join(['%s=%s' % (urlencode_weak(unicode(k)), urlencode_weak(unicode(v)))
        #                  for k,v in query.items()])
        r = self._httpreq_json(service, form=query)
        return self._result(r)
        pass
    def _httpreq(self, service_path, method='GET', body=None, form=None,
                 headers=None):
        """
        make an http request to the service.
        form arguments are encoded in the url, even for POST, if a non-form
        content-type is given for the body.
        returns a pair (resp, body)
        resp is the response object and may be different depending
        on whether urllib2 or httplib2 is in use?
        """
        if method == 'GET':
            assert body is None
        if method != "GET" and method != "POST":
            assert 0, 'unknown method %s' % method
        url = self.service_url + service_path
        if headers is None:
            headers = {}
        else:
            # NOTE(review): _normalize_headers is not defined or imported
            # in this module -- confirm where it comes from.
            headers = _normalize_headers(headers)
        # this is a lousy way to parse Content-Type, where is the library?
        ct = headers.get('content-type', None)
        if ct is not None:
            ct = ct.split(';')[0]
        if body is not None:
            # if body is provided, content-type had better be too
            assert ct is not None
        if form is not None:
            qstr = '&'.join(['%s=%s' % (urlencode_weak(unicode(k)), urlencode_weak(unicode(v)))
                             for k,v in form.items()])
            if method == 'POST':
                # put the args on the url if we're putting something else
                # in the body. this is used to add args to raw uploads.
                if body is not None:
                    url += '?' + qstr
                else:
                    if ct is None:
                        ct = 'application/x-www-form-urlencoded'
                        headers['content-type'] = ct + '; charset=utf-8'
                    if ct == 'multipart/form-encoded':
                        # TODO handle this case
                        raise NotImplementedError
                    elif ct == 'application/x-www-form-urlencoded':
                        body = qstr
            else:
                # for all methods other than POST, use the url
                url += '?' + qstr
        # assure the service that this isn't a CSRF form submission
        headers['x-metaweb-request'] = 'Python'
        if 'user-agent' not in headers:
            headers['user-agent'] = 'python freebase.api-%s' % __version__
        ####### DEBUG MESSAGE - should check log level before generating
        loglevel = self.log.getEffectiveLevel()
        if loglevel <= 20: # logging.INFO = 20
            if form is None:
                formstr = ''
            else:
                formstr = '\nFORM:\n ' + '\n '.join(['%s=%s' % (k,v)
                                                     for k,v in form.items()])
            if headers is None:
                headerstr = ''
            else:
                headerstr = '\nHEADERS:\n ' + '\n '.join([('%s: %s' % (k,v))
                                                          for k,v in headers.items()])
            self.log.info('%s %s%s%s', method, url, formstr, headerstr)
        # just in case you decide to make SUPER ridiculous GET queries:
        if len(url) > 1000 and method == "GET":
            method = "POST"
            url, body = url.split("?", 1)
            ct = 'application/x-www-form-urlencoded'
            headers['content-type'] = ct + '; charset=utf-8'
        return self._http_request(url, method, body, headers)
    def _result(self, r):
        # Hook point for response validation (MQL error checking is
        # disabled for BFG responses).
        # self._check_mqlerror(r)
        self.log.info('result: %s', Delayed(logformat, r))
        return r
    def _raise_service_error(self, url, status, ctype, body):
        # Error callback handed to the HTTP client: turn a failed request
        # into a MetawebError, decoding the JSON error body when present.
        is_jsbody = (ctype.endswith('javascript')
                     or ctype.endswith('json'))
        if str(status) == '400' and is_jsbody:
            r = self._loadjson(body)
            msg = r.messages[0]
            raise MetawebError(u'%s %s %r' % (msg.get('code',''), msg.message, msg.info))
        raise MetawebError, 'request failed: %s: %s\n%s' % (url, status, body)
    def _httpreq_json(self, *args, **kws):
        # Convenience wrapper: issue the request and decode the JSON body.
        resp, body = self._httpreq(*args, **kws)
        return self._loadjson(body)
    def _loadjson(self, json_input):
        # TODO really this should be accomplished by hooking
        # simplejson to create attrdicts instead of dicts.
        def struct2attrdict(st):
            """
            copy a json structure, turning all dicts into attrdicts.
            copying descends instances of dict and list, including subclasses.
            """
            if isinstance(st, dict):
                return attrdict([(k,struct2attrdict(v)) for k,v in st.items()])
            if isinstance(st, list):
                return [struct2attrdict(li) for li in st]
            return st
        if json_input == '':
            self.log.error('the empty string is not valid json')
            raise MetawebError('the empty string is not valid json')
        try:
            r = json.loads(json_input)
        except ValueError, e:
            self.log.error('error parsing json string %r' % json_input)
            raise MetawebError, 'error parsing JSON string: %s' % e
        return struct2attrdict(r)
| Python |
'''
Utilities for dealing with the WEX index for Wikipedia on Metaweb's BFG
Created on Oct 16, 2009
@author: Tom Morris <tfmorris@gmail.com>
@license: Eclipse Public License
'''
from bfg_session import BfgSession
from datetime import datetime
import logging
log = logging.getLogger('bfg-wputil')
def templatePages(bfgSession, template, limit, subquery=None):
    '''Generator which yields tuples containing the Wikipedia page IDs
    of all pages using this template, along with the WEX template call
    block which invokes the template.

    NOTE: The WPIDs have the leading wexen:wpid/ stripped to make them easy
    to use with Freebase. If using them with BFG, you'll need to prepend
    the prefix. Template call IDs are raw e.g. wexen:tcid/6541444
    so they can be easily used directly in subsequent BFG calls.
    This version uses the experimental treequery API. If it goes away or breaks
    the previous version of this method is available as templatePagesOld.
    '''
    # Avoid a mutable default argument; behavior is unchanged.
    if subquery is None:
        subquery = {'!wex:a/template_call': None}
    query = {'path': 'wex-index',
             'query': {'id': 'Template:' + template,
                       '!wex:tc/template': subquery
                       },
             'limit': limit
             }
    result = bfgSession.treequery(query)
    if not result:
        # BUGFIX: previously an empty result was replaced by [] and then
        # indexed with a string key, raising TypeError; yield nothing.
        return
    matches = result['!wex:tc/template']
    log.info('Number of pages in with Template:%s = %d', template, len(matches))
    for r in matches:
        wpid = r['!wex:a/template_call'][0]
        if wpid.startswith('wexen:wpid/'):
            yield wpid[len('wexen:wpid/'):], r
        else:
            log.warn("Found subject which is not WPID - %s", wpid)
def templatePagesOld(bfgSession, template, limit):
    '''Generator which yields tuples containing the Wikipedia page IDs
    of all pages using this template. As well as the WEX template call
    block which invokes the template.
    NOTE: The WPIDs have the leading wexen:wpid/ stripped to make them easy
    to use with Freebase. If using them with BFG, you'll need to prepend
    the prefix. Template call IDs are raw e.g. wexen:tcid/6541444
    so they can be easily used directly in subsequent BFG calls.
    '''
    # Get all calls to our template of interest
    # http://data.labs.freebase.com/bfg/index?path=wex-index&sub=&pred=wex%3Atc%2Ftemplate&obj=Template%3AHndis
    query = {'path':'wex-index',
             'sub': '',
             'pred':'wex:tc/template', #
             'obj':'Template:'+template,
             'limit': limit
             }
    result1 = bfgSession.query(query)
    total = len(result1)
    log.info('Number of pages in with Template:%s = %d', template, total)
    i = 0
    for r in result1:
        i += 1
        if i % 100 == 0:
            # Progress heartbeat; each item below costs two more queries.
            log.debug( "%d / %d" % (i, total))
        templateCall = r.s
        # Find inbound subject of which this is the object
        # (ie template call section in main article which is calling template)
        result2 = bfgSession.query({'path':'wex-index',
                                    'pred' : 'wex:a/template_call',
                                    'obj': templateCall})
        if len(result2) == 1:
            r = result2[0]
            if r.s.startswith('wexen:wpid/'):
                wpid = r.s[len('wexen:wpid/'):]
                # Third query: fetch the full template-call block itself.
                result3 = bfgSession.query({'path':'wex-index',
                                            'sub': templateCall})
                yield wpid,result3
            else:
                log.warn("Found subject which is not WPID - %s", r.s)
        else:
            log.warn("Found more than one result - %s", repr(r))
def categoryPages(bfgSession, category, limit):
    '''Yield the Wikipedia page IDs of every page placed directly in the
    given category (prefix wexen:wpid/ stripped).'''
    result = bfgSession.query({'path' : 'wex-index',
                               'sub' : '',
                               'pred' : 'wex:a/category', #
                               'obj' : 'Category:'+category,
                               'limit' : limit})
    log.info('Number of pages in Category:%s on wikipedia = %d',
             category, len(result))
    prefix = 'wexen:wpid/'
    for row in result:
        subject = row.s
        if subject.startswith(prefix):
            yield subject[len(prefix):]
        else:
            log.warn('Non-wpid subject found %s', subject)
def test():
    # Ad-hoc benchmark: run the same template scan through the new
    # treequery-based templatePages and the older multi-query
    # templatePagesOld, reporting elapsed time and latency to the first
    # yielded result for each.
    print "Beginning at %s" % str(datetime.now())
    bfgSession = BfgSession()
    count = 0
    start = datetime.now()
    for wpid,templateCall in templatePages(bfgSession, 'Infobox Language', 10000):
        count += 1
        if count == 1:
            # Time to first result measures per-item streaming latency.
            first = datetime.now() - start
    end = datetime.now()
    elapsed = end - start;
    print 'Got %d results in %s. Time to first result = %s' % (count, str(elapsed), str(first))
    count = 0
    start = datetime.now()
    for wpid,templateCall in templatePagesOld(bfgSession, 'Infobox Language', 10000):
        count += 1
        if count == 1:
            first = datetime.now() - start
    end = datetime.now()
    elapsed = end - start;
    print 'Got %d results in %s. Time to first result = %s' % (count, str(elapsed), str(first))

if __name__ == '__main__':
    test()
'''
Utility to check for and reconcile duplicate Human Language entries which
appear to come from new Wikipedia articles being created for languages and
not getting reconciled on import into Freebase.
Running time: about one hour
Latest results (15 Oct 2009) :
missing : 4
multiple matches: 226 <== duplicate topics which need merging
missing ISO1/2/3 codes: 3784
mismatched codes between Freebase and Wikipedia: 0
Created on Aug 14, 2009
@author: Tom Morris <tfmorris@gmail.com>
@copyright 2009 Thomas F. Morris
@license Eclipse Public License v1
'''
from bfg_session import BfgSession
import bfg_wputil
from freebase.api import HTTPMetawebSession
from datetime import datetime
import logging
# Module-level logging setup: quiet the root logger (freebase.api logs
# noisily there) while keeping this script's own logger at DEBUG.
logging.basicConfig(level=logging.DEBUG)
logging.getLogger().setLevel(logging.WARN) # dial down freebase.api's chatty root logging
log = logging.getLogger('language-recon')
log.setLevel(logging.DEBUG)
def extractParams(result):
    '''Pull the language name and ISO-639 codes out of a template-call
    parameter list (rows carrying 'param' and 'o' keys).

    Returns (name, codes) where codes maps e.g. 'iso2' -> 'tlh'.
    '''
    codes = {}
    name = ''
    for row in result:
        if 'param' not in row:
            continue
        param = row['param']
        if param.startswith('iso'):
            value = row['o']
            if value not in ('none', '-', '?'):
                if len(value) > 3:
                    # TODO make this a regex for 2 or 3 alpha characters
                    log.warning("**Bad ISO lang code for %s: %s" % (name, value))
                codes[param] = value
        if param == 'name':
            name = row['o']
    return name, codes
def queryLang(fbSession, wpid, code):
    '''Query Freebase by each of wpid, iso1, iso2, iso3 and see how many
    distinct topics come back.  Returns {freebase_id: {key: value}} with
    the keys/values that matched each topic.'''
    ids = {}
    for i in range(0, 4):
        query = [{'id': None,
                  "name": None,
                  "key": [{
                      "namespace": "/wikipedia/en_id",
                      "value": None
                  }],
                  "type": [],
                  "/language/human_language/iso_639_1_code": None,
                  "/language/human_language/iso_639_3_code": None,
                  "/language/human_language/iso_639_2_code": None
                  }]
        if i == 0:
            key, value = 'wpid', wpid
            query[0]['key'][0]['value'] = wpid
        else:
            key = 'iso' + str(i)
            if key not in code:
                continue
            value = code[key]
            query[0]['/language/human_language/iso_639_' + str(i) + '_code'] = value
        for fbr in fbSession.mqlread(query):
            if '/language/human_language' not in fbr.type:
                log.warn(fbr.id + ' not typed as a /language/human_language')
            ids.setdefault(fbr.id, {})[key] = value
    return ids
def main():
    """Cross-check Wikipedia 'Infobox Language' pages against Freebase
    human-language topics, reporting missing topics, duplicates, and
    missing/mismatched ISO codes."""
    log.info("Beginning at %s" % str(datetime.now()))
    bfgSession = BfgSession()
    fbSession = HTTPMetawebSession('http://api.freebase.com')
    stats = {'missing': 0, 'multiple': 0, 'mismatch': 0, 'missing_code': 0}
    for wpid, templateCall in bfg_wputil.templatePages(bfgSession, 'Infobox Language', 10000):
        # Fetch all template call items (parameters, etc)
        result = bfgSession.query({'path': 'wex-index',
                                   'sub': templateCall})
        # Extract interesting parameters (name and iso 639 codes)
        name, code = extractParams(result)
        # Now query Freebase using the Wikipedia parameters to see how
        # many different topics they resolve to
        ids = queryLang(fbSession, wpid, code)
        # The logging below will fail for unicode names when run from the
        # console (or debugger); send the output to a log file instead.
        if len(ids) == 0:
            log.warn(name + ' no Freebase topic with WPID =' + wpid + ' or codes ' + repr(code))
            stats['missing'] += 1
        elif len(ids) > 1:
            # TODO watch out for multiple infobox templates on the same Wikipedia page
            # e.g. http://en.wikipedia.org/wiki/index.html?curid=1267995#Akeanon
            log.warn(name + ' multiple Freebase topics resolved for WPID=' + wpid + ' codes=' + repr(code) + ' ' + repr(ids))
            stats['multiple'] += 1
        else:
            # BUGFIX: ids.items()[0] is an (id, codes) pair; the old code
            # tested membership against the pair itself, so every code was
            # reported missing and mismatches were never detected.
            fbId, fbCodes = ids.items()[0]
            for c in code:
                if c not in fbCodes:
                    log.warn(name + ' missing Freebase code ' + c + '=' + code[c])
                    stats['missing_code'] += 1
                elif code[c] != fbCodes[c]:
                    log.warn(name + ' code mismatch for ' + c + '=' + code[c] + '(WP)!=(FB)' + fbCodes[c])
                    stats['mismatch'] += 1
    log.info("Done at %s %s" % (str(datetime.now()), repr(stats)))

if __name__ == '__main__':
    main()
'''
Utility to extract National Register Information System IDs for
National Register of Historic Places listings which are
in both Wikipedia and Freebase using the BFG service to access
the Wikipedia EXtract (WEX).
A later stage of processing will user this information to see
which topics were misreconciled using previous strategies and
need to be merged (or split, but hopefully very few).
This took about 3 hrs to return 25,000 results in August, 2010.
@author: Tom Morris <tfmorris@gmail.com>
@copyright 2009 Thomas F. Morris
@license Eclipse Public License v1
'''
from bfg_session import BfgSession
import bfg_wputil
from freebase.api import HTTPMetawebSession
from datetime import datetime
import logging
def fetchTopic(fbsession, wpid, nrisid):
query = [{'id': None,
'name': None,
'/base/usnris/nris_listing/item_number' : None,
'key': [{
'namespace': '/wikipedia/en_id',
'value': wpid
}]
}]
topic = fbsession.mqlread(query)
if topic:
t = topic[0]
name = t.name
if not name:
name = ''
id = t.id
key = t['/base/usnris/nris_listing/item_number']
if not key:
key=''
print '\t'.join([nrisid,key,id,name])
return topic
def wpTemplateQuery(bfgSession, templateName, limit):
''' A generator that yields all pages with the given template'''
# We use the old method because the tree query used by the new API makes the server
# very unhappy with tens of thousands of results (even though that's not that many)
for wpid,result in bfg_wputil.templatePagesOld(bfgSession, templateName, limit):
yield wpid,result
def main ():
    '''Scan Wikipedia (via BFG/WEX) for pages with the NRHP infobox,
    extract each page's NRIS reference number, and print (seq, wpid, nrisid)
    rows, reporting conflicts where one page carries two different ids.'''
    logging.basicConfig(level=logging.DEBUG)
    logging.getLogger().setLevel(logging.WARN) # dial down freebase.api's chatty root logging
    log = logging.getLogger('nris')
    log.setLevel(logging.DEBUG)
    log.info("Beginning scan at %s" % str(datetime.now()))
    bfgSession = BfgSession()
    fbSession = HTTPMetawebSession('http://api.freebase.com')  # only used by the commented-out fetch loop below
    idmap = {}   # wpid -> nrisid for de-duplication
    serial = 0
    # TODO: Encode the template name
    pages = wpTemplateQuery(bfgSession, 'Infobox nrhp', 100000)
    print '\t'.join(['Seq', 'wpid', 'nrisid'])
    for wpid, result in pages:
        serial += 1
        nrisid = None
        for r in result:
            if 'param' in r and r['param'] == 'refnum':
                try:
                    # refnum values can carry trailing junk; keep the leading integer
                    nrisid = str(int(r['o'].split(' ')[0]))
                    break
                except ValueError:
                    nrisid = None
                    pass
        if nrisid:
            if wpid in idmap:
                if idmap[wpid] != nrisid:
                    # Typically articles about Historical Districts with multiple infoboxes
                    log.error('*error* - WPID %s has two nris IDs %s and %s' % (wpid,idmap[wpid],nrisid))
                else:
                    # print 'duplicate entry for %s/%s' % (wpid,nrisid)
                    pass
            else:
                idmap[wpid] = nrisid
                print '\t'.join([str(serial), wpid, nrisid])
    log.info('Number of results = %d' % len(idmap))
    # for wpid in idmap:
    #     topic = fetchTopic(fbSession, wpid, idmap[wpid])
    print "Done at %s" % str(datetime.now()) # put in output so we know it's complete
    log.info("Done at %s" % str(datetime.now()))
if __name__ == '__main__':
main()
| Python |
'''
Program to count the results of a complex query. There are times when either
Freebase is being slow or our queries are too complex for them to be able to
complete without timing out. Rather than trying to get the graph engine to
count them all for us before our timeslice expires, we iteratively fetch them
in manageable size chunks that won't time out and count them on the client.
@author: Tom Morris <tfmorris@gmail.com>
@license: EPL v1
'''
import sys
from optparse import OptionParser
import getpass
import logging
from freebase.api import HTTPMetawebSession
def main():
    '''Count untyped, Wikipedia-less topics created by mwcl_musicbrainz.

    Runs a simplified MQL query via mqlreaditer and performs the final
    only-/common/topic filtering client-side, because the full server-side
    query (kept below as q1 for reference) times out.
    '''
    parser = OptionParser()
    parser.add_option("-s", "--host", dest="host", help="service host", default = 'api.freebase.com')
    (options, args) = parser.parse_args()
    host = options.host
    print 'Host: %s' % host
    session = HTTPMetawebSession(host)
    # This is the query that we want to run, but it's too complex to complete
    # It counts all topics which were created by the bot mwcl_musicbrainz
    # and originally had the type /music/artist, but now are untyped
    # (i.e. are only typed with /common/topic). As an additional check, we make
    # sure that they don't have a key linking them to a Wikipedia article.
    # NOTE(review): q1 is intentionally unused - kept as documentation of the
    # full query that q below approximates.
    q1 = [{
        "type": "/common/topic",
        "timestamp": None,
        "id": None,
        "full:name": None,
        "t:type" :[],
        "creator": "/user/mwcl_musicbrainz",
        "original:type": [{
            "id": "/music/artist",
            "link": {
                "operation": "delete",
                "valid": False,
                "timestamp": None,
                "creator": None
            }
        }],
        "key": [{
            "namespace": "/wikipedia/en",
            "optional": "forbidden"
        }],
        "only:type": [{
            "id": None,
            "key": [{
                "namespace": "/common",
                "optional": "forbidden",
                "value": "topic"
            }],
            "optional": "forbidden"
        }],
        "return":"count"
    }]
    # Instead, let's simplify and do some of our filtering in Python after
    # we get the results back
    q = [{
        "type": "/common/topic",
        "timestamp": None,
        "id": None,
        "name": None,
        "creator": "/user/mwcl_musicbrainz",
        "original:type": [{
            "id": "/music/artist",
            "link": {
                "operation": "delete",
                "valid": False,
            }
        }],
        "key": [{
            "namespace": "/wikipedia/en",
            "optional": "forbidden"
        }],
        # Instead of filtering types, just ask for them all and filter as we count
        "t:type" :[],
        # we'll count ourselves instead of asking for a count
        # if we still have problems with timeouts, we can lower the limit
        # at the expense of a little additional overhead
        "limit" : 20
    }]
    result = session.mqlreaditer(q)
    total = 0
    matches = 0
    for r in result:
        total += 1
        # If they have types in addition to /common/topic, exclude them
        if len(r['t:type']) == 1:
            matches += 1
            print '\t'.join([str(matches),str(total),r.id,r.timestamp])
    print 'Matched %d topics from a total of %d on %s' % (matches, total, host)
if __name__ == '__main__':
main() | Python |
'''
The simplest Python program for Freebase.
Display the more recently created 10 topics with their id, name, and types.
'''
from freebase.api import HTTPMetawebSession, MetawebError
COUNT = 10
def main():
    '''Fetch and print the COUNT most recently created topics, one
    tab-separated (id, name, types) line each.'''
    session = HTTPMetawebSession('api.freebase.com')
    query = [{"t:type":"/common/topic",
              "type":[], # multiple types allowed, so return is a list
              "timestamp":None,
              "sort":"-timestamp", # sort in reverse order by creation time
              'id':None,
              'name':None,
              "limit": COUNT
              }]
    response = session.mqlread(query)
    print 'The last %d topics were : ' % COUNT
    for r in response:
        if r.name == None: # careful, it's possible to have a null name
            name = '<None>'
        else:
            name = r.name
        types = ",".join(r.type)
        print '\t'.join([r.id, name, types])
if __name__ == '__main__':
main() | Python |
'''
Program to count newly created topics of a given type (or which match a query)
for a time period. We start with the desired interval and sub-divide if
necessary due to timeouts during the counting process.
As configured, this show the dramatic spike in new book authors from the
initial OpenLibrary author load.
@author: Tom Morris <tfmorris@gmail.com>
@license: EPL v1
'''
import sys
from datetime import datetime, timedelta
from optparse import OptionParser
import getpass
import logging
from freebase.api import HTTPMetawebSession,MetawebError
def count(session, query, start_time, end_time):
query["timestamp>="] = start_time.isoformat()
query["timestamp<="] = end_time.isoformat()
try:
result = session.mqlread(query)
# Uncomment the following line to see how small the interval got before
# the query succeeded
# print "\t".join(["",start_time.isoformat(),end_time.isoformat(),str(result)])
return result
except MetawebError, e:
if e.message.find('/api/status/error/mql/timeout') < 0:
raise e
# TODO We should really check for runaway recursion in pathological cases
total = 0
slices = 4
interval = (end_time - start_time) / slices
for i in range(0,slices-1):
t1 = start_time + i * interval
t2 = t1 + interval
total += count(session,query,t1,t2)
return total
def main():
start_year = 2006
end_year = 2010
types = ["/book/author","/book/book","/book/book_edition"]
parser = OptionParser()
parser.add_option("-s", "--host", dest="host", help="service host", default = 'api.freebase.com')
(options, args) = parser.parse_args()
host = options.host
print 'Host: %s' % host
session = HTTPMetawebSession(host)
q = {"type":"/book/author",
"timestamp>=" : "2009-05-06T12:00",
"timestamp<=" : "2009-05-06T18:00",
# "creator":[{"type":"/type/attribution",
# "creator":"/user/book_bot"}],
"return":"count"
}
oneday = timedelta(1)
oneweek = 7 * oneday
sixhours = oneday / 4
# TODO Analyze date type added vs date topic created
print '\t\t','\t'.join([types[i] for i in range(0,len(types))])
for year in range(start_year, end_year):
for month in range(1,13):
t1 = datetime(year, month, 1)
t2 = t1 + timedelta(30)
c=[]
for i in range(0,len(types)):
q['type'] = types[i]
c.append(count(session,q,t1,t2))
args = [str(year), str(month)]
args.extend([str(c[j]) for j in c])
print "\t".join(args)
if __name__ == '__main__':
main() | Python |
'''
Created on Aug 8, 2009
@author: Tom Morris <tfmorris@gmail.com>
@license: Eclipse Public License
@copyright: 2009 Thomas F. Morris
'''
import json
import logging
from optparse import OptionParser
from freebase.api import HTTPMetawebSession, MetawebError
SEPARATORS = (",", ":")
_log = logging.getLogger('FreebaseSession')
def getSessionFromArgs():
    '''Convenience method to create a Freebase session using the username,
    password, and host in the command line arguments (-u/-p/-s).
    Session is NOT logged in on return.'''
    parser = OptionParser()
    parser.add_option("-u", "--user", dest="user", help="Freebase username")
    parser.add_option("-p", "--password", dest="pw", help="Freebase password")
    parser.add_option("-s", "--host", dest="host", help="service host", default = 'api.sandbox-freebase.com')
    (options, args) = parser.parse_args()
    user = options.user
    pw = options.pw
    host = options.host
    # TODO not sure this is a good idea...
    # if not pw:
    #     pw = getpass.getpass()
    _log.info( 'Host: %s, User: %s' % (host, user))
    return FreebaseSession(host, username=user, password=pw)
class FreebaseSession(HTTPMetawebSession):
'''
Extended version of HTTPMetawebSession to allow wrappers with our own logging
and error handling.
'''
log = None
writes = 0
reads = 0
writeErrors = 0
readErrors = 0
deferred = True # Convert MQL to triples for later writing with triple loader
triples = []
def __init__(self,server,username,password):
super(FreebaseSession,self).__init__(server, username, password)
self.log = logging.getLogger('FreebaseSession')
self.triples = []
self.encoder = json.JSONEncoder()
def fbRead(self, query):
#log.debug([ ' Read query = ', query])
try:
self.reads += 1
response = self.mqlread(query)
except MetawebError,e:
# TODO - Is the retryable? Wait and retry
# if not retryable throw exception
self.readErrors += 1
self.log.error('**Freebase query MQL failed (%d/%d errors/attempts): %s\nQuery = %s\n' % (self.readErrors, self.reads, repr(e),repr(query)) )
return None
# log.debug([ ' Response = ',response])
return response
def triple(self, subject, predicate, object):
return self.encoder.encode({'s':subject,'p':predicate,'o':object})
def getType(self,query):
# loop through all types and return last one
return query.type
def expandProperty(self,type,prop):
propMap = {'/type/object/type' : 'type',
'/type/object/name' : 'name',
'/type/object/id' : 'id'
}
if prop[0] != '/':
prop = type + '/' + prop;
if propMap[prop]:
prop=propMap[prop]
def formatObject(self, object):
# handle CVTs and any other special requirements
operation = object.connect
if operation == 'update': # ?? can't handle??
pass
elif operation == 'insert': #OK
pass
else:
pass
operation = object.create
if operation == 'unless_connected':
pass
elif operation == 'always':
pass
else:
pass
# 'lang':'/lang/en' - OK, noop
# 'type':'/type/text' - OK, string literal
return object
def getId(self,query):
if query.mid:
return query.mid
elif query.guid:
return query.guid
elif query.id:
return query.id
return None
def triplify(self,query):
triples = []
subject = self.getId(query);
type = self.getType(query)
for k,v in query.iteritems():
pn = self.expandProperty(k)
pv = self.formatObject(v) # can expand to multiple triples and/or CVT
triples.append(self.encoder.encode(self.triple(subject,pn,pv)))
return '\n'.join(triples)
def fbWriteLater(self,query):
t = self.triplify(query)
_log.debug(t)
self.triples.extend(t)
return '' # TODO return success response
def fbWriteFlush(self):
'''Submit all pending triples for processing'''
payload= '\n'.join(self.triples)
# login right before submission to close window where server reboots can affect us
session.login()
resp,body = session.tripleSubmit(triples=payload,
job_comment='A job comment here',
data_comment="%d triples" % len(self.triples))
# if successful
self.triples = []
print resp,body
def fbWrite(self, query):
#log.debug([' Write query = ', query])
if self.deferred:
return self.fbWriteLater(self,query)
try:
self.writes += 1
response = self.mqlwrite(query)
except MetawebError,e:
# TODO - Is the retryable? Wait and retry
# if not retryable throw exception
# Don't retry quota problems - /api/status/error/mql/access Too many writes
# retry 503 Internal server error
# bad request - probably means cookie expired or server rebooted requiring new login
# timeout - retry? how many times? how quickly?
msg = e.args[0]
# Huge hack! Why do we have to do string parsing to find an error code?
if msg.find('/api/status/error/auth') > 0:
self.log.warn('Authentication error on MQL write - attempting to login again %s\n',repr(e))
self.login()
try:
response = self.mqlwrite(query)
return response
except MetawebError, e2:
pass # nested exception - fall through to standard error handling
self.writeErrors += 1
self.log.error('**Freebase write MQL failed (%d/%d failures/attempts): %s\nQuery = %s\n' % (self.writeErrors, self.writes, repr(e),repr(query)) )
return []
# log.debug([ ' Response = ',response])
return response
def fbQueryName(self,name,subquery):
'''Query Freebase for a topic with the given name (including aliases)'''
query = {'/type/reflect/any_value' : [{'lang' : '/lang/en',
'link|=' : ['/type/object/name',
'/common/topic/alias'],
'type' : '/type/text',
'value' : name}],
'name' : None, # return actual formal name of topic
'id' : None,
'guid' : None
}
query.update(subquery)
return self.fbRead([query])
def queryTypeAndName(self, type, names, createMissing = False):
'''Query server for given type(s) by name(s) in name or alias field.
Return list of GUIDs which match single type.
If "type" parameter is a list, look at all types and return list of tuples containing
(guid,[list of matching types]). createMissing may not be used for multiple types.
Return empty list if none of the names are found. Ambiguous matches are not returned.'''
if not names:
return []
if isinstance(type,list):
typeq={'type|=':type,'type':[]}
else:
typeq={'type':type}
ids = []
for name in names:
results = self.fbQueryName(name,typeq)
if not results:
self.log.debug(' '.join(['Warning: name not found', name, 'type:', repr(type)]))
if createMissing:
if isinstance(type,str):
guid = self.createTopic(name, [type])
if guid:
ids.append(guid)
self.log.info('Created new topic ' + str(guid) + ' ' + name)
else:
self.log.error('Failed to create new entry ' + name + ' type: ' + type)
else:
self.log.error('Cannot create topic when searching for multiple types')
elif len(results) == 1:
guid = results[0]['guid']
if isinstance(type,str):
ids.append(guid)
else:
ids.append((guid,results[0]['type']))
# log.debug([' found ', name])
else:
self.log.warn('Non-unique name found for unique lookup ' + name +' type: ' + repr(type))
# TODO We could create a new entry here to be manually disambiguated later
# if createMissing:
# guid = self.createTopic(name, type)
# if guid:
# ids.append(guid)
# log.info('Created new topic which may need manual disambiguation ', guid, ' ', name)
# else:
# log.error('Failed to create new entry ', name, ' type: ', type)
return ids
def createTopic(self, name, types):
common = "/common/topic"
if not common in types: # make sure it's a topic too
types.append(common)
query = {'create': 'unconditional', # make sure you've checked to be sure it's not a duplicate
'type': types,
'name' : name,
'guid' : None
}
response = self.fbWrite(query)
guid = response['guid']
return guid
def tripleSubmit(self, triples, graphport='sandbox', job_comment=None, data_comment=None,
tool_id='/guid/9202a8c04000641f800000001378d774'):
"""do a mql write. For a more complete description,
see http://www.freebase.com/view/en/api_service_mqlwrite"""
# Huge hack to swap out service URL so we can use session login cookie
domain = 'data.labs.freebase.com'
# copy cookies over to new domain
for name,c in self.cookiejar._cookies['api.freebase.com']['/'].items():
c.domain=domain
self.cookiejar.set_cookie(c)
service_url = self.service_url
self.service_url="http://" + domain + "/"
try:
form = {
'action_type':'LOAD_TRIPLE',
# 'user' :'',
# 'operator' : '/user/spreadsheet_bot',
'check_params' : False, # prevents 'not a valid bot user' authentication error
# 'pod' : graphport, # obsolete?
'comments' : job_comment,
'graphport':graphport,
'mdo_info' : {"software_tool":tool_id,
"info_source":"/wikipedia/en/wikipedia",
"name":data_comment},
'payload':triples
}
# self.log.debug('FREEQSUBMIT: %s', form)
service = '/triples/data/'
# headers = {'Accept' : 'text/plain'}
resp,body = self._httpreq(service, 'POST',
form=form)#, headers=headers)
self.log.debug('FREEQSUBMIT RESP: %r', resp)
self.log.debug('FREEQSUBMIT RESP: %r', body)
finally:
self.service_url = service_url
#self.log.info('result: %s', Delayed(logformat, r))
return resp,body
# return self._mqlresult(r)
if __name__ == "__main__":
session = FreebaseSession('api.sandbox-freebase.com','tfmorris','password')
result = session.fbWrite({"create":"unconditional","guid":None})
| Python |
'''
Perform mass 'flag for delete' operation on topics which were previously identified as
Wikipedia disambiguation pages (by bfg-wex/person-disambig.py)
!USE THIS SPARINGLY! If you adapt this application for other uses, understand that
multiple human beings will have to review each and every topic that you flag for
delete. BE SURE that they really need to be deleted and consider doing them in
chunks so as not to flood the review queue.
Created on Oct 18, 2009
@author: Tom Morris <tfmorris@gmail.com>
@license: EPL v1
'''
import sys
from optparse import OptionParser
import getpass
import logging
from freebase.api import HTTPMetawebSession
def main():
    '''Flag every topic matched by the read query for deletion by writing a
    /freebase/review_flag of kind delete for each one.

    Credentials come from -u/-p/-s options; the password is prompted for
    when not supplied. USE SPARINGLY - every flagged topic must be reviewed
    by humans.
    '''
    parser = OptionParser()
    parser.add_option('-u', '--user', dest='user', help='Freebase username', default='tfmorris')
    parser.add_option('-p', '--password', dest='pw', help='Freebase password')
    parser.add_option('-s', '--host', dest='host', help='service host', default = 'api.sandbox-freebase.com')
    (options, args) = parser.parse_args()
    user = options.user
    pw = options.pw
    host = options.host
    if not pw:
        pw = getpass.getpass()
    print 'Host: %s, User: %s' % (host, user)
    session = HTTPMetawebSession(host, username=user, password=pw)
    session.login()
    # Query which matches topics to be deleted
    q = { '!pd:/base/jewlib/judaica_owner/research_collections': [{
              'id': '/m/0cbl9hh'
          }],
          'timestamp': [{
              'type': '/type/datetime',
              'value<': '2010-09-10',
              'value>=': '2010-09-09'
          }],
          'creator': {
              'id': '/user/frankschloeffel'
          },
          'id': None,
          'name': None,
          'sort': 'name',
          'ns0:timestamp': None,
          'type': '/base/jewlib/research_collection',
          # Not already flagged for delete
          '!/freebase/review_flag/item':{'id':None,'optional':'forbidden'}
          }
    # Old style delete flagWrite query to add topic to the flagged for delete collection
    # wq = {'type' : '/freebase/opinion_collection',
    #       'authority' : '/user/'+user,
    #       'mark_for_delete': {
    #           'id' : None,
    #           'connect' : 'insert'
    #       }
    #      }
    # Write query to add topic to the flagged for delete collection
    wq = {'type': '/freebase/review_flag',
          'kind': {'id': '/freebase/flag_kind/delete'},
          'item': {'id': None},
          'create': 'unless_exists'
          }
    result = session.mqlreaditer([q])
    count = 0
    for r in result:
        count += 1
        name = r.name if r.name else '*null*'
        print '\t'.join([str(count),r.id,name])
        # one write per matched topic
        wq['item']['id'] = r.id
        session.mqlwrite(wq)
    print 'Marked a total of %d topics for delete on %s' % (count, host)
if __name__ == '__main__':
main() | Python |
'''
Remove a given type from a set of topics which match a given query.
@author: Tom Morris <tfmorris@gmail.com>
@license: EPL v1
'''
import FreebaseSession
write = False
def main():
    '''Print (and, when the module-level write flag is set, remove) the
    /people/person type from topics matching the read query.'''
    session = FreebaseSession.getSessionFromArgs();
    if write:
        # only log in when we will actually write
        session.login()
    q = {'t1:type':'/book/author',
         't2:type':'/people/person',
         'id':None,
         'name~=':'^the', # services, firm, limited, team,associates, organization, agency, project, museum, galleries, bureau, ministry, collection, books, program[me], magazine, committee, department, foundation, staff, institute, group, commission, university, publications,library,society, association
         'name':None
         }
    # alternate (currently unused) query for geographical-feature cleanup
    q2 = {'t1:type':'/geography/geographical_feature',
          't2:type':'/geography/geographical_feature_category',
          'id':None,
          'name':None
          }
    # Write query to disconnect the type from a topic
    wq = {'id':None,
          'type' : {'id':'/people/person',
                    'connect' : 'delete'}
          }
    result = session.mqlreaditer([q])
    count = 0
    for r in result:
        # if r.name.endswith(' INC'):
        count += 1
        print '\t'.join([str(count),r.id,r.name])
        if write:
            wq['id'] = r.id
            result = session.mqlwrite(wq)
            print '\t'.join([str(count),r.id,result.type.connect])
        n = r.name
        # commented-out experiment: strip a trailing period from names
        # if n[-1] == '.':
        #     n = n[:-1]
        #     q = {'id':r.id,'name':{'value':n,'lang':'/lang/en','connect':'update'}}
        #     result = session.mqlwrite(q)
    print 'Total of %d topics on %s' % (count, session.service_url)
if __name__ == '__main__':
main()
| Python |
'''
Created on September 8, 2009
Find all properties which are reflexive and expect their own type as a target.
@author: Tom Morris <tfmorris@gmail.com>
'''
from freebase.api import HTTPMetawebSession, MetawebError
PROPERTY_MAX = 250 # max number of property instances to consider
TYPE_MAX = 10000 # Max number of type instances to consider
def get_properties(session):
    '''Iterate over every /type/property topic, returning id, name, schema,
    expected type, uniqueness, and the reverse property (name/id/unique).'''
    reverse_clause = {
        'name': None,
        'id': None,
        '/type/property/unique': None,
    }
    query = {
        'type': '/type/property',
        'id': None,
        'name': None,
        'schema': None,
        'expected_type': None,
        'unique': None,
        'reverse_property': reverse_clause,
        'limit': 100,
    }
    return session.mqlreaditer([query])
def get_opposite(prop):
    '''Return (id, name) of prop's reverse property, or (None, '') when
    the property has no reverse.'''
    rev = prop.reverse_property
    if not rev:
        return None, ''
    return rev.id, rev.name
def check_consistency(prop):
    '''Warn (on stdout) when a property's id does not live under its
    declaring schema's id.'''
    if not prop.id.startswith(prop.schema):
        print ' * Schema is not parent ' + prop.schema + ' ' + prop.id
def get_counts(session, r):
    '''Return (type_count, forward_property_count, reverse_property_count)
    estimate-counts for property r's schema type and for instances using
    the property in each direction.

    Known-problematic entries (ids containing '$', the bio2rdf property)
    are skipped and reported as (-1, -1, -1).
    '''
    # Skip problematic entries
    if r.schema.find('$') >= 0 or r.id.find('/user/bio2rdf/public/bm') >= 0:
        # BUG FIX: callers unpack three values; the original returned only
        # two here, raising ValueError on the skip path.
        return -1, -1, -1
    else:
        # The count from last night's tallybot is good enough (estimate-count)
        q = {'type': r.schema, 'return': 'estimate-count'}
        type_count = session.mqlread(q)
        q.update({r.id : [{'id': None, 'optional': False}]})
        forward_property_count = session.mqlread(q)
        del q[r.id]
        q.update({r.reverse_property.id : [{'id': None, 'optional': False}]})
        reverse_property_count = session.mqlread(q)
        return type_count, forward_property_count, reverse_property_count
def which_way_up(prop):
    '''Classify a reflexive property pair by uniqueness.'''
    # return 0 for master property navigates towards root, 1 for reverse, -1 for neither
    if prop.unique and prop.reverse_property.unique:
        # Both unique probably means sequence next/previous
        return -1
    if prop.unique and not prop.reverse_property.unique:
        return 0
    if not prop.unique and prop.reverse_property.unique:
        return 1
    # Hmmm, they're both non-unique
    # let's try computing the fanout degree in each direction
    # Alternatively, traverse in each direction looking for one direction which
    # never has more than one out edge
    # NOTE(review): unimplemented - falls off the end and returns None when
    # both directions are non-unique; callers must handle None.
def out_degree(prop, nodes):
    '''Incomplete work-in-progress: intended to accumulate total and max
    out-edge counts following prop from a starting node.
    NOTE(review): cannot run as written - edges() takes no arguments and
    returns None, and next[prop][0].id indexes the property object rather
    than a node; nodes is never used.'''
    edge_count = 0
    max_edges = 0
    node_count = 0
    next = prop
    while next:
        oe = edges()
        max_edges = max(max_edges, oe)
        edge_count += oe
        node_count += 1
        next = next[prop][0].id
def edges():
    '''Placeholder edge-count helper for out_degree; never implemented.'''
    pass
def main():
    '''Scan all properties for self-referential ones (expected type equals
    the owning type), print their usage counts, and traverse each one's
    instance graph to gather subgraph/cycle statistics.'''
    session = HTTPMetawebSession('api.freebase.com')
    matches = 0
    total = 0
    # print '\t'.join(['Type ID', 'Instances', '# of Subgraphs',
    #                  'Max size of subgraph', 'Max subgraph ID',
    #                  'Avg size of Subgraph',
    #                  '# of cycles'])
    for r in get_properties(session):
        total += 1
        # Only look at properties where expected type is same as owning type
        if r.schema and r.schema == r.expected_type:
            check_consistency(r)
            opposite_id, opposite_name = get_opposite(r)
            # Skip properties with no reverse
            if not opposite_id:
                continue
            # Skip bases and user types
            if r.id[:5] == '/user' or r.id[:5] == '/base':
                continue
            matches += 1
            type_count, for_property_count, rev_property_count = get_counts(session, r)
            # Skip low usage types
            property_count = for_property_count + rev_property_count
            if property_count > 2:
                try:
                    print '\t'.join([str(matches), str(type_count),
                                     str(for_property_count), str(rev_property_count),
                                     r.name, r.id, str(r.unique),
                                     opposite_name, str(opposite_id), str(r.reverse_property['/type/property/unique'])])
                except:
                    # NOTE(review): bare except - presumably guards against
                    # names that break the join/print; confirm and narrow.
                    print '** ' + str(matches) + ' ' + repr(r)
            # For debugging skip really big sets
            if property_count < PROPERTY_MAX and type_count < TYPE_MAX:
                # print '%s - property count %d, type count %d' % (r.id, property_count, type_count)
                traverse_tree(session, r.schema, [r.id, opposite_id])
            else:
                print '\tSkipping %s - property count %d, type count %d' % (r.id, property_count, type_count)
    print '\n\nFound ' + str(matches) + ' in ' + str(total) + ' total properties.'
def visit(id, previous, items, props, seen, cyclic):
    '''Depth-first traversal of one subgraph starting at id.

    Marks every reached node in seen (1 = in progress, 2 = fully visited)
    and returns True if a cycle was detected at or below id.

    Args:
        id: topic id to visit.
        previous: id we arrived from (not followed back).
        items: dict of id -> query result for all instances of the type.
        props: the two property ids to follow (forward and reverse).
        seen: visited-marker dict, updated in place.
        cyclic: cycle flag accumulated from the caller.
    '''
    if id in seen and seen[id] > 1:
        return True
    seen[id] = 1
    if id in items:
        item = items[id]
    else:
        # If it's not in our collection, it probably means it doesn't have the right type
        return cyclic
    links = []
    for p in props:
        # TODO Track links separately for each direction/property
        links.extend([l.id for l in item[p]])
    for l in links:
        if l != previous:
            if l in seen and seen[l] > 1:
                print 'Cycle detected ' + l + ' ' + id
                cyclic = True
            else:
                cyclic |= visit(l, id, items, props, seen, cyclic)
            # Mark our down link as fully visited
            seen[l] = 2
    return cyclic
def traverse_tree(session, type_id, props):
    '''Fetch all instances of type_id holding either of the two properties,
    walk the graph they form, and print per-type subgraph statistics
    (subgraph count, max size, average size, cycle count).'''
    print 'Traversing type %s with props %s, %s' %(type_id, props[0], props[1])
    # Items with both property values
    q = {'type': type_id,
         'id' : None,
         props[0] : [{'id' :None}],
         props[1] : [{'id' :None}]
         }
    items = dict([(r.id, r) for r in session.mqlreaditer([q])])
    both = len(items)
    # Only the first property value
    q[props[0]] = [{'id' : None}]
    q[props[1]] = [{'id' : None, 'optional' : 'forbidden'}]
    more = dict([(r.id, r) for r in session.mqlreaditer([q])])
    first = len(more)
    items.update(more)
    # Only the second property value
    q[props[0]] = [{'id' : None, 'optional' : 'forbidden'}]
    q[props[1]] = [{'id' : None}]
    more = dict([(r.id, r) for r in session.mqlreaditer([q])])
    second = len(more)
    items.update(more)
    print 'Total items = %d (%d, %d, %d)' % (len(items), both, first, second)
    seen = {}   # id -> visit state, across all subgraphs
    todo = {}   # NOTE(review): never used
    subgraphs = []
    subgraph_count = 0
    subgraph_max_size = 0
    subgraph_max_id = ''
    cycle_count = 0
    for id in items:
        if not id in seen:
            # start a new subgraph rooted at this as-yet-unseen item
            subgraph = {}
            cyclic = visit(id, None, items, props, subgraph, False)
            subgraphs.append((id,subgraph))
            seen.update(subgraph)
            subgraph_count += 1
            if (len(subgraph) > subgraph_max_size):
                subgraph_max_size = len(subgraph)
                subgraph_max_id = id
            if cyclic:
                cycle_count += 1
            # print 'Subgraph id ' + id + ' size ' + str(len(subgraph))
    print '\t'.join([type_id, str(len(items)), str(subgraph_count),
                     str(subgraph_max_size), subgraph_max_id,
                     str(len(items) * 1.0 /subgraph_count),
                     str(cycle_count)])
def get_root(id, items, prop):
    '''Follow prop from id up to the root of its tree.
    Returns the root id, or None for a missing id or a multi-valued
    (non-tree) link.'''
    if not id in items:
        print "Id not found "+ id
        return None
    item = items[id]
    if not item[prop]:
        # no outgoing link: this is the root
        return id
    if len(item[prop])>1:
        # more than one parent: not a tree
        return None
    return get_root(item[prop][0].id, items, prop)
def traverse_subtree(list, map, props):
    '''Incomplete work-in-progress: pop an item from the worklist and record
    it in map; the property-following loop body was never written.'''
    item = list.pop()
    if item.id in map:
        return
    map[item.id] = item
    for p in props:
        pass
def test():
    '''Ad-hoc harness: traverse the /music/instrument family/variation
    hierarchy.'''
    session = HTTPMetawebSession('api.freebase.com')
    props = ['/music/instrument/family', '/music/instrument/variation']
    traverse_tree(session, '/music/instrument', props)
if __name__ == '__main__':
main()
# test() | Python |
'''
Created on Aug 3, 2009
@author: Tom Morris <tfmorris@gmail.com>
'''
from freebase.api import HTTPMetawebSession, MetawebError
def main():
    '''Scan recent /pipeline/merge_task and /pipeline/delete_task entries
    and report duplicates (same guid pair, or same delete guid).'''
    session = HTTPMetawebSession('api.freebase.com')
    since = "2009-10-01"
    query = [{"type":"/pipeline/merge_task",
              "timestamp":None,
              "t:timestamp>":since,
              "sort":"-timestamp",
              'id':None,
              "right_guid":None,
              "left_guid":None,
              "/pipeline/task/status":None,
              "limit":7000
              }]
    response = session.mqlread(query)
    mergeTasks = dict()   # order-independent guid-pair key -> task id
    dupes = 0
    for r in response:
        tuple = [r['left_guid'],r['right_guid']]
        # sort so that A-merged-into-B and B-merged-into-A collide
        key = ' '.join(sorted(tuple))
        if key in mergeTasks:
            dupes += 1
            print 'Duplicate pairs for GUIDs %s in tasks %s and %s' % (key,mergeTasks[key],r['id'])
        else:
            mergeTasks[key] = r['id']
    query = [{"type":"/pipeline/delete_task",
              "timestamp":None,
              "t:timestamp>" : since,
              "sort":"-timestamp",
              'id':None,
              "delete_guid":None,
              "/pipeline/task/status":None,
              "limit":9000
              }]
    response = session.mqlread(query)
    print 'Total merge dupes: %s' % dupes
    print
    deleteTasks = dict()   # delete_guid -> task id
    dupes = 0
    for r in response:
        key = r['delete_guid']
        if key in deleteTasks:
            dupes += 1
            print 'Duplicate GUID %s in delete tasks %s and %s' % (key,deleteTasks[key],r['id'])
        else:
            deleteTasks[key] = r['id']
    print 'Total delete dupes: %s' % dupes
if __name__ == '__main__':
main() | Python |
'''
Geographic utilities
Created on Feb 28, 2009
@author: Tom Morris <tfmorris@gmail.com>
@copyright: 2009,2010 Thomas F. Morris
@license: Eclipse Public License v1 http://www.eclipse.org/legal/epl-v10.html
'''
import logging
from math import cos, sqrt
from pyproj import Proj, transform
from FreebaseSession import FreebaseSession
_log = logging.getLogger('fbgeo')
_usStates = {}
# Abbreviation expansions applied anywhere in a place name, in order.
_SUBS = [('(Independent City)', ''),
         ('(Independent city)', ''),
         ('St.', 'Saint'),
         ('Ste.', 'Sainte'),
         ('Twp.', 'Township'),
         ('Twp', 'Township'),
         ('Twnshp.', 'Township'),
         ('Twnshp', 'Township'),
         ('Ft.', 'Fort'),
         ('Mt.', 'Mount'),
         ]
# Two-character compass abbreviations expanded only at the start of a name.
_BEGIN_SUBS = [('N.', 'North'), ('S.', 'South'), ('E.', 'East'), ('W.', 'West')]

def normalizePlaceName(name):
    '''Expand common U.S. place-name abbreviations (compass prefixes,
    St./Ste., Twp./Twnshp., Ft., Mt., "(Independent City)") and trim
    surrounding whitespace.'''
    for abbrev, expansion in _BEGIN_SUBS:
        if name.startswith(abbrev):
            # every _BEGIN_SUBS abbreviation is exactly two characters long
            name = expansion + name[len(abbrev):]
    for abbrev, expansion in _SUBS:
        name = name.replace(abbrev, expansion)
    return name.strip()
def approximateDistance(a, b):
    '''Compute approximate local flat-earth distance in miles between two
    (longitude, latitude) points given in decimal degrees.

    Uses 1 nm per minute of latitude and ~1.15 statute miles per nm, and
    scales longitude differences by cos(latitude). VERY APPROXIMATE - only
    suitable for short distances away from the poles.
    '''
    from math import radians  # local import: the module header only pulls in cos/sqrt
    milePerDegree = 60 * 1.15 # 1 nm/minute, 1 1/7 mile/nm - VERY APPROXIMATE!
    # BUG FIX: the original computed cos(abs(lat/180.0)) - degrees were
    # never converted to radians (the factor of pi was missing), so the
    # longitude scaling was wrong everywhere but the equator.
    xdist = abs(a[0] - b[0]) * cos(radians(abs(a[1]))) * milePerDegree
    ydist = abs(a[1] - b[1]) * milePerDegree
    dist = sqrt(xdist ** 2 + ydist ** 2)
    return dist
def isSwapped(a, b):
    '''Return True when b looks like a with its two coordinates transposed,
    within a small tolerance.'''
    epsilon = .001
    return abs(a[0] - b[1]) < epsilon and abs(a[1] - b[0]) < epsilon
def acre2sqkm(acre):
    '''Convert acres (anything accepted by float()) to square kilometres.'''
    SQKM_PER_ACRE = 0.004047
    return SQKM_PER_ACRE * float(acre)
## TODO split into geo and fbgeo ??
def parseGeocode(geocode):
    '''Parse a Freebase geocode dict into [long, lat] or [long, lat, elev]
    floats, or None when the lat/long pair is incomplete.

    Args:
        geocode: dict with 'latitude', 'longitude', 'elevation' entries
            (any of which may be None).
    '''
    lat = geocode['latitude']
    lon = geocode['longitude']   # renamed from 'long' to stop shadowing the builtin
    elev = geocode['elevation']
    # It's possible to have objects with no valid contents.
    # Make sure we've got a complete lat/long pair ('is None' so a
    # legitimate 0.0 coordinate is not treated as missing).
    if lat is None or lon is None:
        return None
    # Elevation is optional
    if elev is None:
        return [float(lon), float(lat)]
    else:
        return [float(lon), float(lat), float(elev)]
def queryUsStateGuids(session):
    '''Query Freebase and return dict of guids for states keyed by 2 letter state code '''
    query = [{'guid' : None,
              'id' : None,
              'name' : None,
              'iso_3166_2_code' : None, #Not unique, but if this fails, we need to review assumptions anyway
              'key': {'namespace': '/authority/iso/3166-2',
                      'value~=': '^US-*',
                      'limit': 0
                      },
              'type' : '/location/administrative_division'
              }]
    results = session.fbRead(query)
    # ISO 3166-2 codes look like 'US-MA'; characters [3:5] are the state code
    return dict([ (state['iso_3166_2_code'][3:5],state['guid']) for state in results])
def _initUsStateGuids(session):
    '''Populate the module-level U.S. state guid cache on first use.'''
    if _usStates:
        return
    _log.debug('Initializing U.S. States cache')
    _usStates.update(queryUsStateGuids(session))
    _log.debug('U.S. States cache size = %d' % len(_usStates))
def queryUsStateGuid(session, state):
    '''Return the cached guid for a 2-letter U.S. state code, or None when
    the code is unknown.'''
    _initUsStateGuids(session)
    return _usStates.get(state)
def queryCityTownGuid(session, townName, stateGuid, countyName=None):
    '''Query Freebase for a town by name and return a single guid or None.

    Falls back to a county-less lookup when the county-qualified lookup
    finds nothing, and applies a heuristic to disambiguate pairs where
    exactly one match is a misnamed census-designated place.
    '''
    # Make sure the state cache is populated before the DC special case
    # reads it directly (BUG FIX: previously this raised KeyError when no
    # earlier call had initialized the cache).
    _initUsStateGuids(session)
    # special case Washington, DC since it's not really contained by itself
    if stateGuid == _usStates['DC'] and townName.startswith('Washington'):
        return _usStates['DC']
    results = queryCityTown(session, townName, stateGuid, countyName)
    if not results:
        # try again without county if we got no exact match
        results = queryCityTown(session, townName, stateGuid)
    if not results:
        return None
    elif len(results) == 1:
        return results[0]['guid']
    elif len(results) == 2:
        # HACK to disambiguate misnamed CDPs until Freebase gets cleaned up
        cdp = '/location/census_designated_place'
        if cdp in results[0]['type'] and not cdp in results[1]['type']:
            result = results[1]['guid']
        elif cdp in results[1]['type'] and not cdp in results[0]['type']:
            result = results[0]['guid']
        else:
            # TODO One cause of multiple matches are city/town pairs with the same name
            # they often can be treated as a single place, so we might be able to figure
            # out a way to deal with this
            _log.error('Multiple matches for city/town %s in state %s' % (townName,stateGuid))
            return None
        _log.warn('Multiple matches for city/town ' + townName + ' in state ' + stateGuid +' picked nonCDP ' + result)
        return result
    # BUG FIX: the original appended an undefined 'result' to this message,
    # raising NameError whenever more than two matches came back.
    _log.error('Multiple matches for city/town '+townName+' in state '+stateGuid)
    return None
def queryCityTown(session, townName, stateGuid, countyName = None):
    '''Query Freebase and return the list of matching towns (may be empty).

    Name matching goes through /type/reflect so aliases count too; countyName,
    when given, is matched as a leading string to account for both 'Suffolk'
    and 'Suffolk County' forms.  When nothing matches, the name is retried
    after normalizePlaceName() and then again with 'Township' stripped.
    '''
    query = [{'guid': None,
              'id': None,
              'name': None,
              '/type/reflect/any_value': [{'lang': '/lang/en',
                                           'link|=': ['/type/object/name',
                                                      '/common/topic/alias'],
                                           'type': '/type/text',
                                           'value': townName}],
              '/location/location/containedby': {'guid': stateGuid},
              't:type': '/location/citytown',
              'type': []
              }]
    if countyName:
        query[0]['cb:/location/location/containedby~='] = '^' + countyName
    matches = session.fbRead(query)
    # Progressive fallbacks: normalize the place name, then drop 'Township'.
    for simplify in (lambda n: normalizePlaceName(n),
                     lambda n: n.replace('Township', '').strip()):
        if matches:
            break
        townName = simplify(townName)
        query[0]['/type/reflect/any_value'][0]['value'] = townName
        matches = session.fbRead(query)
    return matches
def queryCountyGuid(session, name, stateGuid):
    '''Return the guid of the county called 'name' (or 'name county') in the
    given state; None unless exactly one match comes back.'''
    query = [{'guid': None,
              'id': None,
              'name|=': [name, name + ' county'],
              '/location/location/containedby': [{'guid': stateGuid}],
              'type': '/location/us_county'
              }]
    matches = session.fbRead(query)
    if matches is not None and len(matches) == 1:
        return matches[0]['guid']
    return None
def queryLocationContainedBy(session, guid):
    '''Return the containedby guid list for a location topic, or None when the read fails.'''
    query = {'guid': guid,
             'id': None,
             'name': None,
             'containedby': [],
             'type': '/location/location'
             }
    topic = session.fbRead(query)
    if topic is None:
        return None
    return topic['containedby']
def queryGeoLocation(session, guid):
    '''Return the full /location/geocode record for a location topic, or None.

    Two reads: the first resolves the topic's geolocation id, the second pulls
    the geocode's guid/longitude/latitude/elevation.
    '''
    topic = session.fbRead({'guid': guid,
                            'geolocation': {},
                            'type': '/location/location'
                            })
    if topic is None or topic['geolocation'] is None:
        return None
    return session.fbRead({'id': topic['geolocation']['id'],
                           'guid': None,
                           'longitude': None,
                           'latitude': None,
                           'elevation': None,
                           'type': '/location/geocode'
                           })
def addGeocode(session, topicGuid, coords):
    '''Attach a new /location/geocode to a location topic.

    coords is a (longitude, latitude[, elevation]) sequence.  Uses
    create:unless_connected so an existing geolocation is left alone.
    Returns the fbWrite response.
    '''
    geo = {'create': 'unless_connected',
           'type': '/location/geocode',
           'longitude': coords[0],
           'latitude': coords[1]
           }
    # BUG FIX: elevation is a /location/geocode property (see updateGeocode),
    # so it belongs inside the geolocation sub-query; the original wrote it
    # on the /location/location topic itself, where no such property exists.
    if len(coords) > 2:
        geo['elevation'] = coords[2]
    query = {'guid': topicGuid,
             'type': '/location/location',
             'geolocation': geo}
    return session.fbWrite(query)
def updateGeocode(session, geocodeGuid, coords):
    '''Overwrite longitude/latitude (and elevation when supplied) on an existing geocode.'''
    def _update(value):
        # MQL 'connect: update' replaces the current value in place.
        return {'connect': 'update', 'value': value}
    query = {'guid': geocodeGuid,
             'type': '/location/geocode',
             'longitude': _update(coords[0]),
             'latitude': _update(coords[1])}
    if len(coords) > 2:
        query['elevation'] = _update(coords[2])
    return session.fbWrite(query)
def addArea(session, topicGuid, area):
    '''Insert an area value on a /location/location topic.

    connect:insert (with no unless_connected) deliberately fails when an
    area already exists - which is what we want.
    '''
    return session.fbWrite({'guid': topicGuid,
                            'type': '/location/location',
                            'area': {'connect': 'insert', 'value': area}})
def addContainedBy(session, topicGuid, containerGuids):
    '''Link topicGuid to every guid in containerGuids via /location/location/containedby.'''
    links = []
    for container in containerGuids:
        links.append({'connect': 'insert', 'guid': container})
    return session.fbWrite({'guid': topicGuid,
                            'type': '/location/location',
                            'containedby': links})
def utm2lonlat(zone,east,north):
    '''Convert UTM NAD27 (zone/easting/northing) to a (longitude, latitude) pair in degrees.'''
    # TODO make this fromUTM() making canonical internal proj/datum implicit?
    p1 = Proj(proj='utm',zone=zone,ellps='clrk66') #NAD27 uses the Clark 1866 ellipsoid
    # Google Maps & MS Live uses Mercator projection - "Web Mercator" == EPSG:3785
    # inverse=True maps projected coordinates back to geographic long/lat.
    x1,y1=p1(east,north,inverse=True)
    # p2 = Proj(init='epsg:3785')
    # x,y=transform(p1,p2,x1,y1)
    return x1,y1
def test():
    '''Smoke-test town reconciliation against the live Freebase API.

    Each tuple is (town, county, state, expected Freebase id); results are
    printed as Passed/FAILED rather than asserted, so eyeball the output.
    '''
    _log.addHandler(logging.StreamHandler())
    _log.setLevel(logging.DEBUG)
    tests = [('Newlin Twp.','Chester','PA','/en/newlin_township'),
             ('St. Petersburg Beach','Pinellas', 'FL','/en/st_pete_beach'),
             ('W. Bradford Twp.','Chester', 'PA','/en/west_bradford_township'),
             ('S. Brunswick Township','Middlesex', 'NJ','/en/south_brunswick_township'),
             ('Mt. Laurel Township','Burlington', 'NJ', '/en/mount_laurel_township'),
             # ('','',''),
             ]
    session = FreebaseSession('api.freebase.com','','')
    session.touch()
    for t in tests:
        result =queryCityTown(session, t[0], queryUsStateGuid(session, t[2]), t[1])
        if result and result[0] and result[0]['id']:
            id = result[0]['id']
            print 'Passed ' if t[3]==id else 'FAILED ',t[0:3],id,result[0]['name']
        else:
            print 'FAILED ',t
# Script entry point: run the live smoke test.
if __name__ == '__main__':
    test()
| Python |
'''
Created on Jul 24, 2009
@author: Tom Morris <tfmorris@gmail.com>
@copyright: 2009 Thomas F. Morris
@license: Eclipse Public License v1 http://www.eclipse.org/legal/epl-v10.html
'''
import logging
import re
import time
import urllib2
from freebase.api import HTTPMetawebSession, MetawebError
from simplestats import Stats
_PERIOD = 0.5 # Don't query any more often then every _PERIOD seconds
log = logging.getLogger('wikipedia')
# Generator for intervals which give 1 sec. intervals (minus time already spent)
# (yes, this is just so I can play with generators!)
def waitTimeGenerator(interval):
    '''Yield how long to sleep so successive wakeups are ~interval seconds apart.

    Each yielded value is interval minus the time already spent since the
    previous yield, so it can be negative when the caller is running behind.
    The first value is ~0 because the reference time is back-dated by interval.
    '''
    previous = time.time() - interval
    while True:
        now = time.time()
        yield interval - (now - previous)
        previous = now
# Global wait time generator
__waiter = waitTimeGenerator(_PERIOD)
def fetch(wpid):
    '''Fetch the raw HTML of the English Wikipedia article with the given curid.

    Returns '' on I/O failure.  Requests are throttled via the module-level
    __waiter generator so we stay under one request per _PERIOD seconds.
    '''
    # Wikipedia will reject requests without user agent field set
    opener = urllib2.build_opener()
    opener.addheaders = [('User-agent', 'Mozilla/5.0')]
    # Throttle Wikipedia reads to 1 req/sec
    pause = __waiter.next()
    if pause > 0:
        time.sleep(pause)
    url = 'http://en.wikipedia.org/wiki/index.html?curid=' + str(wpid)
    content = ''
    try:
        handle = opener.open(url)
        content = handle.read()
        handle.close()
    except IOError:
        log.error('I/O error talking to wikipedia.org')
        # TODO: Throw so our caller knows there's a problem
        content = ''
    return content
def queryArticle(wpids, matchString, regexs, exact):
    '''Scan candidate Wikipedia articles for one that matches our topic.

    Args:
        wpids: candidate Wikipedia page ids (strings).
        matchString: literal text whose presence counts as an exact match.
        regexs: compiled patterns that count as a 'likely' match when exact is False.
        exact: when True, only the literal matchString is considered.
    Returns:
        The matching wpid, or None when nothing matched or two different
        articles both looked 'likely'.
    '''
    result = None
    quality = 'no'  # NOTE(review): tracked but never returned/used - presumably legacy
    for wpid in wpids:
        content = fetch(wpid)
        if content.find(matchString) >= 0:
            quality = 'exact'
            result = wpid
        elif not exact:
            for reg in regexs:
                if reg.search(content) != None:
                    if result == None:
                        result = wpid
                        quality = 'likely'
                        # stop scanning this article's regexes
                        break
                    else:
                        # A second article also matches - treat as ambiguous.
                        log.warn('Found multiple candidates with Wikipedia matches - matchString ' + matchString
                                 + ' first - WP ID ' + result
                                 + ' new - WP ID ' + wpid)
                        result = None
                        break
    return result
def __test():
    '''Manual smoke test - hits live Wikipedia and prints results for eyeballing.'''
    regexs = [re.compile('.ational\s*.egister\s*.f\s*.istoric\s*.laces'),
              re.compile('.ational\s*.onument'),
              re.compile('.ational\s*.emorial')]
    print queryArticle(['52302'], "Jefferson", regexs, False)
    print queryArticle(['52302'], "asdfasdfasdfasdfasd", regexs, False)
    print queryArticle(['1'], "Jefferson", regexs, False)
    # TODO check results programmatically
    # TODO check elapsed time to make sure we aren't querying too fast
if __name__ == '__main__':
__test() | Python |
'''
Check id pairs previously extracted from Wikipedia using
BFG (bfg-wex/nrhp-ids.py) against the current Freebase graph and
note whether they are OK, missing, or have already been added,
but for a different topic (a sign that the two topics need to be
reviewed for merging).
Expected input is a three column TSV file:
index<tab>Wikipedia ID<tab>NRIS ID
Output format is
total_count/missing_count/mismatch_count<space><keyword><space>NRIS ID<space>Freebase ID
where keyword is
keyword:= missing | mismatch | merge
A simple grep filter will produce a file that can be used by the next stage in the pipe.
@since Jan 14, 2010
@author: Tom Morris <tfmorris@gmail.com>
@copyright Copyright 2010. Thomas F. Morris
@license: EPL V1
'''
import csv
from fileinput import FileInput
from freebase.api import HTTPMetawebSession,MetawebError
def readids(file):
    '''Read a 3-column TSV (index, Wikipedia id, NRIS ref num) into a dict.

    Returns {nris_id (zero-padded to 8 chars): wikipedia_id}.  Ref nums that
    are overlong or look like scientific notation (spreadsheet damage) are
    skipped with a console note.  The first row is treated as a header.
    '''
    reader = csv.reader(FileInput(file),dialect=csv.excel_tab)
    count = 0
    ids = {}
    for r in reader:
        count += 1
        if count > 1:  # skip the header row
            wpid = r[1]
            nrisid = r[2].rjust(8,'0')  # NRIS ref nums are 8 digits, zero-padded
            if len(nrisid) > 8 or nrisid.find('E') > 0:
                print '**skipping NRIS ID %s, wpid %s' % (nrisid, wpid)
            else:
                ids[nrisid]=wpid
    print 'Read %d ids' % count
    return ids
def query(session, wpid, nrisid):
    '''Check one Wikipedia-id/NRIS-id pair against the current graph.

    Returns a status string - 'mismatch <g1> <g2>', 'merge <g1> <g2>' or
    'missing <nrisid> <wpkey>' - or None when no topic carries the Wikipedia
    key at all.
    '''
    wpkey = '/wikipedia/en_id/' + wpid
    byWpid = session.mqlread({
        'id': wpkey,
        'guid': None,
        '/base/usnris/nris_listing/item_number': None
    })
    if not byWpid:
        return None
    byRefnum = session.mqlread({'key': [{'namespace': '/wikipedia/en_id', 'value': None, 'optional': True}],
                                'guid': None,
                                '/base/usnris/nris_listing/item_number': nrisid})
    if not byRefnum:
        # Nothing in the graph carries this ref num yet.
        return 'missing %s %s' % (nrisid, wpkey)
    keys = byRefnum['key']
    if keys and keys[0]['value'] != wpid:
        # Two different topics: one via the WP key, one via the ref num.
        return 'mismatch %s %s' % (byWpid['guid'], byRefnum['guid'])
    # TODO do some extra verification to make sure it's a topic we created?
    return 'merge %s %s' % (byWpid['guid'], byRefnum['guid'])
def main():
    '''Check every (NRIS id, Wikipedia id) pair and print one status line per hit.

    Output: count/missing_count/mismatch_count <keyword> <id> <id>, suitable
    for grep-filtering into the next pipeline stage.
    '''
    ids = readids('nris-ids.csv')
    session = HTTPMetawebSession('api.freebase.com');
    count = 0
    missing = 0
    mismatch = 0
    start = 0  # bump this to resume an interrupted run partway through
    for nrid,wpid in ids.iteritems():
        count += 1
        if count >= start:
            result = query(session, wpid, nrid)
            if result:
                if result.startswith('missing'):
                    missing += 1
                elif result.startswith('mismatch'):
                    mismatch += 1
                print '%d/%d/%d %s' % (count,missing,mismatch,result)
if __name__ == '__main__':
main() | Python |
'''
Created on Jul 24, 2009
@author: Tom Morris <tfmorris@gmail.com>
@copyright: 2009 Thomas F. Morris
@license: Eclipse Public License v1 http://www.eclipse.org/legal/epl-v10.html
'''
import logging
import re
suffixRe = re.compile('(J|j)r|Sr|II|III|IV|(S|s)econd|2nd|Inc|MD|M\.D\.')
prefixRe = re.compile('Dr|Capt|Col|Gen|Mrs|Rev')
log = logging.getLogger('names')
def isNameSeries(name):
    '''True when the name looks like a list of people (contains '&' or ' and ').'''
    return '&' in name or ' and ' in name
def normalizePersonName(name):
    '''Convert NRIS 'Last, First [suffix]' person names to natural order.

    Firm/partnership series and names with 4+ comma pieces pass through
    unchanged; the literal 'multiple' collapses to ''.  'et al' markers are
    always stripped first.
    '''
    # Check for firms/partnerships first and skip them (series check inlined
    # from isNameSeries, which it must stay equivalent to).
    if '&' in name or ' and ' in name:
        return name
    # Special case for multiple significant person names
    if name.lower().strip() == 'multiple':
        return ''
    # Strip various forms of et al
    # TODO use a regex for this
    for marker in [', et al.', ',et al.', ', et al', ',et al', ', et.al.' ]:
        name = name.replace(marker, '')
    parts = name.split(',')
    if len(parts) == 2:
        given = parts[1].strip()
        suffix = ''
        lowered = given.lower()
        # Detach a recognized suffix (and its leading space) so it can be
        # re-appended after the reordered name.
        for ending, canonical in (('jr.', ' Jr.'), ('jr', ' Jr.'), ('iii', ' III')):
            if lowered.endswith(ending):
                given = given[:-(len(ending) + 1)]
                suffix = canonical
                break
        return '%s %s%s' % (given, parts[0].strip(), suffix)
    if len(parts) == 3:
        # TODO Do we want John Smith Jr or John Smith, Jr ?
        return '%s %s, %s' % (parts[1].strip(), parts[0].strip(), parts[2].strip())
    # TODO: More cases - single piece and 4+ pieces fall through unchanged
    return name
def normalizeName(siteName):
    '''Attempt to undo the comma-inverted transformations NRIS applies to site names.

    Strips one parenthetical and '--' dashes first, then re-orders the
    comma-separated pieces based on how many commas remain, using the
    module-level suffixRe/prefixRe patterns for the 3-comma cases.  Series
    names ('X and Y', '&') and unrecognized shapes pass through unchanged.
    '''
    # Get rid of (Boundary Increase), (schooner), etc
    siteName = stripParen(siteName)
    # Convert double hyphen style m-dash to single hyphen
    while siteName.find('--') >= 0:
        siteName = siteName.replace('--', '-')
    commas = siteName.count(', ')
    if commas == 0:
        return siteName
    # log.debug('Old name ' + name)
    # TODO - Split on commas and remove spaces separately
    pieces = siteName.split(', ')
    # If it looks like a series, leave it as is
    if ' and ' in pieces[commas].lower() or '&' in pieces[commas]:
        return siteName
    if commas == 1:
        result = ' '.join([pieces[1], pieces[0]])
    elif commas == 2:
        result = ' '.join([pieces[1], pieces[0], pieces[2]])
    elif commas == 3:
        # Branch order matters: suffix (Jr/III/Inc) beats prefix (Dr/Capt).
        if not suffixRe.search(pieces[2]) == None:
            ## TODO: ?Generalize to keep all commas except for 1st??
            result = ' '.join([pieces[1], ', '.join([pieces[0], pieces[2], pieces[3]])])
            # log.debug([' Converted ',siteName,' to ',result])
        elif not prefixRe.search(pieces[2]) == None:
            result = ' '.join([pieces[2], pieces[0], pieces[1], pieces[3]])
        elif not prefixRe.search(pieces[1]) == None:
            result = ' '.join([pieces[1], pieces[2], pieces[0], pieces[3]])
        elif len(pieces[2]) == 2 and pieces[2][1:] == '.':
            # Handle lone middle initial
            result = ' '.join([pieces[1], pieces[2], pieces[0], pieces[3]])
        else:
            # log.debug(['**no RE match for ', siteName])
            result = siteName
    else:
        # log.debug(['**no new name for ', siteName])
        result = siteName
    # TODO: What other cases do we need to handle?
    # Pierce, Capt, Mial, Farm
    # Winslow, Luther, Jr., House
    # Little, Arthur D., Inc., Building
    # Cutter, Second, A. P., House
    # Olmsted, Frederick Law, House, National Historic Site
    # Lindsay, James-Trotter, William, House
    # Lansburgh, Julius, Furniture Co., Inc.
    return result
def stripParen(s):
    '''Strip the first parenthetical expression (and its contents) from s.

    When the closing paren is missing, everything from the open paren onward
    is dropped and a warning is logged.  Note the join leaves a single space
    where the parenthetical was (possibly trailing).
    '''
    start = s.find('(')
    if start < 0:
        return s
    end = s.find(')', start)
    if end < 0:
        log.warn('Failed to find closing paren ' + s)
        return s[:start].strip()
    return s[:start].strip() + ' ' + s[end + 1:].strip()
def __test():
    '''Manual spot checks - prints the transformed names for eyeballing.'''
    print normalizeName("Smith, Jones, and Wiggins (huh)")
    print isNameSeries("Smith, Jones, and Wiggins")
    print normalizePersonName("Morris, Thomas F.")
    print normalizePersonName("Dr. T. John Smith (editor)")
    print normalizePersonName("Smith, John, Jr.")
    print normalizePersonName("Smith, John Jr.")
    print normalizePersonName("Smith, John Jr")
if __name__ == '__main__':
__test() | Python |
'''
Load missing NRIS ids into Freebase using FreeQ/Triple Loader. Input is a file of
Wikipedia ID / NRIS ref num tuples. This file is separately generated using
template/infobox data from BFG. Program creates triples to assign keys for the
reference number and add appropriate types to the existing topics.
Created on Jan 14, 2010
@author: Tom Morris <tfmorris@gmail.com>
@copyright: Copyright 2010 Thomas F. Morris
@license: Eclipse Public License (EPL) v1
'''
import json
import logging
from fileinput import FileInput
from FreebaseSession import FreebaseSession, getSessionFromArgs
def main():
    '''Build FreeQ triples (key + three type assertions per listing) and submit one job.'''
    file = FileInput('id-missing.txt') # 4 space separated columns 1 & 2 junk, 3 - NRIS ref#, 4 - FB ID
    session = getSessionFromArgs();
    # status = session.mqlwrite([{"id":"/guid/9202a8c04000641f800000001378d774", "type":{"id":"/common/topic","connect":"insert"}}])
    triples = []
    count = 0
    for line in file:
        fields = line.strip().split(' ')
        id = fields[3]      # NOTE(review): unused - fields[3] is used directly below
        refno = fields[2]   # NOTE(review): unused - fields[2] is used directly below
        triple = {'s':fields[3], 'p': 'key','o':'/base/usnris/item/'+fields[2]}
        triples.append(json.JSONEncoder().encode(triple))
        # The same dict is mutated for each subsequent triple; encode()
        # snapshots it, so earlier entries are unaffected.
        triple.update({'p':'type','o':'/base/usnris/nris_listing'})
        triples.append(json.JSONEncoder().encode(triple))
        triple.update({'p':'type','o':'/base/usnris/topic'})
        triples.append(json.JSONEncoder().encode(triple))
        triple.update({'p':'type','o':'/protected_sites/listed_site'})
        triples.append(json.JSONEncoder().encode(triple))
    payload= '\n'.join(triples)
    # payload=json.JSONEncoder().encode({'s':'/guid/9202a8c04000641f800000001378d774','p':'alias','o':'Le remplisseur de Thomas','lang':'/lang/fr'})
    # print payload
    session.login() # login right before submission to close window where server reboots can affect us
    resp,body = session.tripleSubmit(triples=payload,job_comment='Trying it again',data_comment="%d topics from U.S. Register of Historic Places" % len(triples))
    print resp,body
if __name__ == '__main__':
main() | Python |
'''
Read the U.S. National Register of Historic Places database and load it
into Freebase.
@requires: dbf from http://dbfpy.sourceforge.net/
@requires: freebase-api from http://freebase-api.code.google.com
@requires: mdbtools
@author: Tom Morris <tfmorris@gmail.com>
@copyright: 2009,2010 Thomas F. Morris
@license: Eclipse Public License v1 http://www.eclipse.org/legal/epl-v10.html
'''
from __future__ import with_statement
import csv
import datetime
from fileinput import FileInput
import logging
import logging.handlers
import os
import re
import shutil
import subprocess
import tempfile
import threading
import time
import traceback
import urllib
import urllib2
import zipfile
from dbfpy import dbf
import fbgeo
import FreebaseSession
from names import isNameSeries, normalizeName, normalizePersonName
import NRISkml
from simplestats import Stats
### Initialization of globals (ick!)
fetch = False # Fetch files from National Park Service (only published a few times a year)
# Inclusion criteria for states, significance, and resource type
# Empty list [] matches everything
incState = []
incSignificance = ['IN','NA']#,'ST']# ,'LO','NO'] # INternational, NAtional, STate, LOcal, NOt indicated
incResourceType = ['B','S','U','O','D'] # Building, Site, strUcture, Object, District,
incRestricted = True # Include sites with restricted locations (usually archaelogical sites)
incNonNominated = True # include sites without special designation like National Historical Landmark
createTopics = False # Create new Freebase topics for listings which can't be reconciled
# Use the following parameter to restart a run in the middle if it was interrupted
startingRecordNumber = 0
logging.basicConfig(level=logging.DEBUG)
logging.getLogger().setLevel(logging.WARN) # dial down freebase.api's chatty root logging
log = logging.getLogger('NRISupload')
log.setLevel(logging.DEBUG)
baseUrl = 'http://www.nr.nps.gov/NRISDATA/'
workDir = ''
masterFile = 'master.exe'
detailFile = 'DETAIL.EXE'
filenames = ['README.TXT', 'SCHEMA.DBF', masterFile, detailFile]
geoBaseUrl = 'http://www.nr.nps.gov/NRISGEO/'
geoFile = 'spatial.mdb'
geoUrl = geoBaseUrl + geoFile
kmzBaseUrl = geoBaseUrl + 'Google_Earth_layers/'
kmzFiles = ['NRHP - Midwest Region.kmz',
'NRHP - Northeast Region.kmz',
'NRHP - South Region.kmz',
'NRHP - Territories Region.kmz',
'NRHP - West Region.kmz'
]
lookupTables = ['CERTM', # Certification Status LI = Listed, etc
'STATEM', # State name
'COUNTYM', # County name
'COUNTYD', # Place detail (county, city, state)
'NOMNAMED', # Nominator category
'LEVSGD', # Level of significance
'ARCHTECD', # Architect
'ARSTYLM',
'ARSTYLD',
# 'AREASGD', # Area of Significance -> AREASGM.AREASSG (40 categories)
# 'AREASGM',
# 'PERIODD', # Period (multiple) PERIODCD->PERIODM.PERIOD
# 'PERIODM',
'SIGYEARD', # Significant year CIRCA = ''|C, SIGYEAR=year (can be multiple years)
'SIGNAMED', # Significant names
'CULTAFFD', # Cultural affiliation CULTAFFL
'OTHNAMED', # Aliases / alternate names
# 'MATD', # Material made of ->MATM.MAT
# 'MATM',
# 'APRCRITD', # Applicable criteria A= Event, B= Person, C= Architecture/Engineering, D=Information Potential
# 'OTHCERTD', # Other certification and date
# 'OTHDOCD', # Sources of other documentation in cultural resource surveys - OTHDOCCD -> OTHDOCM.OTHDOC
# 'OTHDOCM',
] #, 'OSTATED']
# lookupTables = ['COUNTYD'] # short list for quicker debugging runs
stats = Stats()
tables = dict() # Global set of database tables we've read
#### Methods ####
def indexNames(db):
    '''Return a dict mapping normalized site name -> list of refNums sharing it.'''
    names = dict()
    for rec in db:
        key = normalizeName(rec['RESNAME'])
        # setdefault collects all refNums sharing one normalized name.
        names.setdefault(key, []).append(rec['refNum'])
    nonUniques = [len(refs) for refs in names.values() if len(refs) > 1]
    log.info('Total non-unique names:' + str(len(nonUniques)))
    log.debug(['Counts of non-uniques:', nonUniques])
    return names
def queryName(session, name):
    '''Name/alias search for candidate topics carrying a Wikipedia en_id key.

    Existing /base/usnris/nris_listing topics are excluded, because those
    would already have been found by the exact refnum lookup.
    '''
    query = {
        'type': [],
        't:type': '/common/topic',
        't2:type': [{'id': '/base/usnris/nris_listing',
                     'optional': 'forbidden'}],
        'key': [{'namespace': '/wikipedia/en_id',
                 'value': None
                 }]
    }
    return session.fbQueryName(name, query)
def queryArchitect(session, names):
    '''Query server for architects by name; 'unknown' entries are dropped.

    Returns the session's list of GUIDs (non-unique / not-found names are
    skipped by queryTypeAndName).  Only /architecture/architect is searched;
    engineer and firm types were considered but are not queried.
    '''
    candidates = [normalizePersonName(n) for n in names if n.lower() != 'unknown']
    for candidate in candidates:
        stats.incr('Arch:', str(candidate))
    architectType = '/architecture/architect'
    return session.queryTypeAndName(architectType, candidates)
def uniquifyYears(seq):
    '''Collapse (name, year) pairs into a sorted list of unique year strings.

    Falsy years are dropped; years greater than 2020 are assumed to really be
    BCE and get a leading '-' sign (which also makes them sort first).
    '''
    unique = set()
    for item in seq:
        yr = item[1]
        if not yr:
            continue
        if int(yr) > 2020:  # dates in the future are really BCE
            yr = '-' + yr
        unique.add(yr)
    return sorted(unique)
class ArchStyle:
    '''Cache mapping NRIS architectural-style codes to Freebase style (guid, name) pairs.'''
    # Codes for None / Other / Mixed - not meaningful styles, so skipped.
    SKIP_STYLES = ['01','80','90']
    def __init__(self,session):
        '''Query Freebase server for all architectural styles in our short table'''
        # Remap some NPS architecture style names to their Freebase equivalents
        # TODO Early Republic == ?
        styleMap = {'bungalow/craftsman' : 'american craftsman',
                    'mission/spanish revival' : 'mission revival',
                    'colonial' : 'american colonial',
                    'pueblo' : 'pueblo revival',
                    'chicago' : 'chicago school',
                    'late victorian' : 'victorian',
                    'modern movement' : 'modern',
                    }
        self.ids={}
        for c in lookupKeys('ARSTYLM'):
            if not c in self.SKIP_STYLES: # Skip None, Other, Mixed
                name = lookup('ARSTYLM', c)[0].lower()
                name = styleMap.get(name, name)
                result = session.queryTypeAndName('/architecture/architectural_style', [name])
                if len(result) != 1:
                    log.warn('Failed to find Architecture style code: %s, name: %s' % (c,name))
                else:
                    # Cache (guid, name) so lookup() can also bump per-style stats.
                    self.ids[c]=(result[0],name)
    def lookup(self,codes):
        '''Look up Freebase IDs for a list of architectural styles.
        Return empty list if none found.'''
        ids = []
        for c in codes:
            if c not in self.SKIP_STYLES:
                if c in self.ids:
                    id,name=self.ids[c]
                    ids.append(id)
                    stats.incr('ArchStyle:',name)
                else:
                    log.debug('Failed to find Architecture style code:' + c)
        return ids
class Significance:
    '''Cache of /base/usnris/significance_level guids, keyed by the first two
    letters of the level name (matching the NRIS IN/NA/ST/LO/NO codes).'''
    def __init__(self,session):
        '''Read all significance levels from the server and build the lookup table.'''
        query = [{'type': '/base/usnris/significance_level',
                  'name': None,
                  'id': None,
                  'guid': None}]
        levels = session.fbRead(query)
        if len(levels) < 4:
            log.critical('Expected at least 4 significance levels, got ' + str(levels))
        self.guids = dict((level['name'].lower()[0:2], level['guid']) for level in levels)
    def lookup(self,significance):
        '''Return the guid for a two-letter significance code, or None when falsy/unknown.'''
        if not significance:
            return None
        return self.guids.get(significance.lower())
class Category:
    '''Cache of NRHP designation-category guids keyed by lower-cased category name.'''
    def __init__(self,session):
        '''Read the NRHP designation's category list and build the lookup table.'''
        catProp = '/protected_sites/natural_or_cultural_site_designation/categories'
        query = [{catProp: [
                      {
                       'guid': None,
                       'id': None,
                       'name': None
                       }],
                  'id': '/en/national_register_of_historic_places'
                  }]
        rows = session.fbRead(query)
        self.guids = dict((cat['name'].lower(), cat['guid']) for cat in rows[0][catProp])
    def lookup(self,category):
        '''Return the guid for a category name (case/whitespace-insensitive), or None.'''
        if not category:
            return None
        return self.guids.get(category.lower().strip())
def addType(session, guids, types):
    '''Connect every type id in types to each topic guid in guids (one write per topic).'''
    for topic in guids:
        query = {'guid': topic,
                 'type': [{'connect': 'insert', 'id': t} for t in types]}
        log.debug('Adding types ' + repr(query))
        session.fbWrite(query)
def addAliases(session, guid, aliases):
    '''Attach each distinct alias to guid as an English /common/topic/alias.

    Returns None without writing when guid or the alias list is empty.
    TODO: filter out low-quality aliases before writing.
    '''
    if not (aliases and guid):
        return None
    links = [{'connect': 'insert',
              'lang': '/lang/en',
              'type': '/type/text',
              'value': a.strip()
              } for a in set(aliases)]
    query = {'guid': guid, '/common/topic/alias': links}
    log.debug('Adding aliases ' + repr(aliases) + ' to ' + guid)
    return session.fbWrite(query)
def addNrisListing(session, guids, listing):
    '''Type each person guid as a significant_person and back-link it to the listing topic.'''
    for person in guids:
        session.fbWrite({
            'guid': person,
            'type': {'connect': 'insert', 'id': '/base/usnris/significant_person'},
            '/base/usnris/significant_person/nris_listing': {'connect': 'insert', 'guid': listing},
        })
def checkAddGeocode(session, topicGuid, coords):
    '''
    Add geocode to topic if needed. It will warn if long/lat appear swapped,
    but *not* fix it. It also doesn't update the geocode if it's within
    an epsilon (currently 0.1 nm) of the current location.
    '''
    geocode = fbgeo.queryGeoLocation(session, topicGuid)
    if not geocode:
        # No geolocation on the topic yet - safe to create one.
        response = fbgeo.addGeocode(session, topicGuid, coords)
    else:
        response = None
        location = fbgeo.parseGeocode(geocode)
        if location:
            if fbgeo.isSwapped(location, coords):
                # Deliberately warn-only; auto-swapping was tried and disabled below.
                log.warn('*** Long/lat appear swapped %s %s' % (repr(geocode), repr(coords)))
                # log.debug('*** Swapping geocode long/lat %s %s' % (repr(geocode), repr(coords)))
                # response = fbgeo.updateGeocode(session, geocode['guid'], coords[:2])
            else:
                distance = fbgeo.approximateDistance(location, coords)
                if (distance > 0.1):
                    # Existing coordinates differ materially - log, but leave them.
                    log.debug('Skipping topic with existing geo info %s distance = %d Coords = %s %s' % (topicGuid,distance,repr(coords),repr(location)))
    return response
def addListedSite(session, topicGuid, categoryGuid, certDate):
    '''Record an NRHP designation (listing date plus optional category) on a topic.

    certDate must expose isoformat() (a datetime.date works).
    TODO: check for a listed date which is just a year and update it.
    '''
    designation = {'create': 'unless_connected',
                   'type': '/protected_sites/natural_or_cultural_site_listing',
                   'designation': {'connect': 'insert',
                                   'id': '/en/national_register_of_historic_places'
                                   },
                   'date_listed': certDate.isoformat()}
    if categoryGuid:
        designation['category_or_criteria'] = {'connect': 'insert',
                                               'guid': categoryGuid}
    return session.fbWrite({'guid': topicGuid,
                            'type': '/protected_sites/listed_site',
                            'designation_as_natural_or_cultural_site': designation})
def updateTypeAndRefNum(session, topicGuid, refNum, resourceType, mandatoryTypes, significanceGuid):
    '''Add the NRIS base types (plus resource-type-specific ones), the item
    number, and optionally the significance level to an existing topic.'''
    # Extra types per NRIS resource code.  'U' (strUcture) gets nothing
    # because some of those are boats, so /architecture/structure doesn't fit;
    # TODO: what types for S/D/U/O?
    extraTypes = {
        'B': ['/architecture/building', '/architecture/structure'],
        'S': [],
        'D': [],
        'U': [],
        'O': [],
    }
    if resourceType not in extraTypes:
        log.error('unknown resource type ' + resourceType + ' for topic ' + topicGuid)
    types = ['/location/location',
             '/protected_sites/listed_site',
             '/base/usnris/topic',
             '/base/usnris/nris_listing',
             ]
    types.extend(extraTypes.get(resourceType, []))
    # If we've got an area to record, add the Location type no matter what
    for t in mandatoryTypes:
        if t not in types:
            types.append(t)
    # Add any missing types, our unique reference number, & significance level
    query = {'guid': topicGuid,
             'type': [{'connect': 'insert', 'id': t} for t in types],
             '/base/usnris/nris_listing/item_number': {'connect': 'insert',
                                                       'value': refNum}
             }
    if significanceGuid:
        query['/base/usnris/nris_listing/significance_level'] = {'connect': 'update',
                                                                 'guid': significanceGuid}
    stats.incr('General', 'TopicsUpdated')
    log.debug(' Writing guid:' + topicGuid)
    return session.fbWrite(query)
def addBuildingInfo(session, streetAddress, topicGuid, stateGuid, cityTownGuid,
                    archIds, archStyleIds):
    '''Write address, architect, and architectural-style links onto a structure topic.'''
    query = {'guid': topicGuid,
             'type': '/architecture/structure'}
    # TODO refactor the address assembly into fbgeo
    address = {}
    if streetAddress:
        address['street_address'] = streetAddress
        address['state_province_region'] = {'connect': 'insert', 'guid': stateGuid}
    if cityTownGuid:
        address['citytown'] = {'connect': 'insert', 'guid': cityTownGuid}
    if address:
        address['create'] = 'unless_connected'
        address['type'] = '/location/mailing_address'
        query['address'] = address
    if archIds:
        query['architect'] = [{'connect': 'insert', 'guid': i} for i in set(archIds)]
        stats.incr('Wrote', 'Architect')
    # (architecture_firm writes were considered but are disabled)
    if archStyleIds:
        query['architectural_style'] = [{'connect': 'insert', 'guid': i} for i in set(archStyleIds)]
        stats.incr('Wrote', 'ArchStyle')
    return session.fbWrite(query)
def addMisc(session, topicGuid, significantPersonIds, significantYears, culture):
    '''Write significant years/persons and cultural affiliations; skips the
    write entirely when there is nothing to record.'''
    query = {}
    if significantYears:
        query['/base/usnris/nris_listing/significant_year'] = [
            {'connect': 'insert', 'value': y} for y in sorted(set(significantYears))]
    if significantPersonIds:
        # Also tag each person topic with the significant_person type + back-link.
        addNrisListing(session, significantPersonIds, topicGuid)
        query['/base/usnris/nris_listing/significant_person'] = [
            {'connect': 'insert', 'guid': guid} for guid in set(significantPersonIds)]
    if culture:
        # TODO: screen for dupes which differ only in case since MQL considers
        # them identical (set() won't catch those); also consider matching the
        # free-form text to a /people/ethnicity topic (or queue for human review).
        query['/base/usnris/nris_listing/cultural_affiliation'] = [
            {'connect': 'insert', 'lang': '/lang/en', 'value': c} for c in set(culture)]
    if query:
        query['guid'] = topicGuid
        return session.fbWrite(query)
def queryNrisTopic(session, refNum, wpid):
    '''Find the unique topic carrying this NRIS reference number.

    Returns (guid, name) on a clean match, (None, None) when nothing is
    found, and (-1, None) when the data is inconsistent: duplicate refnums,
    or the topic's Wikipedia key disagrees with wpid.
    '''
    query = [{'guid': None,
              'id': None,
              'name': None,
              '/base/usnris/nris_listing/item_number': refNum,
              'key': [{'namespace': '/wikipedia/en_id', 'value': None, 'optional': True}],
              }]
    matches = session.fbRead(query)
    if not matches:
        return None, None
    if len(matches) > 1:
        log.error('multiple topics with the same NHRIS reference number ' + refNum)
        return -1, None
    topic = matches[0]
    if not wpid:
        return topic['guid'], topic['name']
    for key in topic['key']:
        if key['value'] == wpid:
            return topic['guid'], topic['name']
    log.error('Mismatch between NRIS refnum %s and Wikipedia key %s' % (refNum, wpid))
    return -1, None
def queryTopic(session, refNum, wpid, name, aliases, exactOnly):
    '''Resolve one NRIS listing to a Freebase topic.

    Tries, in order: the NRIS refnum enumeration, a name query matched via
    the Wikipedia id, then the same for each alias.  Returns (guid, name);
    (-1, None) propagates the inconsistent-data signal from queryNrisTopic,
    and (None, None) means nothing matched.
    NOTE(review): exactOnly is never referenced - confirm before removing.
    '''
    # Look up by Ref # enumeration first
    topicGuid,topicName = queryNrisTopic(session, refNum, wpid)
    if topicGuid == -1:
        return topicGuid,topicName
    if not topicGuid:
        results = queryName(session, name)
        incrMatchStats(results)
        wpids = extractWpids(results)   # NOTE(review): unused - wpid2Item re-derives the match
        item = wpid2Item(results, wpid)
        # if result == None:
        #     log.debug( 'no Wikipedia match found ' + refNum + ' ' + repr(name))
        # else:
        #     log.debug( ' Found ' + quality + ' match ' + result['guid'] + ' ' + repr(result['name']) )
        if not item and aliases:
            log.debug('Trying aliases' + str(aliases))
            for n in aliases:
                results = queryName(session, n)
                wpids = extractWpids(results)
                item = wpid2Item(results, wpid)
                if item:
                    log.info('**Resolved using alias ' + n + ' for name ' + name)
                    break
        # if item == None:
        #     log.debug('Trying reconciliation service')
        #     result = reconcileName(session, name, ['/protected_sites/listed_site'])
        #     log.debug('Reconciliation service return result = ' + str(result))
        #     if result['recommendation'] == 'automerge':
        #         # TODO need to get GUID
        #         item = result['results'][0]
        #         log.debug('Reconciliation succeeded, recommended = ' + str(result['result'][0]))
        #
        #     # TODO implement reconciliation service
        #     stats.incr('TopicMatch','ReconciliationServiceMatch')
        if item:
            topicGuid = item['guid']
            topicName = item['name']
        else:
            # NOTE(review): this counter fires when the NAME search failed, yet
            # is labeled 'RefNumEnumerationMatch' - looks misplaced; confirm.
            stats.incr('TopicMatch','RefNumEnumerationMatch')
    return topicGuid,topicName
def loadTable(dbFile):
    '''Load a DBF lookup table (index in the first column) into a dict.

    The special key '__fields__' holds the column-name list.  Every other key
    maps to a list of row values: the bare second column when the table has
    exactly two columns, otherwise the remaining columns as a sub-list.

    Args:
        dbFile: path of the .DBF file to read.
    Returns:
        dict as described above.
    '''
    result = dict()
    log.debug('Loading table ' + str(dbFile))
    db = dbf.Dbf(dbFile, readOnly=1)
    fields = db.fieldNames
    log.debug('Reading fields. Key= ' + fields[0] + ' values = ' + str(fields[1:]))
    result['__fields__'] = fields
    for rec in db:
        record = rec.asList()
        key = record[0]
        # setdefault replaces the old has_key()/assignment pair (also py3-safe)
        values = result.setdefault(key, [])
        if len(record) == 2:
            values.append(record[1])
        else:
            values.append(record[1:])
    # BUG FIX: the original said 'db.close' without parens, which evaluated
    # the bound method and never actually closed the file.
    db.close()
    return result
def unzipFiles(files, tempDir):
    '''Extract every member of each zip archive in files into tempDir.

    Args:
        files: iterable of zip-archive paths.
        tempDir: destination prefix (member names are appended directly, so
                 it should end with a path separator).
    '''
    for archive in files:
        log.debug('==Unzipping ' + archive)
        zfile = zipfile.ZipFile(archive, mode='r')
        for name in zfile.namelist():
            data = zfile.read(name)
            # BUG FIX: write in binary mode - the members are binary (DBF
            # payloads) and text mode corrupts them on Windows.  Also stop
            # shadowing the outer loop variable 'f'.
            with open(tempDir + name, 'wb') as out:
                out.write(data)
        zfile.close()
    log.debug('Unzip complete')
def loadGeo(file,coordinates):
    '''Merge UTM coordinate rows from a CSV file into the coordinates dict.

    Rows are (id, zone, easting, northing).  Entries already present in the
    dict (from the higher-quality KMZ data) are kept.  Returns the mutated
    dict.
    '''
    reader = csv.reader(FileInput(file))
    reader.next() # get rid of header row (Python 2 iterator protocol)
    count = 0
    total = 0
    for r in reader:
        total += 1
        id,zone,easting,northing = r
        if not id in coordinates: # give preference to KMZ file coordinates
            try:
                coordinates[id]=fbgeo.utm2lonlat(zone,easting,northing)
                count += 1
            # BUG FIX: narrowed from a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit.
            except Exception:
                log.warn('failed to convert coordinates %s, %s, %s for id %s' % (zone,easting,northing,id))
    log.debug('Loaded %d missing coordinate pairs (of %d) from %s (already had %d from higher quality KMZ)' % (count,total,file, total-count))
    return coordinates
def loadIds(file):
    '''Input file is a 3 column TSV file with sequence, Wikipedia id, NRIS id'''
    reader = csv.reader(FileInput(file), dialect=csv.excel_tab)
    count = 0
    ids = {}
    dupes = []
    for row in reader:
        count += 1
        # Ignore rows missing either id.
        if not (row[1] and row[2]):
            continue
        wpid = row[1]
        nrisid = row[2].rjust(8, '0')
        # Reject malformed NRIS ids (too long, or scientific-notation junk).
        if len(nrisid) > 8 or nrisid.find('E') > 0:
            log.debug('**skipping NRIS ID %s, wpid %s' % (nrisid, wpid))
            continue
        if nrisid in ids:
            if not nrisid in dupes:
                dupes.append(nrisid)
        else:
            ids[nrisid] = wpid
    # Drop every NRIS id that mapped to more than one article.
    for dupe in dupes:
        wpid = ids[dupe]
        del ids[dupe]
        log.warn('Skipping Wikipedia article #%s with multiple infoboxes' % wpid)
    log.debug('Read %d ids' % count)
    return ids
def lookup1(table, key):
    '''Look up *key* in *table*, expecting at most one value.

    Returns the single value; passes through an empty lookup result
    (None or '') unchanged; returns None when the key maps to multiple
    values.
    '''
    values = lookup(table, key)
    if values is None or values == '':
        return values
    if len(values) > 1:
        return None
    return values[0]
def lookup(table, key):
    '''Return the list of values for *key* in the named table.

    Returns '' when the key is missing; logs and returns '' when the
    table itself is unknown.
    '''
    try:
        entries = tables[table]
    except KeyError:
        log.critical('Unknown table: ' + table)
        return ''
    return entries.get(key, '')
def lookupKeys(table):
    '''Return all keys of the named table, minus the '__fields__' marker.

    Logs and returns '' when the table is unknown.
    '''
    try:
        keys = tables[table].keys()
    except KeyError:
        log.critical('Unknown table: ' + table)
        return ''
    keys.remove('__fields__')
    return keys
def lookupAliases(refNum):
    '''Return the list of alternate names for a listing.

    Aliases come from the OTHNAMED table; each entry may itself be a
    semicolon-separated list. "See also" entries are cross references,
    not real aliases, and are dropped.
    '''
    alias = lookup('OTHNAMED', refNum)
    aliases = []
    if alias:
        for a in alias:
            aliases.extend(a.split(';'))
    # BUG FIX: the original removed items from 'aliases' while iterating
    # over it, which skips the element following each removal; filter
    # into a new list instead.
    return [a for a in aliases if a.lower().find('see also') < 0]
def incrMatchStats(results):
    '''Record match-count and listed-site statistics for a query result.'''
    numResults = len(results)
    if numResults == 0:
        stats.incr('TopicMatch', 'Match0')
    elif numResults > 1:
        stats.incr('TopicMatch', 'MatchN')
    else:
        stats.incr('TopicMatch', 'Match1')
    # Count how many candidates are already typed as Listed Sites.
    for result in results:
        if '/protected_sites/listed_site' in result['type']:
            stats.incr('ListedSite', 'True')
        else:
            stats.incr('ListedSite', 'False')
    return
def extractWpids(items):
    '''Collect the Wikipedia id value from every key of every item.'''
    # Namespace needn't be checked here; the query already constrains it.
    return [key['value'] for item in items for key in item['key']]
def wpid2Item(items, wpid):
    '''Return the item whose keys include *wpid*, or None.

    When several items match, the last one wins (no early exit, matching
    the historical behavior). A falsy wpid or empty items returns None.
    '''
    item = None
    if wpid and items:
        for candidate in items:
            for key in candidate['key']:
                if wpid == key['value']:
                    item = candidate
    return item
def acre2sqkm(acre):
    '''Convert 1/10ths of an acre (as used by NRIS) to sq. km'''
    # '9' is a special signal value meaning "< 1 acre"; '' means unknown.
    # Both are treated as zero area.
    if acre == '9' or acre == '':
        acre = 0
    return fbgeo.acre2sqkm(float(acre) * 0.1)
def main():
    '''Run the NRIS -> Freebase import end to end.

    Optionally fetches and unpacks the NRIS source data, loads the
    lookup tables and coordinate data, then iterates every property
    record: filters by certification status, significance, resource
    type and state; matches (or creates) a Freebase topic; and writes
    types, location, architect, listing and miscellaneous data for it.
    Relies on module-level configuration globals (incState, fetch,
    workDir, createTopics, ...).
    '''
    session = FreebaseSession.getSessionFromArgs()
    log.info(''.join(['Selection criteria : States = ', str(incState),
                      ', Significance = ', str(incSignificance),
                      ', Types = ', str(incResourceType)]))
    log.info('Create topics = ' + str(createTopics))
    log.info('Starting record number = ' + str(startingRecordNumber))
    startTime = datetime.datetime.now()
    log.info('Starting on ' + session.service_url + ' at ' + startTime.isoformat())
    # Make temporary directory
    tempDir = tempfile.mkdtemp(suffix='dir', prefix='tempNRISdata') + '/'
    #fetch = isFetchNeeded()
    if fetch:
        # Download the master/detail dumps, the regional KMZ files and
        # the geo CSV from the NRIS web site into the work directory.
        for filename in filenames:
            url = baseUrl + filename
            log.info('Fetching ' + url)
            urllib.urlretrieve(url, workDir + filename)
        for filename in kmzFiles:
            url = kmzBaseUrl + urllib.quote(filename)
            log.info('Fetching ' + url)
            urllib.urlretrieve(url, workDir + filename)
        log.info('Fetching ' + geoUrl)
        urllib.urlretrieve(geoUrl, workDir + geoFile)
    else:
        log.debug('Using local files (no fetch from NRIS web site)')
    # Unzip our two compressed files into a temporary directory
    unzipFiles([workDir+masterFile, workDir+detailFile], tempDir)
    # Load geo data - KML data gets preference because it's more accurate (geocoded from street addresses)
    log.info('Loading geo data')
    coordinates = NRISkml.parseFiles([workDir + f for f in kmzFiles])
    for t in ['point','centroid']: #['Acreage', 'centroid', 'Main', 'point', 'polygon']
        # Export the Access spatial tables to CSV via mdb-export, then
        # merge any coordinates the KMZ data didn't already supply.
        tempGeo = tempDir + t + '.csv'
        status = subprocess.call('mdb-export %sspatial.mdb %s > %s' % (workDir,t,tempGeo),shell=True)
        loadGeo(tempGeo,coordinates)
    # Load IDs for all Wikipedia articles which have NRHP infoboxes
    wpids = loadIds('nris-ids.csv')
    # Read in all our master tables '*M.DBF' and detail xref tables *D.DBF
    for table in lookupTables:
        tables[table] = loadTable(tempDir + table + '.DBF')
    # Lookup and cache the column indexes in our location (aka county) table
    # (-1 because the key column is stripped from the stored records)
    countyTable = tables['COUNTYD']
    stateColumn = countyTable['__fields__'].index('STATECD') - 1
    cityColumn = countyTable['__fields__'].index('CITY') - 1
    primeColumn = countyTable['__fields__'].index('PRIMEFLG') - 1
    vicinityColumn = countyTable['__fields__'].index('VICINITY') - 1
    countyColumn = countyTable['__fields__'].index('COUNTYCD') - 1
    # Establish session
    session.login()
    # Query server for IDs of categories, and significance levels
    archStyle = ArchStyle(session)
    categories = Category(session)
    significances = Significance(session)
    # TODO: We could potentially cache IDs for Architects too (but not do pre-lookups)
    db = dbf.Dbf(tempDir + 'PROPMAIN.DBF')
    log.debug('Main property fields ' + str(db.fieldNames))
    log.info('** Pre-scanning for duplicate names **')
    names = indexNames(db)
    totalCount = len(db)
    log.info('** Processing ' + str(totalCount) + ' records in main record table **')
    count = 0
    try:
        for rec in db:
            # TODO do we want a try block here to allow us to continue with other records
            # if we have a problem with this one
            count += 1
            stats.incr('General','TotalRecords')
            if count < startingRecordNumber:
                continue
            # Only entries which are Listed or National Landmarks count for our purposes
            status = rec['CERTCD']
            # stats.incr('CertStatus',status)
            if not status == 'LI' and not status == 'NL':
                continue
            refNum = rec['REFNUM'] # key which links everything together
            stats.incr('CertStatus','ListedRecords')
            d = rec['CERTDATE']
            certDate = datetime.date(int(d[0:4]),int(d[4:6]),int(d[6:8]))
            # Significance Level IN=International, NA=National, ST=State, LO=Local, NO=NotIndicated
            significance = lookup1('LEVSGD', refNum);
            stats.incr('Significance',str(significance))
            # Building, District, Object, Site, strUcture
            # U used for some non-structure type things like boats, etc, so be careful!
            resourceType = rec['RETYPECD']
            stats.incr('ResourceType', str(resourceType))
            # Skip records which don't match our significance or resource type criteria
            if len(incSignificance) > 0 and not significance in incSignificance:
                continue
            if len(incResourceType) > 0 and not resourceType in incResourceType:
                continue
            name = normalizeName(rec['RESNAME'])
            restricted = (not rec['RESTRICT'] == '') # address restricted
            if restricted:
                stats.incr('General','LocationInfoRestricted')
                if not incRestricted:
                    log.debug([ 'Skipping restricted site location', restricted,refNum, name])
                    continue
            streetAddress = rec['ADDRESS']
            area = acre2sqkm(rec['ACRE'])
            if not refNum in countyTable:
                # NOTE(review): 'restricted' is a bool here, so this join
                # would raise TypeError if this branch is ever reached --
                # confirm with live data.
                log.warn('Warning - no county for ' + ' '.join([refNum, restricted, name]))
                state=''
                cityTown=''
            else:
                # Take state/city/county from the primary county entry.
                for entry in countyTable[refNum]:
                    if entry[primeColumn] != '':
                        state = entry[stateColumn]
                        county = lookup1('COUNTYM',entry[countyColumn])[0]
                        if entry[vicinityColumn] == '':
                            cityTown = entry[cityColumn]
                        else:
                            # Just in the vicinity of, don't record contained by
                            cityTown = ''
            category = lookup('NOMNAMED', refNum)
            if category:
                category = category[0]
            categoryGuid = categories.lookup(category)
            # Skip if not a National Historic Landmark, etc
            if not incNonNominated and category == '':
                continue
            # Skip states not selected
            stats.incr('State', str(state))
            if len(incState) > 0 and not state in incState:
                continue
            aliases = lookupAliases(refNum)
            # We used to only require an exact match if we had more than one listing
            # with a name, but we're being stricter now to prevent potential false matches
            #topicGuid,topicName = queryTopic(session, refNum, name, aliases, len(names[name]) > 1)
            wpid = None
            if refNum in wpids:
                wpid = wpids[refNum]
            topicGuid,topicName = queryTopic(session, refNum, wpid, name, aliases, True)
            if topicGuid == -1:
                log.debug('Lookup failure (probably ID mismatch) - skipping - '
                          + ' '.join([refNum, resourceType, state, str(significance), name, ' - ', category]))
                stats.incr('TopicMatch','Mismatch')
                continue # error on lookup, just bail out
            # TODO return a list of candidate topics to be queue for human review
            # TODO Check for incompatible/unlikely types (book, movie, etc)
            # TODO Check for compatible/reinforcing types (e.g. anything from protected site)
            # TODO Check Building.Address and Location.ContainedBy for (in)compatible addresses
            # Still don't have a match, punt...
            if not topicGuid:
                if createTopics:
                    # TODO queue potential new topics for human verification?
                    topicGuid = createTopic(session, name, [])
                    topicName = name
                    log.debug('No Freebase topic - created '
                              + ' '.join([topicGuid, refNum, resourceType, state, str(significance), name, ' - ', category]))
                    stats.incr('TopicMatch','CreatedNew')
                else:
                    log.debug('No Freebase topic found - skipping - '
                              + ' '.join([refNum, resourceType, state, str(significance), name, ' - ', category]))
                    stats.incr('TopicMatch','NotFound')
                    continue
            aliases.append(name)
            if topicName in aliases:
                aliases.remove(topicName) # We might have matched on an alias
            addAliases(session, topicGuid, aliases)
            # 'FM' Federated States of Micronesia is in database, but not a real state
            cityTownGuid = None
            containedByGuid = None
            stateGuid = fbgeo.queryUsStateGuid(session, state)
            if stateGuid:
                if cityTown:
                    cityTownGuid = fbgeo.queryCityTownGuid(session, cityTown, stateGuid, county)
                    if not cityTownGuid:
                        # TODO One cause of this are city/town pairs with the same name
                        # they often can be treated as a single place, so we might be able to figure
                        # out a way to deal with this
                        log.warn('Failed to look up city/town '+cityTown+' in '+county+', '+state)
                    containedByGuid = cityTownGuid
                # Use county as backup if vicinity flag was set or our town lookup failed
                if not cityTownGuid:
                    containedByGuid = fbgeo.queryCountyGuid(session, county, stateGuid)
            # TODO definition of this field is actually "architect, builder, or engineer"
            # so we could try harder to find a match in other disciplines
            # currently we throw away any builders or engineers
            names = lookup('ARCHTECD', refNum)
            archIds = queryArchitect(session, names)
            archStyleIds = archStyle.lookup(lookup('ARSTYLD', refNum))
            # TODO Do this later when we have a human review queue set up
            # significantNames = [normalizePersonName(n) for n in lookup('SIGNAMED', refNum)]
            # significantPersonIds = session.queryTypeAndName('/people/person', significantNames, True)
            # significantPersonIds = session.queryTypeAndName('/people/person', significantNames, False)
            significantPersonIds = []
            significantYears = uniquifyYears(lookup('SIGYEARD', refNum))
            culture = lookup('CULTAFFD', refNum)
            if culture:
                for c in culture:
                    stats.incr('Culture', c)
                log.debug(' Culture: ' + str(culture))
            log.debug( ' %2.0f%% ' % (count*100.0/totalCount) + ' '.join([str(count), refNum, resourceType, state, str(significance), str(certDate), name, category]))
            # Write/update information
            sigGuid = significances.lookup(significance)
            mandatoryTypes = ['/location/location'] if area > 0 else []
            response = updateTypeAndRefNum(session, topicGuid, refNum, resourceType, mandatoryTypes, sigGuid)
            # Handle location
            if resourceType == 'B': # TODO add type str'U'cture ?
                query = addBuildingInfo(session, streetAddress, topicGuid, stateGuid,
                                        cityTownGuid, archIds, archStyleIds)
            if stateGuid:
                containerGuids = [stateGuid]
                if containedByGuid and containedByGuid != stateGuid:
                    containerGuids.append(containedByGuid)
                response = fbgeo.addContainedBy(session, topicGuid, containerGuids)
            if area > 0:
                response = fbgeo.addArea(session, topicGuid, area)
            if refNum in coordinates:
                coords = coordinates[refNum][:2] # ignore elevation, it's always 0
                response = checkAddGeocode(session, topicGuid, coords)
            # Add Listed Site info
            # TODO: Check for existing entry that we can update with more specific date, category, etc
            response = addListedSite(session, topicGuid, categoryGuid, certDate)
            # Add any significance year, people and cultural affiliations
            addMisc(session, topicGuid, significantPersonIds, significantYears, culture)
            # TODO: Create a 2nd listing if we've got two certification dates and statuses
            # if name.lower().find(' and ') >= 0:
            #     stats.incr('PossibleCompoundTopic')
            #     Flag/log somewhere
    except Exception:
        traceback.print_exc()
    finally:
        db.close()
    endTime = datetime.datetime.now()
    log.info('Ending at ' + str(endTime) + ' elapsed time = ' + str(endTime-startTime))
    log.info('==Statistics==')
    log.info(stats.dump())
    # Clean up our temporary directory
    # log.debug 'Cleaning ', tempDir
    # shutil.rmtree(tempDir, True)
def installThreadExcepthook():
    """
    Workaround for sys.excepthook thread bug
    From http://spyced.blogspot.com/2007/06/workaround-for-sysexcepthook-bug.html
    (https://sourceforge.net/tracker/?func=detail&atid=105470&aid=1230540&group_id=5470).
    Call once from __main__ before creating any threads.
    If using psyco, call psyco.cannotcompile(threading.Thread.run)
    since this replaces a new-style class method.
    """
    init_old = threading.Thread.__init__
    def init(self, *args, **kwargs):
        # Run the original constructor, then wrap this instance's run()
        # so that uncaught exceptions are routed to sys.excepthook.
        init_old(self, *args, **kwargs)
        run_old = self.run
        def run_with_except_hook(*args, **kw):
            try:
                run_old(*args, **kw)
            except (KeyboardInterrupt, SystemExit):
                # Let deliberate shutdowns propagate untouched.
                raise
            except:
                sys.excepthook(*sys.exc_info())
        self.run = run_with_except_hook
    # Monkey-patch Thread so every future thread gets the wrapper.
    threading.Thread.__init__ = init
if __name__ == '__main__':
    # Install the excepthook workaround before any threads are created.
    installThreadExcepthook()
    main()
| Python |
'''
Module to parse a National Park Service KML file containing locations for the
National Register of Historic places.
Created on Feb 27, 2009
@author: Tom Morris <tfmorris@gmail.com>
@copyright: 2009 Thomas F. Morris
@license: Eclipse Public License v1 http://www.eclipse.org/legal/epl-v10.html
'''
from datetime import datetime
import logging
import re
from xml.sax import parseString
from xml.sax.handler import ContentHandler
import zipfile
_log = logging.getLogger('fbkml')
kmzFiles = ['NRHP - Midwest Region.kmz',
'NRHP - Northeast Region.kmz',
'NRHP - South Region.kmz',
'NRHP - Territories Region.kmz',
'NRHP - West Region.kmz'
]
class KmlHandler(ContentHandler):
    '''Parse a KML file for the National Park Service National Register of Historic Places'''
    #geocodeRE = re.compile('Geocode Match: </b>([0|1])')
    #refnumRE = re.compile('NPS Reference Number: </b>([0-9]{8})')
    # Single regex extracting the 8-digit NPS reference number and the
    # 0/1 "Geocode Match" flag from a placemark's HTML description blob.
    RE = re.compile('.*?(?s)NPS Reference Number:\\s*</b>(\\d{8}).*?(?:Geocode Match: </b>([0-1])).*?',re.DOTALL)

    def __init__(self):
        # level: current element nesting depth; buffer: text accumulated
        # for the element being read; points: refNum -> [lon, lat, elev].
        self.level = 0
        self.buffer = ''
        self.count = 0
        self.geocodedCount = 0
        self.points = {}
        self.refNum = ''
        self.name = ''
        self.coordinates = []

    def setDictionary(self, points):
        '''Use *points* as the output dict (shared with the caller).'''
        self.points = points

    def startElement(self, name, attrs):
        # print ' '[:self.level],'Starting ', name, " - ",
        self.level += 1
        self.buffer = ''
        # TODO push element on stack
        if name == 'Placemark':
            # Reset all per-placemark state.
            self.coordinates = []
            self.refNum = ''
            self.name = ''
            pass
        elif name == 'Point':
            self.coordinates = []
            pass
        return

    def characters(self, ch):
        # Accumulate character data; elements may arrive in chunks.
        self.buffer += ch

    def endElement(self, name):
        self.level -= 1
        # print self.buffer
        if name == 'Placemark':
            # TODO check for missing info
            self.points[self.refNum] = self.coordinates
            pass
        elif name == 'description': # Placemark/description
            # <b>NPS Reference Number: </b>88000612<br />
            # <b>Geocode Match: </b>1<br />
            match = self.RE.match(self.buffer)
            if match:
                self.refNum = match.group(1)
                geocode = match.group(2)
                self.count += 1
                if geocode == '1':
                    self.geocodedCount += 1
        elif name == 'name': # Placemark/name
            self.name = self.buffer
            self.buffer = ''
        elif name == 'coordinates': # Placemark/Point/coordinates
            # Triple long, lat, elev -64.9974736069999,18.3551051620001,0
            # NOTE(review): 'long' shadows the Python 2 builtin of the
            # same name (harmless here, but worth renaming).
            coords = self.buffer.split(',')
            long = float(coords[0])
            lat = float(coords[1])
            if len(coords) > 2:
                elev = float(coords[2])
                # TODO this is order dependent and assumes that the description
                # element comes before the Point element - true currently, but not guaranteed
                self.coordinates = [long, lat, elev]
            else:
                self.coordinates = [long, lat, None]
            # print " Lat = ", lat, " long = ", long, " elev = ", elev
            self.buffer = ''
def parse(file, coordinates):
    '''Parse one KMZ (zipped KML) file, adding each placemark's
    coordinates to the *coordinates* dict keyed by NPS reference number.
    '''
    handler = KmlHandler()
    handler.setDictionary(coordinates)
    # TODO - test for kmz vs kml
    if False:
        # Dead branch for plain-KML input. NOTE(review): make_parser is
        # not imported in this module, so enabling this branch as-is
        # would raise NameError -- confirm before turning it on.
        kmlFile = open(file)
        parser = make_parser()
        parser.setContentHandler(handler)
        parser.parse(kmlFile)
    else:
        # A KMZ is a zip archive whose first member is the KML document.
        kmlFile = zipfile.ZipFile(file, 'r')
        entries = kmlFile.filelist
        parseString(kmlFile.read(entries[0].filename), handler)
    _log.debug("Loaded %d coordinate pairs (%d geocoded)." % (handler.count, handler.geocodedCount))
    # NOTE(review): this returns the handler's *last* placemark's
    # coordinate triple, not the accumulated dict; callers currently
    # ignore the return value -- confirm before relying on it.
    return handler.coordinates
def parseFiles(files=kmzFiles):
    '''Parse every KMZ file, merging all placemarks into a single dict.'''
    coords = {}
    for path in files:
        parse(path, coords)
    return coords
if __name__ == '__main__':
    # Quick timing/smoke run over the default KMZ file list.
    startTime = datetime.now()
    results = parseFiles()
    # BUG FIX: the original referenced self.geocodedCount, a NameError at
    # module level, and used the invalid format specifier %t. Report the
    # entry count and elapsed time instead (single-arg print() works the
    # same under Python 2 and 3).
    print("Loaded %d entries in %s" % (len(results), datetime.now() - startTime))
    # print results
'''
A super simple package of counters with two levels of categorization.
Created on Jul 26, 2009
@author: Tom Morris <tfmorris@gmail.com>
@copyright: 2009 Thomas F. Morris
@license: Eclipse Public License v1 http://www.eclipse.org/legal/epl-v10.html
'''
from collections import defaultdict
class _defaultdictint(defaultdict):
    '''A defaultdict whose missing entries default to int() == 0.'''
    def __init__(self):
        defaultdict.__init__(self, int)
class Stats(defaultdict):
    '''
    A super simple stats class for keeping counts of things in categories
    '''

    def __init__(self):
        '''
        Constructor: missing categories default to a fresh counter dict.
        '''
        self.default_factory = _defaultdictint

    def incr(self, cat, key):
        '''Increment the counter for *key* within category *cat*.'''
        try:
            self[cat][key] += 1
        except TypeError:
            # Defensive fallback kept from the original: if the category
            # entry is not a counting dict, (re)start the count at 1.
            self[cat][key] = 1

    def dump(self):
        '''Return all categories and their counts as a formatted string.'''
        # items() (rather than the Python-2-only iteritems()) keeps this
        # working under both Python 2 and 3 with identical output.
        return "\n".join(["".join([category, ":\n", self.dumpline(cdict)])
                          for category, cdict in iter(sorted(self.items()))])

    def dumpline(self, cdict):
        '''Format one category's counters, one right-aligned pair per line.'''
        # TODO extend to sort by count (probably in reverse) rather than key
        return "\n ".join(["%20s :%5d" % (key, count) for key, count in iter(sorted(cdict.items()))])
def __test():
    '''Smoke test: count a few items and print the formatted dump.'''
    s = Stats()
    s.incr('cat1', 'item')
    s.incr('cat1', 'item')
    s.incr('cat1', 'item')
    s.incr('cat1', 'item2')
    s.incr('cat2', 'item3')
    # Single-argument print() produces identical output under Python 2
    # and 3 (the original used the Python-2-only statement form).
    print(s.dump())

if __name__ == '__main__':
    __test()
'''
Created on Sep 12, 2009
@author: tfmorris
'''
from freebase.api.session import HTTPMetawebSession
def main():
    '''Report NRIS listings whose /location/location/containedby
    includes more than one US state, one tab-separated line per bad
    listing (running bad/total, state count, item number, id, name).
    '''
    # MQL query: every nris_listing with its containing US states and,
    # optionally, its structure address state.
    q = {
        'type': '/base/usnris/nris_listing',
        '/location/location/containedby': [{
            'type': '/location/us_state'
        }],
        'item_number': None,
        'name': None,
        'id': None,
        '/architecture/structure/address' : [{'state_province_region': None,
                                              'optional': True
                                              }]
    }
    session = HTTPMetawebSession('api.freebase.com')
    result = session.mqlreaditer([q])
    total = 0
    bad = 0
    for r in result:
        total += 1
        states = r['/location/location/containedby']
        state_count = len(states)
        if state_count > 1:
            bad += 1
            # NOTE(review): r.item_number / r.name may be None, which
            # would make join() raise TypeError -- confirm against live
            # data.
            print '\t'.join([str(bad)+'/'+str(total), str(state_count), r.item_number, r.id, r.name])
    # Remove bad enum
    # create new topic with same name & usnris_listing type

if __name__ == '__main__':
    main()
#!/usr/bin/env python
# Copyright 2010 Morris Blackham
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lxml import etree
import time
import datetime
from datetime import date, timedelta
from time import strftime
import os
import shutil
#import logging
import sys
import sendemail
import commands
import fileinput
# --- Script configuration: parse the run date, load /etc/opt/fbc/fbc.xml,
# --- verify required helper binaries, and bind config values to globals.
dateformat = "%m-%d-%y"
timeformat = "%H:%M"
shortformat = ('%m-%d')
sp = " "
# rundate is variable for testing, it gets incremented on each run
# when runtest.py is launched
# use for runtest.py
day = sys.argv[1]
rundate = datetime.datetime.strptime(day, dateformat)
# use for normal mode
#now = datetime.date.today()
#day = now.strftime(dateformat)
#rundate = datetime.datetime.strptime(day, dateformat)
logdate = rundate.strftime(shortformat) + sp + datetime.datetime.now().strftime(timeformat) + " : "
#Set date/time formats for file names and timestamps
todayLong = rundate.strftime(dateformat)
yesterday = (rundate - timedelta(1)).strftime(dateformat)
msg = ""
logFile = ""
configfile = "/etc/opt/fbc/fbc.xml"
try:
    tree = etree.parse(configfile)
    root = tree.getroot()
except Exception:
    print("config file %s file not found" % configfile)
#setup logging
try:
    logpath = os.path.normpath(root.find("logpath").text)
except Exception:
    logpath = os.getcwd()
if not os.path.isdir(logpath):
    try:
        os.makedirs(logpath)
    except Exception:
        print("Unable to create log path %s" % logpath)
loglevel = root.find("loglevel").text
# check if all the binaries and other scripts are in place
gwtmstmp = "/opt/novell/groupwise/agents/bin/gwtmstmp"
dbscript = "/opt/fbc/dbcopy.sh"
if not os.path.isfile(dbscript):
    print('dbcopy.sh script not found, Please install before continuing.')
db = "/opt/novell/groupwise/agents/bin/dbcopy"
if not os.path.isfile(db):
    print('dbcopy not found. Please install before continuing.')
tarscript = "/opt/fbc/fbctar.sh"
if not os.path.isfile(tarscript):
    print("Tar shell script not found. Please check installation")
# assign vars from the config file array
backuppath = root.find("backuppath").text
# see if gwtimestamp is enabled
gwts = root.find("gwtimestamp").text
if gwts == "yes":
    if not os.path.isfile(gwtmstmp):
        print("gwtmstmp binary not found, You must install GW agents to set timestamp")
        # BUG FIX: was 'gwts == "no"' -- a no-op comparison. Assign, so the
        # timestamp step really is disabled when the binary is missing.
        gwts = "no"
maxinc = int(root.find("maxinc").text)
mountpath = os.path.normpath(root.find("mountpath").text)
offlinepath = os.path.normpath(root.find("offlinepath").text)
temppath = os.path.normpath(root.find("temppath").text)
tardirectory = os.path.normpath(root.find("tarpath").text)
# TO DO: need to find a way to age out old tar files
#tarage = root.find("tarage").text
startday = root.find("startday").text
sendlog = root.find("sendlog").text
smtphost = root.find("smtphost").text
recipient = root.find("emailaddr").text
sp = " "
lockfile = "/etc/opt/fbc/fbc.lock"
class logger:
    '''Minimal file logger: appends timestamped lines to a fixed file.

    Relies on the module-level globals 'logdate' (line prefix) and
    'loglevel' (debug gating).
    '''

    def __init__(self, logFile):
        self.logFile = logFile

    def _write(self, msg):
        '''Append one timestamped line, opening and closing per message
        so every line is flushed immediately.'''
        logfile = open(self.logFile, 'a')
        try:
            logfile.write(logdate + msg + '\n')
        finally:
            # BUG FIX: the original error() path never closed its handle.
            logfile.close()

    def debug(self, msg):
        '''Write msg only when the configured loglevel is Verbose.'''
        if loglevel == "Verbose":
            self._write(msg)
        return

    def info(self, msg):
        '''Always write msg.'''
        self._write(msg)
        return

    def error(self, msg):
        '''Log a critical error (two lines), echo it, and exit.'''
        self._write(msg)
        self._write("Critical Error encountered! Exiting program.")
        print("Critical ERROR encountered: %s " % msg)
        print("Exiting program!")
        quit()
def senderror(errortext):
    '''Email *errortext* to the configured recipient via sendemail.

    Uses the module-level config globals 'smtphost', 'recipient' and
    'todayLong'. The 'attachment' argument is a placeholder string.
    '''
    # Function to send error email, need to add instances of when to send...
    subject = 'FreebieCopy error found on %s' % todayLong
    message = """\
FreebieCopy encountered the following error:
%s
""" % errortext
    sender = 'admin@freebie.blah.com'
    attachment = 'blank'
    sendemail.send(smtphost, sender, recipient, subject, message, attachment)
    return
def removeall(path):
    '''Recursively delete *path* if it is a directory; otherwise do nothing.'''
    if os.path.isdir(path):
        shutil.rmtree(path)
def rewritelock(replacestring):
    '''Rewrite the lock file, substituting *replacestring* for its
    first line.

    Reads the current first line (newline included), then rewrites the
    file in place via fileinput, replacing that text wherever it occurs.
    NOTE(review): because the read line keeps its trailing newline, the
    'contents in line' test normally only matches the first line itself
    -- confirm that is the intent.
    '''
    f = open(lockfile, 'r')
    contents = f.readline()
    f.close()
    for line in fileinput.input(lockfile, inplace=1):
        if contents in line:
            line = line.replace(contents, (replacestring + "\n"))
        # With inplace=1, stdout is redirected back into the file.
        sys.stdout.write(line)
def tarcleanup():
    '''Placeholder for aging out old tar files -- currently a no-op.'''
    # TO DO: need some code here to check the tar directory and either delete
    # old tars or scp (or some other method) to offline storage
    # OR, make it a separate script, prbly a shell script
    return
def checkspace(source, volume):
    '''Abort (via the error logger) unless *volume* has at least twice
    the total size of the file tree rooted at *source* free.'''
    # Function to check if we have enough diskspace available
    #walk the mounted domain or po directory and get the size
    fsize = 0
    for (path, dirs, files) in os.walk(source):
        for file in files:
            filename = os.path.join(path, file)
            if os.path.exists(filename):
                fsize += os.path.getsize(filename)
    # mulitply by 2 to make sure we really have enough
    safesize = fsize * 2
    # get available diskspace for volume where target directory is located
    stats = os.statvfs(volume)
    freespace = (stats.f_bavail * stats.f_frsize)
    # check if free space is greater than the safe size
    if freespace > safesize:
        pass
    else:
        errortext = "Insufficient disk space to continue! Exiting"
        # NOTE(review): both the stdlib-style 'logging' and the custom
        # 'log' object are invoked here, and 'import logging' is
        # commented out at the top of this file -- one of these two
        # calls likely raises NameError. Confirm which logger is
        # intended and drop the other.
        logging.error(errortext)
        log.error(errortext)
    return
def bktype():
    '''Decide the backup type for the current config element *e*.

    Sets the module globals: object (0 = domain, 1 = post office),
    currentinc (current incremental number) and backuptype
    (0 = domain full, 1 = po full, 2 = po incremental).
    '''
    # Function to set what type of backup will be run; full or incremental?
    global backuptype
    global object
    global objectdir
    global currentinc
    logging.debug("Determining object type and backup type")
    #find if it's a domain or post office
    if e.find("type").text == "po":
        object = 1
        #logging.debug("object = post office")
        currentinc = e.find("currentinc").text
        #have to set current inc to something or script blows chunks
        if currentinc == None:
            currentinc = maxinc
        else:
            currentinc = int(e.find("currentinc").text)
    elif e.find("type").text == "domain":
        object = 0
        #logging.debug("object type is domain")
    else:
        logging.error("Object type is not set to domain or po, please check configuration")
    # check to see if it's Sun, or other defined start day
    # or if the currentinc is blank,
    #logging.debug("DEBUG: current incremental number is %s" % currentinc)
    # !!!!!!!
    # next line remarked out for testing purposes.. need to unrem for ship.
    #if rundate.strftime("%a") == startday or currentinc == maxinc:
    if object == 0:
        # Domains are always backed up in full.
        backuptype = 0
        logging.info("Performing a full backup for %s." % e.find("name").text)
    elif object == 1:
        if currentinc == maxinc:
            # The incremental cycle is exhausted: take a fresh full backup.
            backuptype = 1
            logging.info("Performing a full backup for %s." % e.find("name").text)
        # else if the current incremental is less than the max incremental setting
        elif currentinc < maxinc and object == 1:
            logging.info("Performing an incremental backup for %s." % e.find("name").text)
            backuptype = 2
        # else if none of the above match, we'll do a full backup
        else:
            logging.info("Performing a full backup for %s." % e.find("name").text)
            backuptype = 0
    #logging.debug("DEBUG: backup type is %s" % backuptype)
    return
def getsource():
    '''Determine (and if needed mount) the dbcopy source directory.

    Sets the module globals 'source' and 'objectdir' from the config
    element *e*; mounts the remote domain/PO file system via samba or
    ncpmount when its .db file is not visible; and for PO incremental
    backups redirects 'source' to a temp directory of symlinks covering
    only the databases that need copying.
    '''
    # defines the source directory for dbcopy
    global source
    global objectdir
    # gets the domain or po directory name from the remote path
    if e.find("remotepath").text != None:
        objectdir = os.path.basename(e.find("remotepath").text)
    else:
        # BUG FIX: the format placeholder was '$s', which made the
        # %-formatting raise TypeError when this branch was hit.
        logging.error("Remote Path is not set for %s. Please check configuration." % objectdir)
    #sets the source path to the mount point, creates it if not found under the mount point
    if mountpath != None:
        source = os.path.join(mountpath, objectdir)
        logging.info("Source directory is %s " % source)
    else:
        logging.error("Mount point path not set in config file. Please check configuation")
    if not os.path.isdir(source):
        try:
            os.makedirs(source)
            logging.debug("Source directory not found. Creating: %s" % source)
        except Exception:
            logging.error("Unable to create mount path: %s" % source)
    # checks if this server is a dom or po, then checks
    # if the .db is there, this is how to check if
    # the remote filesystem is mounted
    if object == 1:
        dbfile = "wphost.db"
    elif object == 0:
        dbfile = "wpdomain.db"
    else:
        logging.error("object type is not set")
    dbpath = os.path.join(source, dbfile)
    # if the .db is not found, let's mount it.
    if not os.path.isfile(dbpath):
        logging.debug("Remote directory is not mounted - mounting %s" % source)
        #gets the user, password, remotepath and IP from xml and
        # mounts the dir
        remoteuser = e.find("remoteuser").text
        if remoteuser == None:
            logging.error("Remote user not found for this object. Check configuration")
        remotepass = e.find("remotepass").text
        if remotepass == None:
            logging.error("Remote password for this object not found. Check configuration")
        ipaddr = e.find("ipaddr").text
        if ipaddr == None:
            logging.error("IP Address for this object not found. Check configuration")
        remotepath = e.find("remotepath").text
        if remotepath == None:
            logging.error("Remote path not found. Check configuration")
        mounttype = e.find("mounttype").text
        #find mount type; samba or ncp
        if mounttype == None:
            logging.error("Mount type not found. Check configuration")
        elif mounttype == "samba" or mounttype == "Samba":
            mount = "/sbin/mount.cifs"
            # sets up the command to mount the remote file system
            mountargs = ("-o username=" + remoteuser + ",password=" + remotepass + ",ip=" + ipaddr + ",rw")
            #logging.debug("mount arguments are %s" % mountargs)
            mountcmd = (mount + sp + remotepath + sp + source + sp + mountargs)
        else:
            mount = "ncpmount"
            if not os.path.isfile('/usr/bin/ncpmount'):
                logging.error("ncpmount command not found. please install ncpfs package")
            #need to check and accomodate what the remote path ncp string is
            if remotepath.startswith('//'):
                x = remotepath.split('/', 3)
            elif remotepath.startswith('/'):
                # BUG FIX: was 'x - remotepath.split(...)' -- a discarded
                # subtraction that left x unbound and would have raised.
                x = remotepath.split('/', 2)
            else:
                x = remotepath.split('/', 1)
            server = x[-2]
            vol = x[-1]
            mountargs = ("-S " + server + " -A " + ipaddr + " -V " + vol + " -U " + remoteuser + " -P " + remotepass)
            mountcmd = (mount + sp + mountargs + sp + source)
        # mount the remote file system
        try:
            os.system(mountcmd)
        except Exception:
            # BUG FIX: placeholder was '$s' here too.
            logging.error("Mount failed for %s" % mountcmd)
    # create some symlinks from the mounted po to a temp area to
    # only do dbcopy of the user and msg.db's
    # only needed for a PO incremental backup
    if backuptype == 2:
        logging.debug("Backup type is incremental for post office")
        # sets the link path under the fbc/tmp dir, then creates it if not found
        linkpath = os.path.join(temppath, objectdir)
        logging.debug("DEBUG: temporary link path is %s" % linkpath)
        if not os.path.isdir(linkpath):
            logging.info("Creating temporary directory with sym links %s for incremental backup." % linkpath)
            try:
                os.makedirs(linkpath)
            except Exception:
                logging.error("Could not create temporary directory %s" % linkpath)
        # set up array of files/dirs to create links for
        # then iterate thru the array and create the symlinks
        # except for offiles dir
        gwlist = (["wphost.db", "gwpo.dc", "ngwguard.db", "ngwguard.rfl", "ngwguard.fbk", "ngwguard.dc", "ngwcheck.db", "ofuser", "ofmsg", "gwdms", "ofviews"])
        for a in gwlist:
            sourcelink = os.path.join(source, a)
            targetlink = os.path.join(linkpath, a)
            try:
                if not os.path.islink(targetlink):
                    os.symlink(sourcelink, targetlink)
            except Exception:
                logging.error("Could not create symbolic link for %s" % targetlink)
                continue
        # now set the dbcopy source to the tmp/po directory
        source = linkpath
        logging.info("Source path for incremental backup is: %s" % source)
    return
def needtar():
    '''Tar up old backups that should no longer sit in the target tree.

    For a fresh PO full backup (backuptype 1), every incremental and
    every stale full backup directory is tarred. For a domain
    (object 0), an existing backup in the target directory is tarred,
    and any backup directory older than the incremental window
    (maxinc days before the last full backup) is tarred as well.
    '''
    # this will check if an existing backup exists in the target directory,
    # also check if any backups are older than maxinc value
    # for po's we're tarring all incrementals with it's full backup and starting
    # fresh with a new full backup.
    # if so, tar it up in the fbc/tars/domain or po) directory
    global basepath
    logging.debug("Determining if old backup's should be tarred.")

    def tarprep(tardir):
        #function to create variables and call createtar() function
        global tarsource
        tarsource = os.path.join(basepath, tardir)
        logging.info("Tarring existing backup directory: %s " % tardir)
        createtar(tarsource, tardir)
        return

    # for a full po backup type
    if backuptype == 1:
        if basepath != None:
            try:
                # Renamed from 'list', which shadowed the builtin.
                entries = sorted(os.listdir(basepath))
            except Exception:
                logging.error("getting list of directory %s failed" % basepath)
            # get the last full backup path
            p1 = os.path.basename(e.find("currentfullpath").text)[0:8]
            # setup 2 arrays, one for incremental backup dirs, one for full
            incdir = [elem for elem in entries if elem.split('-')[3] == 'inc']
            fulldir = [elem for elem in entries if elem.split('-')[3] == 'full']
            # we're going to tar all incrementals and their respective full backups
            # need to do the incrementals first
            for y in incdir:
                backdir = y[0:8]
                tardir = y
                tarprep(tardir)
            #now tar the full backups, except for today's
            for z in fulldir:
                backdir = z[0:8]
                if p1 == None or z == os.path.basename(target):
                    pass
                else:
                    if backdir != p1:
                        tardir = z
                        tarprep(tardir)
        else:
            logging.error("Basepath not set, Can't determine what directory to check.")
    # now tar old domain backups
    elif object == 0:
        oldpath = os.path.join(target, "wpdomain.db")
        if not os.path.isfile(oldpath):
            pass
        else:
            # BUG FIX: was 'tarsouce = target' (typo), which left
            # 'tarsource' unset for the createtar() call below.
            tarsource = target
            tardir = os.path.basename(target)
            createtar(tarsource, tardir)
        if currentfulldate != None:
            # get's the last full backup date from xml to use as "latest"
            latest = datetime.datetime.strptime(currentfulldate, dateformat)
            logging.info("latest backup is %s" % latest)
            entries = os.listdir(basepath)
            for y in entries:
                try:
                    t = datetime.datetime.strptime(y, dateformat)
                except Exception:
                    # Skip names that aren't date-formatted backup dirs.
                    continue
                # check dates of all backup directorys, then tar it if it's older
                # than today's (latest) minus the maxinc number
                if (latest - t) > timedelta(maxinc - 1):
                    tardir = y
                    tarsource = os.path.join(basepath, y)
                    createtar(tarsource, tardir)
                else:
                    pass
    return
def createtar(tarsource, tardir):
    """Tar up an existing backup directory and then remove the original."""
    logging.info("Tarring existing backup in directory %s" % tardir)
    # make sure the per-object base tar directory exists
    tarpath = os.path.join(tardirectory, objectdir)
    if not os.path.isdir(tarpath):
        try:
            os.makedirs(tarpath)
        except:
            logging.error("Failed to create base tar directory %s" % tarpath)
    # tar file is named after the date portion (first 8 chars) of the dir
    tarfile = os.path.join(tarpath, tardir[0:8]) + ".tar"
    tarcmd = tarscript + sp + tarfile + sp + tarsource + ">/dev/null"
    logging.info("Creating tarfile: %s" % tarfile)
    # make sure there is room on the tar volume, then run the tar script
    checkspace(tarsource, tardirectory)
    try:
        os.system(tarcmd)
    except:
        logging.error("Creation of tar file %s failed" % tarfile)
    logging.info("Removing old directory %s " % tarsource)
    # now remove the old directory
    removeall(tarsource)
    # belt and braces: removeall() should already have taken care of this
    if os.path.isdir(tarsource):
        try:
            logging.error("%s directory is still here, what up with that?" % tarsource)
            os.rmdir(tarsource)
        except:
            logging.info("Failed to remove old directory %s" % tarsource)
    return
def prepareblob():
    """Prepare blob (offiles) copy paths for an incremental po backup.

    Sets the blobsource/blobtarget globals and links the latest full
    backup's offiles directory into today's target directory.
    """
    global blobtarget
    global blobsource
    if e.find("currentfullpath") is None:
        logging.debug("DEBUG: Current full path not set. It should be!")
        quit()
    else:
        blobtarget = os.path.normpath(e.find("currentfullpath").text)
    blobsource = os.path.join(mountpath, objectdir)
    symsource = os.path.join(blobtarget, "offiles")
    symtarget = os.path.join(target, "offiles")
    try:
        os.symlink(symsource, symtarget)
        logging.info("Creating symbolic link to offiles")
    except:
        # note: logging.error() aborts the whole run (see logger class)
        logging.error("Failed to create offiles symlink")
    return
def gettarget():
    # function to set the target directory for dbcopy
    """Set the dbcopy target directory and record it in the xml tree.

    Depends on globals set earlier in the run (object, backuptype,
    objectdir, currentfulldate).  Side effects: creates the target
    directory, updates currentfullpath/currentfulldate/currentinc/
    restorepath elements, and triggers needtar() and, for
    incrementals, prepareblob().
    """
    global restorepath
    global target
    global basepath
    global blobsource
    global blobtarget
    global tarsource
    global currentinc
    #create base directory for xml file
    basepath = os.path.join(backuppath, objectdir)
    for el in e.iter("basepath"):
        el.text = basepath
    # if this is a full backup, set target path to backup/domain(po)/date
    if object == 0:
        # domain backups are always full: target is backup/<domain>/<date>
        target = os.path.join(basepath, todayLong)
        logging.info("DBCopy target path is %s" % target)
        currentfullpath = target
        for el in e.iter("currentfullpath"):
            el.text = currentfullpath
        for el in e.iter("currentfulldate"):
            el.text = currentfulldate
    # for po full
    elif backuptype == 1:
        #logging.debug("DEBUG: backup type is %s" % backuptype)
        target = os.path.join(basepath, todayLong + '-full')
        # set currentfullpath to target, then resets currentinc to 1
        currentfullpath = target
        for el in e.iter("currentfullpath"):
            el.text = currentfullpath
        for el in e.iter("currentfulldate"):
            el.text = currentfulldate
        currentinc = "1"
        for el in e.iter("currentinc"):
            el.text = currentinc
    elif backuptype == 2:
        # po incremental: target is backup/<po>/<date>-inc
        target = os.path.join(basepath, todayLong + "-inc")
        #set the current full date
        for el in e.iter("currentfulldate"):
            el.text = currentfulldate
        # bump the incremental counter (int here, written back as str)
        currentinc += 1
        for element in e.iter("currentinc"):
            element.text = str(currentinc)
    # create the target dir if not found
    logging.info("Target directory for backup is %s" % target)
    if not os.path.isdir(target):
        logging.debug("Target directory not found, creating %s" % target)
        os.makedirs(target)
    needtar()
    restorepath = target
    for el in e.iter("restorepath"):
        el.text = restorepath
    if backuptype == 2:
        # incremental also needs the blob (offiles) link setup
        prepareblob()
    return
def docopy(source, target):
checkspace(source, target)
dbargs = source + sp + target
dbcmd = dbscript + sp + str(backuptype) + sp + dbargs + sp + yesterday
result = commands.getstatusoutput(dbcmd)
if result[0] == 0:
pass
else:
print 'DBCopy failed with error status %s' % result[0]
return
def copy():
    """Drive the actual backup copy for the current object.

    Runs dbcopy for the source, handles the incremental-specific
    offiles copy, optionally stamps the po with gwtmstmp, refreshes
    the "restore" symlink, and finally unmounts the remote source
    (retrying until umount succeeds).
    """
    global args
    global starttime
    global endtime
    global objectname
    # get time for timestamp before the copy starts
    timestamp = rundate.strftime(timeformat)
    logging.info("Starting DBCopy for %s" % e.find("name").text)
    # call the docopy function
    docopy(source, target)
    if backuptype == 2:
        # copy offiles to the last full po backup
        logging.info("Starting DBCopy to copy OFFILES for %s" % e.find("name").text)
        docopy(blobsource, blobtarget)
        # make sure we get the wphost.db
        hostsource = os.path.join(source, "wphost.db")
        hosttarget = os.path.join(target, "wphost.db")
        try:
            shutil.copyfile(hostsource, hosttarget)
        except (IOError, OSError):
            logging.error("Failed to copy wphost.db!")
    # check to see if the timestamp is required, then run the gwtmstmp command
    if object == 1 and gwts == "yes":
        logging.info("Setting GroupWise time stamp for %s" % e.find("name").text)
        tssource = os.path.join(mountpath, objectdir)
        tscmd = gwtmstmp + " -p " + tssource + " -s -t " + timestamp
        try:
            os.system(tscmd)
        except:
            # BUG FIX: the original referenced undefined "tsmd" here, which
            # raised a NameError instead of logging the failed command
            logging.error("%s timestamp command failed." % tscmd)
    else:
        logging.debug("GW timestamp setting not enabled. Not setting timestamp")
    # create a restore path if not found, then create a symlink pointing to
    # the most current backup
    offlinedir = os.path.join(offlinepath, objectdir)
    if not os.path.isdir(offlinepath):
        os.makedirs(offlinepath)
    if os.path.islink(offlinedir):
        os.unlink(offlinedir)
    try:
        os.symlink(restorepath, offlinedir)
    except OSError:
        logging.error("Creation of sym links failed")
    # unmount the source - can't mount a ncp server twice..
    if backuptype == 2:
        unmount = "umount " + blobsource
        # remove symlinks from the temp dir
        rmlink = os.path.join(temppath, objectdir)
        logging.debug("Removing symbolic links from %s" % rmlink)
        removeall(rmlink)
    else:
        unmount = "umount " + source
    # wait until the umount command succeeds
    command_result = commands.getstatusoutput(unmount)
    while command_result[0] != 0:
        time.sleep(10)
        command_result = commands.getstatusoutput(unmount)
    return
def writexml():
    # Function to write all the changed data back to the config file
    """Flush the (mutated) element tree back to the xml config file."""
    try:
        tree.write(configfile, pretty_print=True)
    except:
        # note: logging.error() terminates the program (see logger class)
        logging.error("Failed to update config file")
def main():
    """Top-level driver: iterate every <server> in the config xml and
    run a backup for it, maintaining the lock file, the main log, and
    a per-object log.
    """
    global e
    global currentfulldate
    global currentinc
    global objectname
    global logging
    global log
    global mainlog
    #create instance for the main log file
    mainlog = (os.path.join(logpath, datetime.datetime.strftime(rundate, shortformat)) + ".log")
    log = logger(mainlog)
    # and start the main log file
    log.info("Starting FreebieCopy for %s" % rundate.strftime(dateformat))
    log.info("===========================================")
    log.info(" ")
    currentfulldate = todayLong
    lastrundate = rundate.strftime(dateformat)
    for el in root.iter("lastrundate"):
        el.text = lastrundate
    lastrunstart = (rundate.strftime(dateformat) + sp + datetime.datetime.now().strftime(timeformat))
    for el in root.iter("lastrunstart"):
        el.text = lastrunstart
    # create a lock file so web admin doesn't write to the xml file during
    # running of this script
    if not os.path.isfile(lockfile):
        lock = open(lockfile, 'w')
        text = "blank"
        try:
            lock.write(text)
            log.info("Creating lockfile: %s" % lockfile)
        except:
            log.error("Failed to create lock file: %s" % lockfile)
        lock.close()
    # iterate thru all GW objects in the xml and
    # do the copy
    for e in root.findall("./server"):
        restorepath = e.find("restorepath").text
        starttime = (rundate.strftime(dateformat) + sp + datetime.datetime.now().strftime(timeformat))
        for el in e.iter("starttime"):
            el.text = starttime
        objectname = e.find("name").text
        #create instance for domain/po log file
        agentlogpath = os.path.join(logpath, objectname)
        if not os.path.isdir(agentlogpath):
            try:
                os.makedirs(agentlogpath)
            except:
                print("Unable to create log path %s" % agentlogpath)
        agentlog = (os.path.join(agentlogpath, datetime.datetime.strftime(rundate, shortformat)) + ".log")
        # 'logging' is for dom/po log, 'log' is for main log
        # NOTE(review): this rebinds the module-global name "logging" to a
        # logger instance; every other function logs through that global
        logging = logger(agentlog)
        log.info("Performing backup for %s" % objectname)
        logging.info("===========================================")
        logging.info(" ")
        logging.info("Starting backup for %s" % objectname)
        # add object name to lock file, so web admin can see what's currently running.
        rewritelock(objectname)
        # decide backup type, work out source/target paths, then copy
        bktype()
        getsource()
        gettarget()
        copy()
        endtime = rundate.strftime(dateformat) + sp + datetime.datetime.now().strftime(timeformat)
        for el in e.iter("endtime"):
            el.text = endtime
        logging.info("Backup for %s complete." % objectname)
        logging.info("-----------------------------------------------")
        log.info("Backup for %s complete." % objectname)
        log.info("-----------------------------------------------")
    lastrunend = rundate.strftime(dateformat) + sp + datetime.datetime.now().strftime(timeformat)
    for el in root.iter("lastrunend"):
        el.text = lastrunend
    log.info("Writing updates to config file")
    writexml()
    tarcleanup()
    # kill the lock file
    if os.path.isfile(lockfile):
        os.remove(lockfile)
        log.info("Removing lock file.")
    else:
        log.error("Unable to delete lock file!!")
    log.info(" ")
    log.info("To see log files for domains and post office,")
    log.info("Go to %s/ domainname/poname" % logpath)
    log.info(" ")
    log.info("========== FreebieCopy Finished ===========")
    log.info(" ")
    return
def sendlog():
    """Email the main log file for this run to the configured admin."""
    mail_subject = 'FreebieCopy status for %s' % todayLong
    mail_body = 'Attached is the log file for this run'
    mail_sender = 'admin@freebie.blah.com'
    try:
        sendemail.send(smtphost, mail_sender, recipient, mail_subject, mail_body, mainlog)
        log.info("Log file sent to %s" % recipient)
    except:
        log.info("Unable to send log file")
    log.info("==========================================")
# now everthings defined, let's run this puppy!
main()
# BUG FIX: the module-level "sendlog" config string is shadowed by the
# sendlog() function defined above, so the original test
# `sendlog == "Yes"` compared the function object to a string and was
# always False -- the status mail never went out.  Read the flag
# straight from the xml tree instead.
if root.find("sendlog").text == "Yes":
    sendlog()
| Python |
#!/usr/bin/env python
# Copyright 2010 Morris Blackham
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# this is a testing tool only!!!
import time
import datetime
from datetime import date, timedelta
from time import strftime
import sys
import os
# runs is assigned as part of command line argument. It's the nubmer of runs
# of dbcopy to execute. Rundate is passed to the fbc.py script on each
# run and is incremented every run.
runs = int(sys.argv[1])
try:
os.system("/opt/fbc/reset")
except:
print "reset failed"
now = datetime.datetime.now()
count = int(1)
while count <= runs:
rundate = now.strftime('%m-%d-%y')
cmd = ("/opt/fbc/fbctest.py %s" % rundate)
os.system(cmd)
now = (now + timedelta(1))
count = count + 1
| Python |
#!/usr/bin/env python
# Copyright 2010 Morris Blackham
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lxml import etree
import time
import datetime
from datetime import date, timedelta
from time import strftime
import os
import shutil
#import logging
import sys
import sendemail
import commands
import fileinput
dateformat = "%m-%d-%y"
timeformat = "%H:%M"
shortformat = ('%m-%d')
sp = " "
# rundate is variable for testing, it gets incremented on each run
# when runtest.py is launched
# use for runtest.py
day = sys.argv[1]
rundate = datetime.datetime.strptime(day, dateformat)
# use for normal mode
#now = datetime.date.today()
#day = now.strftime(dateformat)
#rundate = datetime.datetime.strptime(day, dateformat)
logdate = rundate.strftime(shortformat) + sp + datetime.datetime.now().strftime(timeformat) + " : "
#Set date/time formats for file names and timestamps
todayLong = rundate.strftime(dateformat)
yesterday = (rundate - timedelta(1)).strftime(dateformat)
msg = ""
logFile = ""
configfile = "/etc/opt/fbc/fbc.xml"
try:
tree = etree.parse(configfile)
root = tree.getroot()
except:
print("config file %s file not found" % configfile)
#setup logging
try:
logpath = os.path.normpath(root.find("logpath").text)
except:
logpath = os.getcwd()
if not os.path.isdir(logpath):
try:
os.makedirs(logpath)
except:
print("Unable to create log path %s" % logpath)
loglevel = root.find("loglevel").text
# check if all the binaries and other scripts are in place
gwtmstmp = "/opt/novell/groupwise/agents/bin/gwtmstmp"
dbscript = "/opt/fbc/dbcopy.sh"
if not os.path.isfile(dbscript):
print 'dbcopy.sh script not found, Please install before continuing.'
db = "/opt/novell/groupwise/agents/bin/dbcopy"
if not os.path.isfile(db):
print 'dbcopy not found. Please install before continuing.'
tarscript = "/opt/fbc/fbctar.sh"
if not os.path.isfile(tarscript):
print("Tar shell script not found. Please check installation")
# assign vars from the config file array
backuppath = root.find("backuppath").text
# see if gwtimestamp is enabled
gwts = root.find("gwtimestamp").text
if gwts == "yes":
if not os.path.isfile(gwtmstmp):
print("gwtmstmp binary not found, You must install GW agents to set timestamp")
gwts == "no"
maxinc = int(root.find("maxinc").text)
mountpath = os.path.normpath(root.find("mountpath").text)
offlinepath = os.path.normpath(root.find("offlinepath").text)
temppath = os.path.normpath(root.find("temppath").text)
tardirectory = os.path.normpath(root.find("tarpath").text)
# TO DO: need to find a way to age out old tar files
#tarage = root.find("tarage").text
startday = root.find("startday").text
sendlog = root.find("sendlog").text
smtphost = root.find("smtphost").text
recipient = root.find("emailaddr").text
sp = " "
lockfile = "/etc/opt/fbc/fbc.lock"
class logger:
    """Minimal append-only file logger; one instance per log file.

    Every message is prefixed with the module-level ``logdate`` stamp.
    error() is fatal: it logs, prints to the console, and terminates
    the whole program via quit().
    """
    def __init__(self, logFile):
        # logFile: path of the log file this instance appends to
        self.logFile = logFile
    def _write(self, msg):
        # open/append/close on every call so each message is a complete,
        # flushed line; try/finally FIXES the original error() path which
        # never closed its file handle
        logfile = open(self.logFile, 'a')
        try:
            logfile.write(logdate + msg + '\n')
        finally:
            logfile.close()
    def debug(self, msg):
        # debug messages only appear when loglevel is set to "Verbose"
        if loglevel == "Verbose":
            self._write(msg)
        return
    def info(self, msg):
        self._write(msg)
        return
    def error(self, msg):
        # any error is treated as fatal for the whole backup run
        self._write(msg)
        self._write("Critical Error encountered! Exiting program.")
        print("Critical ERROR encountered: %s " % msg)
        print("Exiting program!")
        quit()
def senderror(errortext):
    """Email an error notice to the configured admin address."""
    # TODO: wire this into the places that should actually trigger it
    mail_subject = 'FreebieCopy error found on %s' % todayLong
    mail_message = """\
FreebieCopy encountered the following error:
%s
""" % errortext
    mail_sender = 'admin@freebie.blah.com'
    sendemail.send(smtphost, mail_sender, recipient, mail_subject, mail_message, 'blank')
    return
def removeall(path):
    """Recursively delete *path* if it is a directory; otherwise do nothing."""
    if os.path.isdir(path):
        shutil.rmtree(path)
def rewritelock(replacestring):
    """Replace the first line of the lock file with *replacestring*.

    Used so the web admin UI can see which object is currently being
    backed up.
    """
    # remember the current first line so it can be swapped out below
    f = open(lockfile,'r')
    contents = f.readline()
    f.close()
    # fileinput with inplace=1 redirects stdout into the file being read
    for line in fileinput.input(lockfile, inplace=1):
        if contents in line:
            line = line.replace(contents,(replacestring + "\n"))
        sys.stdout.write(line)
def tarcleanup():
    """Placeholder for pruning/offloading old tar files.

    TODO: either delete old tars here or ship them to offline storage
    (scp or similar) -- possibly better done as a separate shell script.
    """
    pass
def checkspace(source, volume):
    """Abort the run unless *volume* has more than twice the size of *source* free."""
    # total up the on-disk size of everything under source
    total = 0
    for dirpath, _, filenames in os.walk(source):
        for name in filenames:
            fullname = os.path.join(dirpath, name)
            if os.path.exists(fullname):
                total = total + os.path.getsize(fullname)
    # double it so we are sure there is really enough room
    needed = total * 2
    # free space on the volume where the target directory lives
    st = os.statvfs(volume)
    if (st.f_bavail * st.f_frsize) <= needed:
        errortext = "Insufficient disk space to continue! Exiting"
        logging.error(errortext)
        log.error(errortext)
    return
def bktype():
    # Function to set what type of backup will be run; full or incremental?
    """Decide the object type (domain/po) and backup type for this run.

    Sets globals: object (0=domain, 1=po), backuptype (0=domain full,
    1=po full, 2=po incremental) and currentinc (the po's incremental
    counter read from the xml element e).
    """
    global backuptype
    global object
    global objectdir
    global currentinc
    logging.debug("Determining object type and backup type")
    #find if it's a domain or post office
    if e.find("type").text == "po":
        object = 1
        #logging.debug("object = post office")
        currentinc = e.find("currentinc").text
        #have to set current inc to something or script blows chunks
        if currentinc == None:
            # no counter recorded yet: treat as "due for a full backup"
            currentinc = maxinc
        else:
            currentinc = int(e.find("currentinc").text)
    elif e.find("type").text == "domain":
        object = 0
        #logging.debug("object type is domain")
    else:
        logging.error("Object type is not set to domain or po, please check configuration")
    # check to see if it's Sun, or other defined start day
    # or if the currentinc is blank,
    #logging.debug("DEBUG: current incremental number is %s" % currentinc)
    # !!!!!!!
    # next line remarked out for testing purposes.. need to unrem for ship.
    #if rundate.strftime("%a") == startday or currentinc == maxinc:
    if object == 0:
        # domains always get a full backup
        backuptype = 0
        logging.info("Performing a full backup for %s." % e.find("name").text)
    elif object == 1:
        if currentinc == maxinc:
            backuptype = 1
            logging.info("Performing a full backup for %s." % e.find("name").text)
        # else if the current incremental is less than the max incremental setting
        elif currentinc < maxinc and object == 1:
            logging.info("Performing an incremental backup for %s." % e.find("name").text)
            backuptype = 2
        # else if none of the above match, we'll do a full backup
        else:
            logging.info("Performing a full backup for %s." % e.find("name").text)
            backuptype = 0
    #logging.debug("DEBUG: backup type is %s" % backuptype)
    return
def getsource():
    # defines the source directory for dbcopy
    """Work out (and if needed mount) the dbcopy source directory.

    Sets globals: source (local mount point for the remote dom/po) and
    objectdir (basename of the remote path).  If the remote filesystem
    is not mounted (its .db file is missing), builds and runs the
    appropriate samba/ncp mount command.  For a po incremental backup,
    builds a temp directory of symlinks (excluding offiles) and points
    source at it.
    """
    global source
    global objectdir
    # gets the domain or po directory name from the remote path
    if e.find("remotepath").text != None:
        objectdir = os.path.basename(e.find("remotepath").text)
    else:
        # BUG FIX: original format string used "$s" with a % operand,
        # which raised TypeError instead of logging the message
        logging.error("Remote Path is not set for %s. Please check configuration." % objectdir)
    # sets the source path to the mount point, creates it if not found
    if mountpath != None:
        source = os.path.join(mountpath, objectdir)
        logging.info("Source directory is %s " % source)
    else:
        logging.error("Mount point path not set in config file. Please check configuation")
    if not os.path.isdir(source):
        try:
            os.makedirs(source)
            logging.debug("Source directory not found. Creating: %s" % source)
        except OSError:
            logging.error("Unable to create mount path: %s" % source)
    # checks if this server is a dom or po, then checks if the .db is
    # there -- this is how we detect whether the remote fs is mounted
    if object == 1:
        dbfile = "wphost.db"
    elif object == 0:
        dbfile = "wpdomain.db"
    else:
        logging.error("object type is not set")
    dbpath = os.path.join(source, dbfile)
    # if the .db is not found, let's mount it.
    if not os.path.isfile(dbpath):
        logging.debug("Remote directory is not mounted - mounting %s" % source)
        # gets the user, password, remotepath and IP from xml and mounts the dir
        remoteuser = e.find("remoteuser").text
        if remoteuser == None:
            logging.error("Remote user not found for this object. Check configuration")
        remotepass = e.find("remotepass").text
        if remotepass == None:
            logging.error("Remote password for this object not found. Check configuration")
        ipaddr = e.find("ipaddr").text
        if ipaddr == None:
            logging.error("IP Address for this object not found. Check configuration")
        remotepath = e.find("remotepath").text
        if remotepath == None:
            logging.error("Remote path not found. Check configuration")
        mounttype = e.find("mounttype").text
        # find mount type; samba or ncp
        if mounttype == None:
            logging.error("Mount type not found. Check configuration")
        elif mounttype == "samba" or mounttype == "Samba":
            mount = "/sbin/mount.cifs"
            # sets up the command to mount the remote file system
            mountargs = ("-o username=" + remoteuser + ",password=" + remotepass + ",ip=" + ipaddr + ",rw")
            mountcmd = (mount + sp + remotepath + sp + source + sp + mountargs)
        else:
            mount = "ncpmount"
            if not os.path.isfile('/usr/bin/ncpmount'):
                logging.error("ncpmount command not found. please install ncpfs package")
            # need to check and accomodate what the remote path ncp string is
            if remotepath.startswith('//'):
                x = remotepath.split('/', 3)
            elif remotepath.startswith('/'):
                # BUG FIX: original read "x - remotepath.split(...)" which
                # discarded the result and left x unbound on this path
                x = remotepath.split('/', 2)
            else:
                x = remotepath.split('/', 1)
            server = x[-2]
            vol = x[-1]
            mountargs = ("-S " + server + " -A " + ipaddr + " -V " + vol + " -U " + remoteuser + " -P " + remotepass)
            mountcmd = (mount + sp + mountargs + sp + source)
        # mount the remote file system
        try:
            os.system(mountcmd)
        except:
            # BUG FIX: "$s" -> "%s" (the % operand raised TypeError before)
            logging.error("Mount failed for %s" % mountcmd)
    # create some symlinks from the mounted po to a temp area so dbcopy
    # only copies the user and msg db's -- only for a PO incremental
    if backuptype == 2:
        logging.debug("Backup type is incremental for post office")
        # link path lives under the fbc tmp dir; create it if not found
        linkpath = os.path.join(temppath, objectdir)
        logging.debug("DEBUG: temporary link path is %s" % linkpath)
        if not os.path.isdir(linkpath):
            logging.info("Creating temporary directory with sym links %s for incremental backup." % linkpath)
            try:
                os.makedirs(linkpath)
            except OSError:
                logging.error("Could not create temporary directory %s" % linkpath)
        # files/dirs to link -- everything except the offiles dir
        gwlist = (["wphost.db", "gwpo.dc", "ngwguard.db", "ngwguard.rfl", "ngwguard.fbk","ngwguard.dc","ngwcheck.db", "ofuser", "ofmsg", "gwdms", "ofviews"])
        for a in gwlist:
            sourcelink = os.path.join(source, a)
            targetlink = os.path.join(linkpath, a)
            try:
                if not os.path.islink(targetlink):
                    os.symlink(sourcelink, targetlink)
            except OSError:
                logging.error("Could not create symbolic link for %s" % targetlink)
                continue
        # now set the dbcopy source to the tmp/po directory
        source = linkpath
        logging.info("Source path for incremental backup is: %s" % source)
    return
def needtar():
    """Tar up old backups so a fresh backup set can be started.

    For a po full backup (backuptype 1) every incremental directory
    and every full backup directory except the current one is tarred.
    For a domain (object 0) an existing backup already sitting in the
    target directory, plus any backup older than maxinc days, is tarred.
    Relies on module globals: backuptype, object, basepath, target, e,
    currentfulldate, maxinc.
    """
    global basepath
    logging.debug("Determining if old backup's should be tarred.")
    def tarprep(tardir):
        # build the full source path for a backup dir and tar it
        global tarsource
        tarsource = os.path.join(basepath, tardir)
        logging.info("Tarring existing backup directory: %s " % tardir)
        createtar(tarsource, tardir)
        return
    # for a full po backup type
    if backuptype == 1:
        if basepath is not None:
            try:
                backups = sorted(os.listdir(basepath))
            except OSError:
                # note: logging.error() terminates the run (see logger class)
                logging.error("getting list of directory %s failed" % basepath)
            # first 8 chars ("mm-dd-yy") of the last full backup's dir name
            p1 = os.path.basename(e.find("currentfullpath").text)[0:8]
            # split the listing into incremental and full backup dirs;
            # names look like "mm-dd-yy-inc" / "mm-dd-yy-full"
            incdir = [elem for elem in backups if elem.split('-')[3] == 'inc']
            fulldir = [elem for elem in backups if elem.split('-')[3] == 'full']
            # tar all incrementals first, then their respective full backups
            for y in incdir:
                tarprep(y)
            # now tar the full backups, except for today's
            for z in fulldir:
                backdir = z[0:8]
                if p1 is None or z == os.path.basename(target):
                    continue
                if backdir != p1:
                    tarprep(z)
        else:
            logging.error("Basepath not set, Can't determine what directory to check.")
    # now tar old domain backups
    elif object == 0:
        oldpath = os.path.join(target, "wpdomain.db")
        if os.path.isfile(oldpath):
            # BUG FIX: the original assigned the typo "tarsouce" here, so
            # createtar() below was called with an unbound "tarsource"
            tarsource = target
            tardir = os.path.basename(target)
            createtar(tarsource, tardir)
        if currentfulldate is not None:
            # last full backup date from the xml is used as "latest"
            latest = datetime.datetime.strptime(currentfulldate, dateformat)
            logging.info("latest backup is %s" % latest)
            for y in os.listdir(basepath):
                try:
                    t = datetime.datetime.strptime(y, dateformat)
                except ValueError:
                    # not a date-named backup directory; skip it
                    continue
                # tar any backup directory older than latest minus maxinc days
                if (latest - t) > timedelta(maxinc - 1):
                    tarsource = os.path.join(basepath, y)
                    createtar(tarsource, y)
    return
def createtar(tarsource, tardir):
    """Tar up an existing backup directory and then remove the original."""
    logging.info("Tarring existing backup in directory %s" % tardir)
    # make sure the per-object base tar directory exists
    tarpath = os.path.join(tardirectory, objectdir)
    if not os.path.isdir(tarpath):
        try:
            os.makedirs(tarpath)
        except:
            logging.error("Failed to create base tar directory %s" % tarpath)
    # tar file is named after the date portion (first 8 chars) of the dir
    tarfile = os.path.join(tarpath, tardir[0:8]) + ".tar"
    tarcmd = tarscript + sp + tarfile + sp + tarsource + ">/dev/null"
    logging.info("Creating tarfile: %s" % tarfile)
    # make sure there is room on the tar volume, then run the tar script
    checkspace(tarsource, tardirectory)
    try:
        os.system(tarcmd)
    except:
        logging.error("Creation of tar file %s failed" % tarfile)
    logging.info("Removing old directory %s " % tarsource)
    # now remove the old directory
    removeall(tarsource)
    # belt and braces: removeall() should already have taken care of this
    if os.path.isdir(tarsource):
        try:
            logging.error("%s directory is still here, what up with that?" % tarsource)
            os.rmdir(tarsource)
        except:
            logging.info("Failed to remove old directory %s" % tarsource)
    return
def prepareblob():
    """Prepare blob (offiles) copy paths for an incremental po backup.

    Sets the blobsource/blobtarget globals and links the latest full
    backup's offiles directory into today's target directory.
    """
    global blobtarget
    global blobsource
    if e.find("currentfullpath") is None:
        logging.debug("DEBUG: Current full path not set. It should be!")
        quit()
    else:
        blobtarget = os.path.normpath(e.find("currentfullpath").text)
    blobsource = os.path.join(mountpath, objectdir)
    symsource = os.path.join(blobtarget, "offiles")
    symtarget = os.path.join(target, "offiles")
    try:
        os.symlink(symsource, symtarget)
        logging.info("Creating symbolic link to offiles")
    except:
        # note: logging.error() aborts the whole run (see logger class)
        logging.error("Failed to create offiles symlink")
    return
def gettarget():
    # function to set the target directory for dbcopy
    """Set the dbcopy target directory and record it in the xml tree.

    Depends on globals set earlier in the run (object, backuptype,
    objectdir, currentfulldate).  Side effects: creates the target
    directory, updates currentfullpath/currentfulldate/currentinc/
    restorepath elements, and triggers needtar() and, for
    incrementals, prepareblob().
    """
    global restorepath
    global target
    global basepath
    global blobsource
    global blobtarget
    global tarsource
    global currentinc
    #create base directory for xml file
    basepath = os.path.join(backuppath, objectdir)
    for el in e.iter("basepath"):
        el.text = basepath
    # if this is a full backup, set target path to backup/domain(po)/date
    if object == 0:
        # domain backups are always full: target is backup/<domain>/<date>
        target = os.path.join(basepath, todayLong)
        logging.info("DBCopy target path is %s" % target)
        currentfullpath = target
        for el in e.iter("currentfullpath"):
            el.text = currentfullpath
        for el in e.iter("currentfulldate"):
            el.text = currentfulldate
    # for po full
    elif backuptype == 1:
        #logging.debug("DEBUG: backup type is %s" % backuptype)
        target = os.path.join(basepath, todayLong + '-full')
        # set currentfullpath to target, then resets currentinc to 1
        currentfullpath = target
        for el in e.iter("currentfullpath"):
            el.text = currentfullpath
        for el in e.iter("currentfulldate"):
            el.text = currentfulldate
        currentinc = "1"
        for el in e.iter("currentinc"):
            el.text = currentinc
    elif backuptype == 2:
        # po incremental: target is backup/<po>/<date>-inc
        target = os.path.join(basepath, todayLong + "-inc")
        #set the current full date
        for el in e.iter("currentfulldate"):
            el.text = currentfulldate
        # bump the incremental counter (int here, written back as str)
        currentinc += 1
        for element in e.iter("currentinc"):
            element.text = str(currentinc)
    # create the target dir if not found
    logging.info("Target directory for backup is %s" % target)
    if not os.path.isdir(target):
        logging.debug("Target directory not found, creating %s" % target)
        os.makedirs(target)
    needtar()
    restorepath = target
    for el in e.iter("restorepath"):
        el.text = restorepath
    if backuptype == 2:
        # incremental also needs the blob (offiles) link setup
        prepareblob()
    return
def docopy(source, target):
checkspace(source, target)
dbargs = source + sp + target
dbcmd = dbscript + sp + str(backuptype) + sp + dbargs + sp + yesterday
result = commands.getstatusoutput(dbcmd)
if result[0] == 0:
pass
else:
print 'DBCopy failed with error status %s' % result[0]
return
def copy():
    """Drive the actual backup copy for the current object.

    Runs dbcopy for the source, handles the incremental-specific
    offiles copy, optionally stamps the po with gwtmstmp, refreshes
    the "restore" symlink, and finally unmounts the remote source
    (retrying until umount succeeds).
    """
    global args
    global starttime
    global endtime
    global objectname
    # get time for timestamp before the copy starts
    timestamp = rundate.strftime(timeformat)
    logging.info("Starting DBCopy for %s" % e.find("name").text)
    # call the docopy function
    docopy(source, target)
    if backuptype == 2:
        # copy offiles to the last full po backup
        logging.info("Starting DBCopy to copy OFFILES for %s" % e.find("name").text)
        docopy(blobsource, blobtarget)
        # make sure we get the wphost.db
        hostsource = os.path.join(source, "wphost.db")
        hosttarget = os.path.join(target, "wphost.db")
        try:
            shutil.copyfile(hostsource, hosttarget)
        except (IOError, OSError):
            logging.error("Failed to copy wphost.db!")
    # check to see if the timestamp is required, then run the gwtmstmp command
    if object == 1 and gwts == "yes":
        logging.info("Setting GroupWise time stamp for %s" % e.find("name").text)
        tssource = os.path.join(mountpath, objectdir)
        tscmd = gwtmstmp + " -p " + tssource + " -s -t " + timestamp
        try:
            os.system(tscmd)
        except:
            # BUG FIX: the original referenced undefined "tsmd" here, which
            # raised a NameError instead of logging the failed command
            logging.error("%s timestamp command failed." % tscmd)
    else:
        logging.debug("GW timestamp setting not enabled. Not setting timestamp")
    # create a restore path if not found, then create a symlink pointing to
    # the most current backup
    offlinedir = os.path.join(offlinepath, objectdir)
    if not os.path.isdir(offlinepath):
        os.makedirs(offlinepath)
    if os.path.islink(offlinedir):
        os.unlink(offlinedir)
    try:
        os.symlink(restorepath, offlinedir)
    except OSError:
        logging.error("Creation of sym links failed")
    # unmount the source - can't mount a ncp server twice..
    if backuptype == 2:
        unmount = "umount " + blobsource
        # remove symlinks from the temp dir
        rmlink = os.path.join(temppath, objectdir)
        logging.debug("Removing symbolic links from %s" % rmlink)
        removeall(rmlink)
    else:
        unmount = "umount " + source
    # wait until the umount command succeeds
    command_result = commands.getstatusoutput(unmount)
    while command_result[0] != 0:
        time.sleep(10)
        command_result = commands.getstatusoutput(unmount)
    return
def writexml():
    """Write the modified XML tree back to the config file.

    A failure is fatal: logging.error() prints the message and quits
    the program (see the logger class).
    """
    try:
        tree.write(configfile, pretty_print=True)
    except:
        logging.error("Failed to update config file")
def main():
    """Drive one full FreebieCopy run.

    Writes run metadata back into the in-memory XML tree, maintains a
    lock file for the web admin UI, and for every <server> element runs
    the backup pipeline (bktype/getsource/gettarget/copy) with a
    per-object log file.
    """
    global e
    global currentfulldate
    global currentinc
    global objectname
    # NOTE: 'logging' is a module-global logger instance (the stdlib
    # logging module is NOT imported here) — it is rebound per object.
    global logging
    global log
    global mainlog
    # create instance for the main log file
    mainlog = (os.path.join(logpath, datetime.datetime.strftime(rundate, shortformat)) + ".log")
    log = logger(mainlog)
    # and start the main log file
    log.info("Starting FreebieCopy for %s" % rundate.strftime(dateformat))
    log.info("===========================================")
    log.info(" ")
    # record run timestamps in the config tree
    currentfulldate = todayLong
    lastrundate = rundate.strftime(dateformat)
    for el in root.iter("lastrundate"):
        el.text = lastrundate
    lastrunstart = (rundate.strftime(dateformat) + sp + datetime.datetime.now().strftime(timeformat))
    for el in root.iter("lastrunstart"):
        el.text = lastrunstart
    # create a lock file so web admin doesn't write to the xml file during
    # running of this script
    if not os.path.isfile(lockfile):
        lock = open(lockfile, 'w')
        text = "blank"
        try:
            lock.write(text)
            log.info("Creating lockfile: %s" % lockfile)
        except:
            log.error("Failed to create lock file: %s" % lockfile)
        lock.close()
    # iterate thru all GW objects in the xml and
    # do the copy
    for e in root.findall("./server"):
        restorepath = e.find("restorepath").text
        starttime = (rundate.strftime(dateformat) + sp + datetime.datetime.now().strftime(timeformat))
        for el in e.iter("starttime"):
            el.text = starttime
        objectname = e.find("name").text
        # create instance for domain/po log file
        agentlogpath = os.path.join(logpath, objectname)
        if not os.path.isdir(agentlogpath):
            try:
                os.makedirs(agentlogpath)
            except:
                print("Unable to create log path %s" % agentlogpath)
        agentlog = (os.path.join(agentlogpath, datetime.datetime.strftime(rundate, shortformat)) + ".log")
        # 'logging' is for dom/po log, 'log' is for main log
        logging = logger(agentlog)
        log.info("Performing backup for %s" % objectname)
        logging.info("===========================================")
        logging.info(" ")
        logging.info("Starting backup for %s" % objectname)
        # add object name to lock file, so web admin can see what's currently running.
        rewritelock(objectname)
        # the four phases of a backup for this object
        bktype()
        getsource()
        gettarget()
        copy()
        endtime = rundate.strftime(dateformat) + sp + datetime.datetime.now().strftime(timeformat)
        for el in e.iter("endtime"):
            el.text = endtime
        logging.info("Backup for %s complete." % objectname)
        logging.info("-----------------------------------------------")
        log.info("Backup for %s complete." % objectname)
        log.info("-----------------------------------------------")
    lastrunend = rundate.strftime(dateformat) + sp + datetime.datetime.now().strftime(timeformat)
    for el in root.iter("lastrunend"):
        el.text = lastrunend
    log.info("Writing updates to config file")
    writexml()
    tarcleanup()
    # kill the lock file
    if os.path.isfile(lockfile):
        os.remove(lockfile)
        log.info("Removing lock file.")
    else:
        log.error("Unable to delete lock file!!")
    log.info(" ")
    log.info("To see log files for domains and post office,")
    log.info("Go to %s/ domainname/poname" % logpath)
    log.info(" ")
    log.info("========== FreebieCopy Finished ===========")
    log.info(" ")
    return
def sendlog():
    # Email the main log file to the configured admin recipient.
    subj = 'FreebieCopy status for %s' % todayLong
    note = 'Attached is the log file for this run'
    from_addr = 'admin@freebie.blah.com'
    try:
        sendemail.send(smtphost, from_addr, recipient, subj, note, mainlog)
        log.info("Log file sent to %s" % recipient)
    except:
        log.info("Unable to send log file")
    log.info("==========================================")
# now everything's defined, let's run this puppy!
main()
# BUG FIX: the module-level 'sendlog' config value is shadowed by the
# sendlog() function definition above, so comparing the function object
# to "Yes" could never be true and the log mail was never sent.
# Re-read the setting from the parsed config instead.
if root.find("sendlog").text == "Yes":
    sendlog()
| Python |
#!/usr/bin/env python
# Copyright 2010 Morris Blackham
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# this is a testing tool only!!!
import time
import datetime
from datetime import date, timedelta
from time import strftime
import sys
import os
# runs is assigned as part of command line argument. It's the nubmer of runs
# of dbcopy to execute. Rundate is passed to the fbc.py script on each
# run and is incremented every run.
runs = int(sys.argv[1])
try:
os.system("/opt/fbc/reset")
except:
print "reset failed"
now = datetime.datetime.now()
count = int(1)
while count <= runs:
rundate = now.strftime('%m-%d-%y')
cmd = ("/opt/fbc/fbctest.py %s" % rundate)
os.system(cmd)
now = (now + timedelta(1))
count = count + 1
| Python |
#!/usr/bin/env python
# Copyright 2010 Morris Blackham
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import commands
# Apache config file that receives the <Location /fbc> auth stanza.
httpconf = "/etc/apache2/httpd.conf"
def getuser():
global passfile
passfile = raw_input("Enter path to Apache password file (do not put under htdoc dir): ")
user = raw_input("Enter FBC admin ID: ")
pwd = raw_input("Enter admin password: ")
if os.path.isfile(passfile):
cmd = ("/usr/bin/htpasswd2 -b " + passfile + " " + user + " " + pwd)
else:
cmd = ("/usr/bin/htpasswd2 -c -b " + passfile + " " + user + " " + pwd)
retcode = commands.getstatusoutput(cmd)
if retcode < 0:
print "Terminated signal", -retcode
else:
print "Ok", retcode
return(passfile)
def writefile(passfile):
    # Append a <Location /fbc> basic-auth stanza to httpd.conf so the
    # FBC web admin pages require the credentials created by getuser().
    y = open(httpconf,'a')
    lines = ["<Location /fbc>\n",
    " AuthType Basic\n",
    " AuthName \"Authentication Required\"\n",
    " AuthUserFile \"" + passfile + "\"\n",
    " Require valid-user\n",
    "</Location>\n"]
    y.writelines(lines)
    y.close()
def restart():
    # Restart Apache so the new auth configuration takes effect;
    # the exit status is collected but not inspected.
    retcode = commands.getstatusoutput("/etc/init.d/apache2 restart")
# Scan the Apache sysconfig for an active (uncommented) APACHE_MODULES
# line that already loads auth_basic; only then prompt for credentials,
# append the <Location /fbc> auth block, and restart Apache.
for line in open("/etc/sysconfig/apache2","r"):
    if "APACHE_MODULES" in line:
        if line[0] == "#":
            pass
        else:
            if "auth_basic" in line:
                getuser()
                writefile(passfile)
                restart()
| Python |
#!/usr/bin/env python
# Copyright 2010 Morris Blackham
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import smtplib
import email.utils
from email.mime.text import MIMEText
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email import Utils, Encoders
import mimetypes, sys
def attachment(filename):
    """Build a MIME part for *filename*, base64-encoding non-text files.

    Returns a MIMEText part for text/* content, otherwise a MIMEBase
    part with a Content-Disposition attachment header.
    """
    fd = open(filename, 'rb')
    try:
        data = fd.read()
    finally:
        fd.close()
    mimetype, mimeencoding = mimetypes.guess_type(filename)
    # fall back to a generic type when the name is unknown or the file
    # is already content-encoded (e.g. .gz)
    if mimeencoding or (mimetype is None):
        mimetype = 'application/octet-stream'
    maintype, subtype = mimetype.split('/')
    if maintype == 'text':
        retval = MIMEText(data, _subtype=subtype)
    else:
        retval = MIMEBase(maintype, subtype)
        # BUG FIX: was retval.set_paylod(...) (typo), which raised
        # AttributeError for every non-text attachment
        retval.set_payload(data)
        Encoders.encode_base64(retval)
    retval.add_header('Content-Disposition', 'attachment', filename = filename)
    return retval
def send(smtphost, sender, recipient, subject, message, filename):
#print filename
if filename == 'blank':
msg = MIMEText(message)
body = MIMEText(message, _subtype='plain')
else:
msg = MIMEMultipart()
body = MIMEText(message, _subtype='plain')
msg.attach(body)
msg.attach(attachment(filename))
msg['TO'] = sender
msg['From'] = recipient
msg['Subject'] = subject
msg['Date'] = Utils.formatdate(localtime = 1)
msg['Message-ID'] = Utils.make_msgid()
#print msg.as_string()
server = smtplib.SMTP(smtphost)
#server.set_debuglevel(True)
try:
server.sendmail(sender, [recipient], msg.as_string())
except:
print "connection to %s failed" % smtphost
finally:
server.quit()
| Python |
#!/usr/bin/env python
# Copyright 2010 Morris Blackham
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lxml import etree
import time
import datetime
from datetime import date, timedelta
from time import strftime
import os
import shutil
#import logging
import sys
import sendemail
import commands
import fileinput
# Date/time format strings used throughout the script.
dateformat = "%m-%d-%y"
timeformat = "%H:%M"
shortformat = ('%m-%d')
sp = " "
# rundate is variable for testing, it gets incremented on each run
# when runtest.py is launched
# use for runtest.py
#day = sys.argv[1]
#rundate = datetime.datetime.strptime(day, dateformat)
# use for normal mode: today's date, truncated to the date format
now = datetime.date.today()
day = now.strftime(dateformat)
rundate = datetime.datetime.strptime(day, dateformat)
# timestamp prefix for every log line (fixed at import time)
logdate = rundate.strftime(shortformat) + sp + datetime.datetime.now().strftime(timeformat) + " : "
# Set date/time formats for file names and timestamps
todayLong = rundate.strftime(dateformat)
yesterday = (rundate - timedelta(1)).strftime(dateformat)
msg = ""
logFile = ""
configfile = "/etc/opt/fbc/fbc.xml"
# Load the XML config.  NOTE(review): on failure this only prints —
# every later 'root'/'tree' reference then raises NameError.
try:
    tree = etree.parse(configfile)
    root = tree.getroot()
except:
    print("config file %s file not found" % configfile)
#setup logging
try:
logpath = os.path.normpath(root.find("logpath").text)
except:
logpath = os.getcwd()
if not os.path.isdir(logpath):
try:
os.makedirs(logpath)
except:
print("Unable to create log path %s" % logpath)
loglevel = root.find("loglevel").text
# check if all the binaries and other scripts are in place
gwtmstmp = "/opt/novell/groupwise/agents/bin/gwtmstmp"
dbscript = "/opt/fbc/dbcopy.sh"
if not os.path.isfile(dbscript):
print 'dbcopy.sh script not found, Please install before continuing.'
db = "/opt/novell/groupwise/agents/bin/dbcopy"
if not os.path.isfile(db):
print 'dbcopy not found. Please install before continuing.'
tarscript = "/opt/fbc/fbctar.sh"
if not os.path.isfile(tarscript):
print("Tar shell script not found. Please check installation")
# assign vars from the config file array
backuppath = root.find("backuppath").text
# see if gwtimestamp is enabled
gwts = root.find("gwtimestamp").text
if gwts == "yes":
if not os.path.isfile(gwtmstmp):
print("gwtmstmp binary not found, You must install GW agents to set timestamp")
gwts == "no"
maxinc = int(root.find("maxinc").text)
mountpath = os.path.normpath(root.find("mountpath").text)
offlinepath = os.path.normpath(root.find("offlinepath").text)
temppath = os.path.normpath(root.find("temppath").text)
tardirectory = os.path.normpath(root.find("tarpath").text)
# TO DO: need to find a way to age out old tar files
#tarage = root.find("tarage").text
startday = root.find("startday").text
sendlog = root.find("sendlog").text
smtphost = root.find("smtphost").text
recipient = root.find("emailaddr").text
sp = " "
lockfile = "/etc/opt/fbc/fbc.lock"
class logger:
    """Tiny append-to-file logger.

    Each line is prefixed with the module-global 'logdate' timestamp.
    error() is fatal: it logs, prints, and quits the program.
    """
    def __init__(self, logFile):
        self.logFile = logFile

    def _write(self, msg):
        # append one timestamped line, opening/closing per call
        handle = open(self.logFile, 'a')
        handle.write(logdate + msg + '\n')
        handle.close()

    def debug(self, msg):
        # only emitted when the config loglevel is "Verbose"
        if loglevel == "Verbose":
            self._write(msg)
        return

    def info(self, msg):
        self._write(msg)
        return

    def error(self, msg):
        self._write(msg)
        self._write("Critical Error encountered! Exiting program.")
        print("Critical ERROR encountered: %s " % msg)
        print("Exiting program!")
        quit()
def senderror(errortext):
    # Email an error notice to the configured recipient, no attachment
    # (the 'blank' sentinel tells sendemail.send to skip attaching).
    subject = 'FreebieCopy error found on %s' % todayLong
    message = """\
FreebieCopy encountered the following error:
%s
""" % errortext
    sendemail.send(smtphost, 'admin@freebie.blah.com', recipient, subject, message, 'blank')
    return
def removeall(path):
    # Delete an entire directory tree if it exists; no-op otherwise.
    if os.path.isdir(path):
        shutil.rmtree(path)
    return
def rewritelock(replacestring):
    # Replace the first line of the lock file with the name of the object
    # currently being backed up, so the web admin UI can show progress.
    f = open(lockfile,'r')
    contents = f.readline()  # first line only; assumed to be the whole file
    f.close()
    # fileinput with inplace=1 redirects stdout into the file being edited
    for line in fileinput.input(lockfile, inplace=1):
        if contents in line:
            # NOTE(review): if the lock file starts empty, contents == ""
            # matches every line — confirm the file is always seeded with
            # "blank" by main() first
            line = line.replace(contents,(replacestring + "\n"))
        sys.stdout.write(line)
def tarcleanup():
    # TO DO: age out old tar files or ship them to offline storage
    # (scp or similar) — possibly as a separate shell script.
    # Currently a deliberate no-op.
    return
def checkspace(source, volume):
    """Abort (via log.error, which quits) unless *volume* has at least
    twice the total size of the files under *source* free."""
    # walk the source tree and accumulate the total file size
    total = 0
    for (dirpath, dirnames, filenames) in os.walk(source):
        for name in filenames:
            fullname = os.path.join(dirpath, name)
            if os.path.exists(fullname):
                total += os.path.getsize(fullname)
    # double it to make sure we really have enough room
    needed = total * 2
    # available space on the file system holding the target directory
    stats = os.statvfs(volume)
    available = stats.f_bavail * stats.f_frsize
    if available <= needed:
        errortext = "Insufficient disk space to continue! Exiting"
        logging.error(errortext)
        log.error(errortext)
    return
def bktype():
    """Decide object type (domain/po) and backup type for this run.

    Sets module globals:
      object     -- 1 for a post office, 0 for a domain
      backuptype -- 0 full domain, 1 full po, 2 incremental po
      currentinc -- current incremental counter (post offices only)
    Reads the current <server> element from the module global 'e'.
    """
    global backuptype
    global object
    global objectdir
    global currentinc
    logging.debug("Determining object type and backup type")
    # find if it's a domain or post office
    if e.find("type").text == "po":
        object = 1
        currentinc = e.find("currentinc").text
        # have to set current inc to something or script blows chunks;
        # an empty counter forces a full backup (currentinc == maxinc below)
        if currentinc == None:
            currentinc = maxinc
        else:
            currentinc = int(e.find("currentinc").text)
    elif e.find("type").text == "domain":
        object = 0
    else:
        logging.error("Object type is not set to domain or po, please check configuration")
    # !!!!!!!
    # next line remarked out for testing purposes.. need to unrem for ship.
    #if rundate.strftime("%a") == startday or currentinc == maxinc:
    if object == 0:
        # domains always get a full backup
        backuptype = 0
        logging.info("Performing a full backup for %s." % e.find("name").text)
    elif object == 1:
        if currentinc == maxinc:
            # incremental counter exhausted: start a fresh full backup
            backuptype = 1
            logging.info("Performing a full backup for %s." % e.find("name").text)
        # else if the current incremental is less than the max incremental setting
        elif currentinc < maxinc and object == 1:
            logging.info("Performing an incremental backup for %s." % e.find("name").text)
            backuptype = 2
        # else if none of the above match, we'll do a full backup
        else:
            logging.info("Performing a full backup for %s." % e.find("name").text)
            backuptype = 0
    return
def getsource():
    """Determine and prepare the dbcopy source directory.

    Sets globals: 'source' (local mount point of the remote dom/po) and
    'objectdir' (basename of the remote path).  Mounts the remote file
    system (samba or ncp) when it is not already mounted; for a po
    incremental builds a temp directory of symlinks so dbcopy only walks
    the databases (offiles is copied separately by prepareblob()).
    """
    global source
    global objectdir
    # gets the domain or po directory name from the remote path
    if e.find("remotepath").text != None:
        objectdir = os.path.basename(e.find("remotepath").text)
    else:
        # BUG FIX: placeholder was '$s' (shell style) and printed literally
        logging.error("Remote Path is not set for %s. Please check configuration." % objectdir)
    # sets the source path to the mount point, creates it if not found
    if mountpath != None:
        source = os.path.join(mountpath, objectdir)
        logging.info("Source directory is %s " % source)
    else:
        logging.error("Mount point path not set in config file. Please check configuation")
    if not os.path.isdir(source):
        try:
            os.makedirs(source)
            logging.debug("Source directory not found. Creating: %s" % source)
        except:
            logging.error("Unable to create mount path: %s" % source)
    # the presence of the object's .db file tells us whether the remote
    # file system is already mounted
    if object == 1:
        dbfile = "wphost.db"
    elif object == 0:
        dbfile = "wpdomain.db"
    else:
        logging.error("object type is not set")
    dbpath = os.path.join(source, dbfile)
    # if the .db is not found, mount the remote file system
    if not os.path.isfile(dbpath):
        logging.debug("Remote directory is not mounted - mounting %s" % source)
        # gets the user, password, remotepath and IP from xml
        remoteuser = e.find("remoteuser").text
        if remoteuser == None:
            logging.error("Remote user not found for this object. Check configuration")
        remotepass = e.find("remotepass").text
        if remotepass == None:
            logging.error("Remote password for this object not found. Check configuration")
        ipaddr = e.find("ipaddr").text
        if ipaddr == None:
            logging.error("IP Address for this object not found. Check configuration")
        remotepath = e.find("remotepath").text
        if remotepath == None:
            logging.error("Remote path not found. Check configuration")
        mounttype = e.find("mounttype").text
        # find mount type; samba or ncp
        if mounttype == None:
            logging.error("Mount type not found. Check configuration")
        elif mounttype == "samba" or mounttype == "Samba":
            mount = "/sbin/mount.cifs"
            # sets up the command to mount the remote file system
            mountargs = ("-o username=" + remoteuser + ",password=" + remotepass + ",ip=" + ipaddr + ",rw")
            mountcmd = (mount + sp + remotepath + sp + source + sp + mountargs)
        else:
            mount = "ncpmount"
            if not os.path.isfile('/usr/bin/ncpmount'):
                logging.error("ncpmount command not found. please install ncpfs package")
            # need to check and accomodate what the remote path ncp string is
            if remotepath.startswith('//'):
                x = remotepath.split('/',3)
            elif remotepath.startswith('/'):
                # BUG FIX: was 'x - remotepath.split(...)' — the result was
                # discarded, leaving x unbound and raising NameError below
                x = remotepath.split('/',2)
            else:
                x = remotepath.split('/',1)
            server = x[-2]
            vol = x[-1]
            mountargs = ("-S " + server + " -A " + ipaddr + " -V " + vol + " -U " + remoteuser + " -P " + remotepass)
            mountcmd = (mount + sp + mountargs + sp + source)
        # mount the remote file system
        try:
            os.system(mountcmd)
        except:
            # BUG FIX: placeholder was '$s'
            logging.error("Mount failed for %s" % mountcmd)
    # create symlinks from the mounted po to a temp area so dbcopy only
    # walks the user and msg db's -- only needed for a PO incremental
    if backuptype == 2:
        logging.debug("Backup type is incremental for post office")
        # sets the link path under the fbc/tmp dir, then creates it if not found
        linkpath = os.path.join(temppath, objectdir)
        logging.debug("DEBUG: temporary link path is %s" % linkpath)
        if not os.path.isdir(linkpath):
            logging.info("Creating temporary directory with sym links %s for incremental backup." % linkpath)
            try:
                os.makedirs(linkpath)
            except:
                logging.error("Could not create temporary directory %s" % linkpath)
        # files/dirs to link — everything except the offiles dir
        gwlist = (["wphost.db", "gwpo.dc", "ngwguard.db", "ngwguard.rfl", "ngwguard.fbk","ngwguard.dc","ngwcheck.db", "ofuser", "ofmsg", "gwdms", "ofviews"])
        for a in gwlist:
            sourcelink = os.path.join(source, a)
            targetlink = os.path.join(linkpath, a)
            try:
                if not os.path.islink(targetlink):
                    os.symlink(sourcelink, targetlink)
            except:
                logging.error("Could not create symbolic link for %s" % targetlink)
                continue
        # now set the dbcopy source to the tmp/po directory
        source = linkpath
        logging.info("Source path for incremental backup is: %s" % source)
    return
def needtar():
    """Tar up stale backups in this object's base directory.

    For a new full po backup (backuptype 1) every incremental and every
    superseded full backup is tarred; for a domain (object 0) the old
    dated directory and anything older than maxinc days is tarred.
    """
    global basepath
    logging.debug("Determining if old backup's should be tarred.")

    def tarprep(tardir):
        # build the tar source path and hand off to createtar()
        global tarsource
        tarsource = os.path.join(basepath, tardir)
        logging.info("Tarring existing backup directory: %s " % tardir)
        createtar(tarsource, tardir)
        return

    # for a full po backup type
    if backuptype == 1:
        if basepath != None:
            try:
                entries = sorted(os.listdir(basepath))
            except:
                logging.error("getting list of directory %s failed" % basepath)
            # date prefix (mm-dd-yy) of the last full backup path
            p1 = os.path.basename(e.find("currentfullpath").text)[0:8]
            # split the listing into incremental and full backup dirs;
            # skip names not shaped like mm-dd-yy-(inc|full) to avoid
            # an IndexError on stray files
            incdir = [elem for elem in entries if len(elem.split('-')) > 3 and elem.split('-')[3] == 'inc']
            fulldir = [elem for elem in entries if len(elem.split('-')) > 3 and elem.split('-')[3] == 'full']
            # tar all incrementals first ...
            for y in incdir:
                tarprep(y)
            # ... then the full backups, except the current one and today's target
            for z in fulldir:
                backdir = z[0:8]
                if p1 == None or z == os.path.basename(target):
                    pass
                else:
                    if backdir != p1:
                        tarprep(z)
        else:
            logging.error("Basepath not set, Can't determine what directory to check.")
    # now tar old domain backups
    elif object == 0:
        oldpath = os.path.join(target, "wpdomain.db")
        if os.path.isfile(oldpath):
            # BUG FIX: was 'tarsouce = target' (typo), so createtar() was
            # invoked with a stale or undefined 'tarsource'
            tarsource = target
            tardir = os.path.basename(target)
            createtar(tarsource, tardir)
        if currentfulldate != None:
            # last full backup date from the xml is the reference point
            latest = datetime.datetime.strptime(currentfulldate, dateformat)
            for y in os.listdir(basepath):
                try:
                    t = datetime.datetime.strptime(y, dateformat)
                except:
                    # not a dated backup directory — ignore
                    continue
                # tar any dated directory older than latest minus maxinc days
                if (latest - t) > timedelta(maxinc - 1):
                    tarsource = os.path.join(basepath, y)
                    createtar(tarsource, y)
    return
def createtar(tarsource, tardir):
    # Tar an existing backup directory via the tar shell script, then
    # remove the original directory.
    logging.info("Tarring existing backup in directory %s" % tardir)
    # make sure the per-object tar directory exists
    tarpath = os.path.join(tardirectory, objectdir)
    if not os.path.isdir(tarpath):
        try:
            os.makedirs(tarpath)
        except:
            logging.error("Failed to create base tar directory %s" % tarpath)
    # archive name is the date portion (first 8 chars) of the backup dir
    archive = os.path.join(tarpath, tardir[0:8]) + ".tar"
    tarcmd = tarscript + sp + archive + sp + tarsource + ">/dev/null"
    logging.info("Creating tarfile: %s" % archive)
    # verify disk space, then hand off to the tar shell script
    checkspace(tarsource, tardirectory)
    try:
        os.system(tarcmd)
    except:
        logging.error("Creation of tar file %s failed" % archive)
    logging.info("Removing old directory %s " % tarsource)
    # now remove the old directory
    removeall(tarsource)
    # belt and braces: complain if the directory survived removeall()
    if os.path.isdir(tarsource):
        try:
            logging.error("%s directory is still here, what up with that?" % tarsource)
            os.rmdir(tarsource)
        except:
            logging.info("Failed to remove old directory %s" % tarsource)
    return
def prepareblob():
    # Prepare offiles copying for an incremental backup: the offiles blob
    # is dbcopy'd into the most recent FULL backup, reached via a symlink
    # placed in today's incremental target.
    global blobtarget
    global blobsource
    if e.find("currentfullpath") == None:
        logging.debug("DEBUG: Current full path not set. It should be!")
        quit()
    blobtarget = os.path.normpath(e.find("currentfullpath").text)
    blobsource = os.path.join(mountpath, objectdir)
    symsource = os.path.join(blobtarget, "offiles")
    symtarget = os.path.join(target, "offiles")
    try:
        os.symlink(symsource, symtarget)
        logging.info("Creating symbolic link to offiles")
    except:
        logging.error("Failed to create offiles symlink")
    return
def gettarget():
    """Determine the dbcopy target directory and update the config tree.

    Sets globals: target, basepath, restorepath; resets or bumps
    currentinc; for incrementals prepareblob() also sets
    blobsource/blobtarget.  Superseded backups are tarred via needtar().
    """
    global restorepath
    global target
    global basepath
    global blobsource
    global blobtarget
    global tarsource
    global currentinc
    # create base directory for xml file
    basepath = os.path.join(backuppath, objectdir)
    for el in e.iter("basepath"):
        el.text = basepath
    # domain: target is backup/<domain>/<date>
    if object == 0:
        target = os.path.join(basepath, todayLong)
        logging.info("DBCopy target path is %s" % target)
        currentfullpath = target
        for el in e.iter("currentfullpath"):
            el.text = currentfullpath
        for el in e.iter("currentfulldate"):
            el.text = currentfulldate
    # for po full
    elif backuptype == 1:
        target = os.path.join(basepath, todayLong + '-full')
        # set currentfullpath to target, then reset currentinc to 1
        currentfullpath = target
        for el in e.iter("currentfullpath"):
            el.text = currentfullpath
        for el in e.iter("currentfulldate"):
            el.text = currentfulldate
        currentinc = "1"
        for el in e.iter("currentinc"):
            el.text = currentinc
    elif backuptype == 2:
        target = os.path.join(basepath, todayLong + "-inc")
        # set the current full date
        for el in e.iter("currentfulldate"):
            el.text = currentfulldate
        currentinc += 1
        for element in e.iter("currentinc"):
            element.text = str(currentinc)
    # NOTE(review): if none of the branches above matched, 'target' is the
    # stale value from the previous object (or unbound on the first one) —
    # confirm bktype() can never produce that combination.
    # create the target dir if not found
    logging.info("Target directory for backup is %s" % target)
    if not os.path.isdir(target):
        logging.debug("Target directory not found, creating %s" % target)
        os.makedirs(target)
    needtar()
    restorepath = target
    for el in e.iter("restorepath"):
        el.text = restorepath
    if backuptype == 2:
        prepareblob()
    return
def docopy(source, target):
checkspace(source, target)
dbargs = source + sp + target
dbcmd = dbscript + sp + str(backuptype) + sp + dbargs + sp + yesterday
result = commands.getstatusoutput(dbcmd)
if result[0] == 0:
pass
else:
print 'DBCopy failed with error status %s' % result[0]
return
def copy():
    """Run the backup copy for the current object.

    Relies on module globals prepared by bktype()/getsource()/gettarget():
    source/target paths, backuptype, blobsource/blobtarget, restorepath.
    Side effects: runs dbcopy via docopy(), optionally stamps the post
    office with gwtmstmp, refreshes the restore symlink and unmounts the
    remote file system.
    """
    # get time for timestamp before the copy starts
    timestamp = rundate.strftime(timeformat)
    logging.info("Starting DBCopy for %s" % e.find("name").text)
    # call the docopy function
    docopy(source, target)
    if backuptype == 2:
        # incremental: copy offiles into the last full po backup
        logging.info("Starting DBCopy to copy OFFILES for %s" % e.find("name").text)
        docopy(blobsource, blobtarget)
        # make sure we get the wphost.db
        hostsource = os.path.join(source, "wphost.db")
        hosttarget = os.path.join(target, "wphost.db")
        try:
            shutil.copyfile(hostsource, hosttarget)
        except:
            logging.error("Failed to copy wphost.db!")
    # check to see if the timestamp is required, then run the gwtmstmp command
    if object == 1 and gwts == "yes":
        logging.info("Setting GroupWise time stamp for %s" % e.find("name").text)
        tssource = os.path.join(mountpath, objectdir)
        tsargs = (" -p " + tssource + " -s -t " + timestamp)
        tscmd = gwtmstmp + tsargs
        try:
            os.system(tscmd)
        except:
            # BUG FIX: was '% tsmd' — an undefined name that raised
            # NameError inside this handler instead of logging the failure
            logging.error("%s timestamp command failed." % tscmd)
    else:
        logging.debug("GW timestamp setting not enabled. Not setting timestamp")
    # create a restore path if not found, then create a symlink pointing to
    # the most current backup
    offlinedir = os.path.join(offlinepath, objectdir)
    if not os.path.isdir(offlinepath):
        os.makedirs(offlinepath)
    if os.path.islink(offlinedir):
        os.unlink(offlinedir)
    try:
        os.symlink(restorepath, offlinedir)
    except:
        logging.error("Creation of sym links failed")
    # unmount the source - can't mount a ncp server twice..
    if backuptype == 2:
        unmount = "umount " + blobsource
        # remove symlinks from the temp dir
        rmlink = os.path.join(temppath, objectdir)
        logging.debug("Removing symbolic links from %s" % rmlink)
        removeall(rmlink)
    else:
        unmount = "umount " + source
    # wait until the umount command succeeds.
    # NOTE(review): this loop retries forever; a wedged mount blocks the
    # whole run — consider a retry limit.
    command_result = commands.getstatusoutput(unmount)
    while command_result[0] != 0:
        time.sleep(10)
        command_result = commands.getstatusoutput(unmount)
    return
def writexml():
    """Write the modified XML tree back to the config file.

    A failure is fatal: logging.error() prints the message and quits
    the program (see the logger class).
    """
    try:
        tree.write(configfile, pretty_print=True)
    except:
        logging.error("Failed to update config file")
def main():
    """Drive one full FreebieCopy run.

    Writes run metadata back into the in-memory XML tree, maintains a
    lock file for the web admin UI, and for every <server> element runs
    the backup pipeline (bktype/getsource/gettarget/copy) with a
    per-object log file.
    """
    global e
    global currentfulldate
    global currentinc
    global objectname
    # NOTE: 'logging' is a module-global logger instance (the stdlib
    # logging module is NOT imported here) — it is rebound per object.
    global logging
    global log
    global mainlog
    # create instance for the main log file
    mainlog = (os.path.join(logpath, datetime.datetime.strftime(rundate, shortformat)) + ".log")
    log = logger(mainlog)
    # and start the main log file
    log.info("Starting FreebieCopy for %s" % rundate.strftime(dateformat))
    log.info("===========================================")
    log.info(" ")
    # record run timestamps in the config tree
    currentfulldate = todayLong
    lastrundate = rundate.strftime(dateformat)
    for el in root.iter("lastrundate"):
        el.text = lastrundate
    lastrunstart = (rundate.strftime(dateformat) + sp + datetime.datetime.now().strftime(timeformat))
    for el in root.iter("lastrunstart"):
        el.text = lastrunstart
    # create a lock file so web admin doesn't write to the xml file during
    # running of this script
    if not os.path.isfile(lockfile):
        lock = open(lockfile, 'w')
        text = "blank"
        try:
            lock.write(text)
            log.info("Creating lockfile: %s" % lockfile)
        except:
            log.error("Failed to create lock file: %s" % lockfile)
        lock.close()
    # iterate thru all GW objects in the xml and
    # do the copy
    for e in root.findall("./server"):
        restorepath = e.find("restorepath").text
        starttime = (rundate.strftime(dateformat) + sp + datetime.datetime.now().strftime(timeformat))
        for el in e.iter("starttime"):
            el.text = starttime
        objectname = e.find("name").text
        # create instance for domain/po log file
        agentlogpath = os.path.join(logpath, objectname)
        if not os.path.isdir(agentlogpath):
            try:
                os.makedirs(agentlogpath)
            except:
                print("Unable to create log path %s" % agentlogpath)
        agentlog = (os.path.join(agentlogpath, datetime.datetime.strftime(rundate, shortformat)) + ".log")
        # 'logging' is for dom/po log, 'log' is for main log
        logging = logger(agentlog)
        log.info("Performing backup for %s" % objectname)
        logging.info("===========================================")
        logging.info(" ")
        logging.info("Starting backup for %s" % objectname)
        # add object name to lock file, so web admin can see what's currently running.
        rewritelock(objectname)
        # the four phases of a backup for this object
        bktype()
        getsource()
        gettarget()
        copy()
        endtime = rundate.strftime(dateformat) + sp + datetime.datetime.now().strftime(timeformat)
        for el in e.iter("endtime"):
            el.text = endtime
        logging.info("Backup for %s complete." % objectname)
        logging.info("-----------------------------------------------")
        log.info("Backup for %s complete." % objectname)
        log.info("-----------------------------------------------")
    lastrunend = rundate.strftime(dateformat) + sp + datetime.datetime.now().strftime(timeformat)
    for el in root.iter("lastrunend"):
        el.text = lastrunend
    log.info("Writing updates to config file")
    writexml()
    tarcleanup()
    # kill the lock file
    if os.path.isfile(lockfile):
        os.remove(lockfile)
        log.info("Removing lock file.")
    else:
        log.error("Unable to delete lock file!!")
    log.info(" ")
    log.info("To see log files for domains and post office,")
    log.info("Go to %s/ domainname/poname" % logpath)
    log.info(" ")
    log.info("========== FreebieCopy Finished ===========")
    log.info(" ")
    return
def sendlog():
    """Email the main log file to the configured admin recipient.

    Relies on module globals smtphost/recipient (from fbc.xml) and
    mainlog (set by main()).  Failure to send is logged, never fatal.
    """
    subject = 'FreebieCopy status for %s' % todayLong
    message = 'Attached is the log file for this run'
    sender = 'admin@freebie.blah.com'
    try:
        sendemail.send(smtphost, sender, recipient, subject, message, mainlog)
        log.info("Log file sent to %s" % recipient)
    except Exception:
        # Narrowed from a bare except: still best-effort, but no longer
        # swallows SystemExit/KeyboardInterrupt.
        log.info("Unable to send log file")
    log.info("==========================================")
# now everything's defined, let's run this puppy!
main()
# BUG FIX: the config value previously read into the name `sendlog` is
# shadowed by `def sendlog()` above, so `sendlog == "Yes"` compared a
# function to a string and the log was never mailed.  Re-read the flag
# from the parsed config tree instead.
if root.find("sendlog").text == "Yes":
    sendlog()
| Python |
#!/usr/bin/env python
# Copyright 2010 Morris Blackham
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lxml import etree
import os
import sys
import commands
# NOTE(review): `global` at module level is a no-op, and `type` shadows the
# builtin -- kept as-is; flagged for a later cleanup.
global path
global ipaddr
global type
#set variables from command line args
# usage: <script> <backup-path> <secondary-ip> <domain|po>
(path, ipaddr, type) = sys.argv[1:]
#set vars to GW agent binaries
gwpoa = "/opt/novell/groupwise/agents/bin/gwpoa";
if not os.path.isfile(gwpoa):
    print "GroupWise agents not found - Please install before continuing"
    quit()
gwmta = "/opt/novell/groupwise/agents/bin/gwmta";
if not os.path.isfile(gwmta):
    print "GroupWise agents not found - Please install before continuing"
    quit()
#load config file
configfile = "/etc/opt/fbc/fbc.xml"
try:
    tree = etree.parse(configfile)
    root = tree.getroot()
except:
    print("config file %s file not found" % configfile)
# get directory name for domain or po
name = os.path.basename(os.path.dirname(path))
print "name is %s" % name
def getsubnet():
global prefix
subcmd = "ip addr show eth0 | grep inet | awk '{ print substr($2, length($2)-1)}'"
r = commands.getstatusoutput(subcmd)
if r[0] != 0:
print "command failed"
else:
prefix = r[1]
print prefix
return(prefix)
def checkip():
    """Ensure the secondary IP (module global `ipaddr`) is usable on this box.

    If something answers the ping, the address must already be bound
    locally; otherwise it belongs to another host and we abort.  If
    nothing answers, the address is added as a secondary on eth0 and
    recorded in /var/fbc/secip.txt for later cleanup.
    """
    # Ping ip address to see if we can add it as a secondary to this box.
    pingcmd = 'ping -c 3 %s' % ipaddr
    ping_result = commands.getstatusoutput(pingcmd)
    if ping_result[0] == 0:
        # Ping successful, but let's see if it's bound
        # to local machine
        ipgrep = "ip address show | grep %s" % ipaddr
        if commands.getstatusoutput(ipgrep)[0] == 0:
            # Address is already ours -- nothing to do.
            pass
        else:
            print "Ping succeeded for %s" % ipaddr
            print "and is not a local address, Can not add as a secondary ip."
            quit()
    else:
        # add secondary IP address
        # getsubnet() sets the global `prefix` used in the command below.
        getsubnet()
        ipadd = 'ip address add %s/%s dev eth0 label eth0.sec' % (ipaddr, prefix)
        ipadd_result = commands.getstatusoutput(ipadd)
        if ipadd_result[0] != 0:
            print "adding IP failed. exiting"
            quit()
        else:
            # Remember the address we added so it can be removed later.
            cfgfile = "/var/fbc/secip.txt"
            if not os.path.isfile(cfgfile):
                # NOTE(review): filewrite is never closed/flushed explicitly;
                # relies on interpreter exit to flush -- worth fixing.
                filewrite = open(cfgfile, 'w')
                text = ipaddr
                try:
                    filewrite.write(text)
                except:
                    print "could not create IP file!"
                    quit()
            else:
                print "file exists", cfgfile
    return
def loadgw():
    """Build (and currently only print) the command to start the GW agent.

    NOTE(review): `restorepath` is referenced below but every line that
    assigned it is commented out, so this function raises NameError as
    written.  The commented block (symlinking the backup into the
    restore area) presumably needs to be reinstated -- confirm intent
    before shipping.
    """
    # get the directory to set a sym link for the restore area
    #try:
    #    restorebase = root.find("offlinepath").text
    #    restorepath = restorebase + name
    #except:
    #    print "offline path is not set in %s. Can not continue" % configfile
    #print restorepath
    # create the directory if it doesn't exist
    #if not os.path.isdir(restorebase):
    #    os.makedirs(restorebase)
    # delete the symlink as it may be for a
    # different day's backup, then recreate it.
    #if os.path.islink(restorepath):
    #    try:
    #        os.remove(restorepath)
    #    except:
    #        print "Failed to remove symlink to %s", restorepath
    #try:
    #    os.symlink(path,restorepath)
    #except:
    #    print "Creating simlink to %s failed" % path
    # set some vars based on if it's a domain or a po
    if type == "domain":
        agent = gwmta
        agentargs = " --home %s -ip %s >& /dev/null" % (restorepath, ipaddr)
    elif type == "po":
        agent = gwpoa
        agentargs = " --home %s -ip %s --noconfig >& /dev/null" % (restorepath, ipaddr)
    else:
        print "Incorrect value for %s" % type
    # build the command to load the agent, then run the command
    agentcmd = "/sbin/startproc -f -t 2 %s %s" % (agent, agentargs)
    print agentcmd
    # NOTE(review): the actual launch is commented out -- the agent is
    # never started; only the command line is printed.
    #load_result = commands.getstatusoutput(agentcmd)
    #print load_result[0]
    #if load_result != 0:
    #    print "Loading agent failed"
    return
def main():
    """Entry point: verify/claim the secondary IP, then launch the GW agent."""
    checkip()
    loadgw()
    return
main()
| Python |
#!/usr/bin/env python
# Copyright 2010 Morris Blackham
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lxml import etree
import time
import datetime
from datetime import date, timedelta
from time import strftime
import os
import shutil
#import logging
import sys
import sendemail
import commands
import fileinput
dateformat = "%m-%d-%y"
timeformat = "%H:%M"
shortformat = ('%m-%d')
sp = " "
# rundate is variable for testing, it gets incremented on each run
# when runtest.py is launched
# use for runtest.py
#day = sys.argv[1]
#rundate = datetime.datetime.strptime(day, dateformat)
# use for normal mode
now = datetime.date.today()
day = now.strftime(dateformat)
rundate = datetime.datetime.strptime(day, dateformat)
logdate = rundate.strftime(shortformat) + sp + datetime.datetime.now().strftime(timeformat) + " : "
#Set date/time formats for file names and timestamps
todayLong = rundate.strftime(dateformat)
yesterday = (rundate - timedelta(1)).strftime(dateformat)
msg = ""
logFile = ""
configfile = "/etc/opt/fbc/fbc.xml"
try:
tree = etree.parse(configfile)
root = tree.getroot()
except:
print("config file %s file not found" % configfile)
#setup logging
try:
logpath = os.path.normpath(root.find("logpath").text)
except:
logpath = os.getcwd()
if not os.path.isdir(logpath):
try:
os.makedirs(logpath)
except:
print("Unable to create log path %s" % logpath)
loglevel = root.find("loglevel").text
# check if all the binaries and other scripts are in place
gwtmstmp = "/opt/novell/groupwise/agents/bin/gwtmstmp"
dbscript = "/opt/fbc/dbcopy.sh"
if not os.path.isfile(dbscript):
print 'dbcopy.sh script not found, Please install before continuing.'
db = "/opt/novell/groupwise/agents/bin/dbcopy"
if not os.path.isfile(db):
print 'dbcopy not found. Please install before continuing.'
tarscript = "/opt/fbc/fbctar.sh"
if not os.path.isfile(tarscript):
print("Tar shell script not found. Please check installation")
# assign vars from the config file array
backuppath = root.find("backuppath").text
# see if gwtimestamp is enabled
gwts = root.find("gwtimestamp").text
if gwts == "yes":
if not os.path.isfile(gwtmstmp):
print("gwtmstmp binary not found, You must install GW agents to set timestamp")
gwts == "no"
maxinc = int(root.find("maxinc").text)
mountpath = os.path.normpath(root.find("mountpath").text)
offlinepath = os.path.normpath(root.find("offlinepath").text)
temppath = os.path.normpath(root.find("temppath").text)
tardirectory = os.path.normpath(root.find("tarpath").text)
# TO DO: need to find a way to age out old tar files
#tarage = root.find("tarage").text
startday = root.find("startday").text
sendlog = root.find("sendlog").text
smtphost = root.find("smtphost").text
recipient = root.find("emailaddr").text
sp = " "
lockfile = "/etc/opt/fbc/fbc.lock"
class logger:
    """Minimal append-to-file logger, one instance per log file.

    Not the stdlib `logging` module: main() rebinds the global name
    `logging` to an instance of this class for the per-domain/PO log,
    while `log` holds the instance for the main log.  Every line is
    prefixed with the module-global `logdate` stamp.
    """
    def __init__(self, logFile):
        self.logFile = logFile
    def debug(self, msg):
        # Only emitted when the config's <loglevel> is "Verbose".
        if loglevel == "Verbose":
            # Use a context manager so the handle is closed even if the
            # write fails (original opened/closed by hand).
            with open(self.logFile, 'a') as logfile:
                logfile.write(logdate + msg + '\n')
        return
    def info(self, msg):
        with open(self.logFile, 'a') as logfile:
            logfile.write(logdate + msg + '\n')
        return
    def error(self, msg):
        # Fatal: record the message, announce the abort, and exit.
        with open(self.logFile, 'a') as logfile:
            logfile.write(logdate + msg + '\n')
            logfile.write(logdate + "Critical Error encountered! Exiting program.\n")
        print("Critical ERROR encountered: %s " % msg)
        print("Exiting program!")
        quit()
def senderror(errortext):
    """Mail an error notice (no attachment) to the configured admin."""
    body = """\
FreebieCopy encountered the following error:
%s
""" % errortext
    # 'blank' tells sendemail.send() to build a plain message with no
    # attachment part.
    sendemail.send(smtphost,
                   'admin@freebie.blah.com',
                   recipient,
                   'FreebieCopy error found on %s' % todayLong,
                   body,
                   'blank')
    return
def removeall(path):
    """Delete *path* and everything under it, if it is a directory.

    Non-directories (including missing paths) are silently ignored.
    """
    if os.path.isdir(path):
        shutil.rmtree(path)
def rewritelock(replacestring):
    """Replace the first line of the lock file with *replacestring*.

    Lets the web admin UI see which domain/PO is currently being
    backed up.
    """
    f = open(lockfile,'r')
    contents = f.readline()
    f.close()
    # fileinput with inplace=1 redirects stdout into the file being
    # rewritten, so sys.stdout.write() below emits the new file contents.
    for line in fileinput.input(lockfile, inplace=1):
        if contents in line:
            line = line.replace(contents,(replacestring + "\n"))
        sys.stdout.write(line)
def tarcleanup():
    """Placeholder: age out / offload old tar archives (not implemented)."""
    # TO DO: need some code here to check the tar directory and either delete
    # old tars or scp (or some other method) to offline storage
    # OR, make it a separate script, prbly a shell script
    return
def checkspace(source, volume):
    """Abort the run unless *volume* has room for ~2x the *source* tree.

    source: directory whose contents are about to be copied/tarred.
    volume: any path on the filesystem that will receive the data.

    On insufficient space this calls logger.error(), which exits the
    program; otherwise it returns None.
    """
    # Walk the source tree and total up the file sizes.
    # (Renamed loop variables: the original shadowed `path` and `file`.)
    fsize = 0
    for (dirpath, dirnames, filenames) in os.walk(source):
        for fname in filenames:
            fullname = os.path.join(dirpath, fname)
            # exists() guards against dangling symlinks seen mid-walk.
            if os.path.exists(fullname):
                fsize += os.path.getsize(fullname)
    # multiply by 2 to make sure we really have enough
    safesize = fsize * 2
    # get available diskspace for volume where target directory is located
    stats = os.statvfs(volume)
    freespace = stats.f_bavail * stats.f_frsize
    # Inverted the original empty-pass branch: act only on the error case.
    if freespace <= safesize:
        errortext = "Insufficient disk space to continue! Exiting"
        # NOTE: logging.error() exits the program, so log.error() below is
        # effectively unreachable; kept to match the original call order.
        logging.error(errortext)
        log.error(errortext)
    return
def bktype():
    """Decide object type and backup type for the current server element `e`.

    Sets globals:
      object     -- 1 for a post office, 0 for a domain
      backuptype -- 0 full (domain/fallback), 1 full (PO), 2 incremental (PO)
      currentinc -- incremental counter from the XML (int, or maxinc if unset)
    """
    # Function to set what type of backup will be run; full or incremental?
    global backuptype
    global object
    global objectdir
    global currentinc
    logging.debug("Determining object type and backup type")
    #find if it's a domain or post office
    if e.find("type").text == "po":
        object = 1
        #logging.debug("object = post office")
        currentinc = e.find("currentinc").text
        #have to set current inc to something or script blows chunks
        if currentinc == None:
            # Treat a blank counter as "time for a full backup".
            currentinc = maxinc
        else:
            currentinc = int(e.find("currentinc").text)
    elif e.find("type").text == "domain":
        object = 0
        #logging.debug("object type is domain")
    else:
        logging.error("Object type is not set to domain or po, please check configuration")
    # check to see if it's Sun, or other defined start day
    # or if the currentinc is blank,
    #logging.debug("DEBUG: current incremental number is %s" % currentinc)
    # !!!!!!!
    # next line remarked out for testing purposes.. need to unrem for ship.
    #if rundate.strftime("%a") == startday or currentinc == maxinc:
    if object == 0:
        # Domains always get a full backup.
        backuptype = 0
        logging.info("Performing a full backup for %s." % e.find("name").text)
    elif object == 1:
        if currentinc == maxinc:
            backuptype = 1
            logging.info("Performing a full backup for %s." % e.find("name").text)
        # else if the current incremental is less than the max incremental setting
        elif currentinc < maxinc and object == 1:
            logging.info("Performing an incremental backup for %s." % e.find("name").text)
            backuptype = 2
        # else if none of the above match, we'll do a full backup
        else:
            logging.info("Performing a full backup for %s." % e.find("name").text)
            backuptype = 0
    #logging.debug("DEBUG: backup type is %s" % backuptype)
    return
def getsource():
    """Resolve (and if necessary mount) the dbcopy source directory.

    Sets globals:
      objectdir -- basename of the remote domain/PO directory
      source    -- local mount point for the remote data; for PO
                   incrementals, a temp directory of symlinks so dbcopy
                   only walks the databases (offiles handled separately).
    """
    # defines the source directory for dbcopy
    global source
    global objectdir
    # gets the domain or po directory name from the remote path
    if e.find("remotepath").text != None:
        objectdir = os.path.basename(e.find("remotepath").text)
    else:
        # BUG FIX: placeholder was "$s" (shell style), not "%s".
        # NOTE(review): if this branch runs, objectdir may itself be unset.
        logging.error("Remote Path is not set for %s. Please check configuration." % objectdir)
    #sets the source path to the mount point, creates it if not found under the mount point
    if mountpath != None:
        source = os.path.join(mountpath, objectdir)
        logging.info("Source directory is %s " % source)
    else:
        logging.error("Mount point path not set in config file. Please check configuation")
    if not os.path.isdir(source):
        try:
            os.makedirs(source)
            logging.debug("Source directory not found. Creating: %s" % source)
        except:
            logging.error("Unable to create mount path: %s" % source)
    # checks if this server is a dom or po, then checks
    # if the .db is there, this is how to check if
    # the remote filesystem is mounted
    if object == 1:
        dbfile = "wphost.db"
    elif object == 0:
        dbfile = "wpdomain.db"
    else:
        logging.error("object type is not set")
    dbpath = os.path.join(source, dbfile)
    # if the .db is not found, let's mount it.
    if not os.path.isfile(dbpath):
        logging.debug("Remote directory is not mounted - mounting %s" % source)
        #gets the user, password, remotepath and IP from xml and
        # mounts the dir
        remoteuser = e.find("remoteuser").text
        if remoteuser == None:
            logging.error("Remote user not found for this object. Check configuration")
        remotepass = e.find("remotepass").text
        if remotepass == None:
            logging.error("Remote password for this object not found. Check configuration")
        ipaddr = e.find("ipaddr").text
        if ipaddr == None:
            logging.error("IP Address for this object not found. Check configuration")
        remotepath = e.find("remotepath").text
        if remotepath == None:
            logging.error("Remote path not found. Check configuration")
        mounttype = e.find("mounttype").text
        #find mount type; samba or ncp
        if mounttype == None:
            logging.error("Mount type not found. Check configuration")
        elif mounttype == "samba" or mounttype == "Samba":
            mount = "/sbin/mount.cifs"
            # sets up the command to mount the remote file system
            mountargs = ("-o username=" + remoteuser + ",password=" + remotepass + ",ip=" + ipaddr + ",rw")
            #logging.debug("mount arguments are %s" % mountargs)
            mountcmd = (mount + sp + remotepath + sp + source + sp + mountargs)
        else:
            mount = "ncpmount"
            if not os.path.isfile('/usr/bin/ncpmount'):
                logging.error("ncpmount command not found. please install ncpfs package")
            #need to check and accomodate what the remote path ncp string is
            if remotepath.startswith('//'):
                x = remotepath.split('/',3)
            elif remotepath.startswith('/'):
                # BUG FIX: was `x - remotepath.split('/',2)` (a subtraction,
                # NameError at runtime) instead of an assignment.
                x = remotepath.split('/',2)
            else:
                x = remotepath.split('/',1)
            server = x[-2]
            vol = x[-1]
            mountargs = ("-S " + server + " -A " + ipaddr + " -V " + vol + " -U " + remoteuser + " -P " + remotepass)
            mountcmd = (mount + sp + mountargs + sp + source)
        # mount the remote file system
        try:
            os.system(mountcmd)
        except:
            # BUG FIX: placeholder was "$s", not "%s".
            logging.error("Mount failed for %s" % mountcmd)
    # create some symlinks from the mounted po to a temp area to
    # only do dbcopy of the user and msg.db's
    # only needed for a PO incremental backup
    if backuptype == 2:
        logging.debug("Backup type is incremental for post office")
        # sets the link path under the fbc/tmp dir, then creates it if not found
        linkpath = os.path.join(temppath, objectdir)
        logging.debug("DEBUG: temporary link path is %s" % linkpath)
        if not os.path.isdir(linkpath):
            logging.info("Creating temporary directory with sym links %s for incremental backup." % linkpath)
            try:
                os.makedirs(linkpath)
            except:
                logging.error("Could not create temporary directory %s" % linkpath)
        # set up array of files/dirs to create links for
        # then iterate thru the array and create the symlinks
        # except for offiles dir
        gwlist = (["wphost.db", "gwpo.dc", "ngwguard.db", "ngwguard.rfl", "ngwguard.fbk","ngwguard.dc","ngwcheck.db", "ofuser", "ofmsg", "gwdms", "ofviews"])
        for a in gwlist:
            sourcelink = os.path.join(source, a)
            targetlink = os.path.join(linkpath, a)
            try:
                if not os.path.islink(targetlink):
                    os.symlink(sourcelink, targetlink)
            except:
                logging.error("Could not create symbolic link for %s" % targetlink)
                continue
        # now set the dbcopy source to the tmp/po directory
        source = linkpath
        logging.info("Source path for incremental backup is: %s" % source)
    return
def needtar():
    """Tar up (and remove) backup directories that are no longer current.

    PO full backup (backuptype 1): tar every '-inc' directory and every
    old '-full' directory except the current one.  Domain (object 0):
    tar the previous day's backup if present, plus any date-named
    directory older than maxinc days.
    """
    # this will check if an existing backup exists in the target directory,
    # also check if any backups are older than maxinc value
    # for po's we're tarring all incrementals with it's full backup and starting
    # fresh with a new full backup.
    # if so, tar it up in the fbc/tars/domain or po) directory
    global basepath
    logging.debug("Determining if old backup's should be tarred.")
    def tarprep(tardir):
        #function to create variables and call createtar() function
        global tarsource
        tarsource = os.path.join(basepath, tardir)
        logging.info("Tarring existing backup directory: %s " % tardir)
        createtar(tarsource, tardir)
        return
    # for a full po backup type
    if backuptype == 1:
        if basepath != None:
            try:
                # Renamed from `list`, which shadowed the builtin.
                entries = sorted(os.listdir(basepath))
            except:
                logging.error("getting list of directory %s failed" % basepath)
            # get the last full backup path
            p1 = os.path.basename(e.find("currentfullpath").text)[0:8]
            # setup 2 arrays, one for incremental backup dirs, one for full
            incdir = [elem for elem in entries if elem.split('-')[3] == 'inc']
            fulldir = [elem for elem in entries if elem.split('-')[3] == 'full']
            # we're going to tar all incrementals and their respective full backups
            # need to do the incrementals first
            for y in incdir:
                backdir = y[0:8]
                tardir = y
                tarprep(tardir)
            #now tar the full backups, except for today's
            for z in fulldir:
                backdir = z[0:8]
                if p1 == None or z == os.path.basename(target):
                    pass
                else:
                    if backdir != p1:
                        tardir = z
                        tarprep(tardir)
        else:
            logging.error("Basepath not set, Can't determine what directory to check.")
    # now tar old domain backups
    elif object == 0:
        oldpath = os.path.join(target, "wpdomain.db")
        if not os.path.isfile(oldpath):
            pass
        else:
            # BUG FIX: was `tarsouce = target` (typo), which left `tarsource`
            # unbound on the createtar() call below (UnboundLocalError).
            tarsource = target
            tardir = os.path.basename(target)
            createtar(tarsource, tardir)
        if currentfulldate != None:
            # get's the last full backup date from xml to use as "latest"
            latest = datetime.datetime.strptime(currentfulldate, dateformat)
            entries = os.listdir(basepath)
            for y in entries:
                try:
                    t = datetime.datetime.strptime(y, dateformat)
                except:
                    # Not a date-named backup directory; skip it.
                    continue
                # check dates of all backup directorys, then tar it if it's older
                # than today's (latest) minus the maxinc number
                if (latest - t) > timedelta(maxinc - 1):
                    tardir = y
                    tarsource = os.path.join(basepath, y)
                    createtar(tarsource, tardir)
                else:
                    pass
    return
def createtar(tarsource, tardir):
    """Tar an old backup directory via fbctar.sh, then delete the directory.

    tarsource: full path of the backup directory to archive.
    tardir:    its basename; the first 8 chars (the date) name the tarfile.
    """
    #function to create a tar of an existing backup
    logging.info("Tarring existing backup in directory %s" % tardir)
    # create base directory if not found
    tarpath = os.path.join(tardirectory, objectdir)
    if not os.path.isdir(tarpath):
        try:
            os.makedirs(tarpath)
        except OSError:
            logging.error("Failed to create base tar directory %s" % tarpath)
    # set some vars and proceed
    tarfile = os.path.join(tarpath, tardir[0:8]) + ".tar"
    tarargs = (tarfile + sp + tarsource)
    tarcmd = (tarscript + sp + tarargs +">/dev/null")
    logging.info("Creating tarfile: %s" % tarfile)
    # check available disk space and then call the tarscript
    checkspace(tarsource, tardirectory)
    # BUG FIX: os.system() reports failure via its return status -- it does
    # not raise -- so the old try/except could never detect a failed tar,
    # yet the source directory was still deleted below (data loss).  Check
    # the status and treat a non-zero result as fatal instead.
    if os.system(tarcmd) != 0:
        logging.error("Creation of tar file %s failed" % tarfile)
    logging.info("Removing old directory %s " % tarsource)
    # now remove the old directory
    removeall(tarsource)
    # just to make sure... may remove the following or the function later.
    if os.path.isdir(tarsource):
        try:
            logging.error("%s directory is still here, what up with that?" % tarsource)
            os.rmdir(tarsource)
        except:
            logging.info("Failed to remove old directory %s" % tarsource)
    return
def prepareblob():
    """Point the incremental target's offiles at the last full backup.

    Sets globals blobsource/blobtarget for the later offiles dbcopy pass
    and symlinks <target>/offiles -> <currentfullpath>/offiles so the
    blob store lives only in the full backup.
    """
    # Function to setup links to copy offiles dir from the latest full
    # backup directory
    global blobtarget
    global blobsource
    if e.find("currentfullpath") == None:
        logging.debug("DEBUG: Current full path not set. It should be!")
        quit()
    else:
        blobtarget = os.path.normpath(e.find("currentfullpath").text)
        blobsource = os.path.join(mountpath, objectdir)
        symsource = os.path.join(blobtarget, "offiles")
        symtarget = os.path.join(target, "offiles")
        try:
            os.symlink(symsource, symtarget)
            logging.info("Creating symbolic link to offiles")
        except:
            # NOTE(review): logging.error() exits, so this `pass` is
            # unreachable as written.
            logging.error("Failed to create offiles symlink")
            pass
    return
def gettarget():
    """Compute the dbcopy target directory and record it in the XML.

    Domain (object 0): <backuppath>/<objectdir>/<date>.
    PO full (backuptype 1): .../<date>-full, resets currentinc to 1.
    PO incremental (backuptype 2): .../<date>-inc, bumps currentinc.
    Also creates the directory, triggers needtar() for aging, updates
    restorepath, and for incrementals calls prepareblob().
    """
    # function to set the target directory for dbcopy
    global restorepath
    global target
    global basepath
    global blobsource
    global blobtarget
    global tarsource
    global currentinc
    #create base directory for xml file
    basepath = os.path.join(backuppath, objectdir)
    for el in e.iter("basepath"):
        el.text = basepath
    # if this is a full backup, set target path to backup/domain(po)/date
    if object == 0:
        target = os.path.join(basepath, todayLong)
        logging.info("DBCopy target path is %s" % target)
        currentfullpath = target
        for el in e.iter("currentfullpath"):
            el.text = currentfullpath
        for el in e.iter("currentfulldate"):
            el.text = currentfulldate
    # for po full
    elif backuptype == 1:
        #logging.debug("DEBUG: backup type is %s" % backuptype)
        target = os.path.join(basepath, todayLong + '-full')
        # set currentfullpath to target, then resets currentinc to 1
        currentfullpath = target
        for el in e.iter("currentfullpath"):
            el.text = currentfullpath
        for el in e.iter("currentfulldate"):
            el.text = currentfulldate
        currentinc = "1"
        for el in e.iter("currentinc"):
            el.text = currentinc
    elif backuptype == 2:
        target = os.path.join(basepath, todayLong + "-inc")
        #set the current full date
        for el in e.iter("currentfulldate"):
            el.text = currentfulldate
        # currentinc is an int here (set by bktype for the incremental case).
        currentinc += 1
        for element in e.iter("currentinc"):
            element.text = str(currentinc)
    # create the target dir if not found
    logging.info("Target directory for backup is %s" % target)
    if not os.path.isdir(target):
        logging.debug("Target directory not found, creating %s" % target)
        os.makedirs(target)
    needtar()
    restorepath = target
    for el in e.iter("restorepath"):
        el.text = restorepath
    if backuptype == 2:
        prepareblob()
    return
def docopy(source, target):
    """Run the dbcopy wrapper: dbcopy.sh <backuptype> <source> <target> <yesterday>.

    Verifies free space first; a non-zero exit from the shell script is
    only printed, not fatal.
    """
    checkspace(source, target)
    dbargs = source + sp + target
    dbcmd = dbscript + sp + str(backuptype) + sp + dbargs + sp + yesterday
    result = commands.getstatusoutput(dbcmd)
    if result[0] == 0:
        pass
    else:
        print 'DBCopy failed with error status %s' % result[0]
    return
def copy():
    """Run the actual backup for the current server element `e`.

    dbcopy the main data, for incrementals also sync offiles into the
    last full backup and grab wphost.db, optionally set the GW
    timestamp, refresh the offline restore symlink, and finally
    unmount the source (retrying until umount succeeds).
    """
    # Function to start the copy process
    #global currentinc
    #global restorepath
    global args
    global starttime
    global endtime
    global objectname
    # get time for timestamp before the copy starts
    timestamp = rundate.strftime(timeformat)
    # sets the dbcopy command with its arguments
    logging.info("Starting DBCopy for %s" % e.find("name").text)
    #call the docopy function
    docopy(source, target)
    if backuptype == 2:
        # args = " -b -m"
        # copy offiles to the last full po backup
        logging.info("Starting DBCopy to copy OFFILES for %s" % e.find("name").text)
        docopy(blobsource, blobtarget)
        #make sure we get the wphost.db
        hostsource = os.path.join(source, "wphost.db")
        hosttarget = os.path.join(target, "wphost.db")
        try:
            shutil.copyfile(hostsource, hosttarget)
        except:
            logging.error("Failed to copy wphost.db!")
    # check to see if the timestamp is required, then runs the gwtmstmp command
    if object == 1 and gwts == "yes":
        logging.info("Setting GroupWise time stamp for %s" % e.find("name").text)
        tssource = os.path.join(mountpath, objectdir)
        tsargs = (" -p " + tssource + " -s -t " + timestamp)
        tscmd = gwtmstmp + tsargs
        try:
            os.system(tscmd)
        except:
            # BUG FIX: was `tsmd`, an undefined name that would have raised
            # NameError inside this handler; the command variable is tscmd.
            logging.error("%s timestamp command failed." % tscmd)
    else:
        logging.debug("GW timestamp setting not enabled. Not setting timestamp")
    # create a restore path if not found, then create a symlink pointing to
    # the most current backup
    offlinedir = os.path.join(offlinepath, objectdir)
    if not os.path.isdir(offlinepath):
        os.makedirs(offlinepath)
    if os.path.islink(offlinedir):
        os.unlink(offlinedir)
    try:
        os.symlink(restorepath, offlinedir)
    except:
        logging.error("Creation of sym links failed")
    #unmount the source - can't mount a ncp server twice..
    if backuptype == 2:
        unmount = "umount " + blobsource
        #remove symlinks from the temp dir
        rmlink = os.path.join(temppath, objectdir)
        logging.debug("Removing symbolic links from %s" % rmlink)
        removeall(rmlink)
        command_result = commands.getstatusoutput(unmount)
        #wait until the umount command succeeds.
        # NOTE(review): loops forever if the mount never releases.
        while command_result[0] != 0:
            time.sleep(10)
            command_result = commands.getstatusoutput(unmount)
    else:
        unmount = "umount " + source
        command_result = commands.getstatusoutput(unmount)
        while command_result[0] != 0:
            time.sleep(10)
            command_result = commands.getstatusoutput(unmount)
    return
def writexml():
    """Persist the in-memory XML tree (run stats/paths) back to fbc.xml.

    A failed write is fatal: logger.error() exits the program.
    """
    try:
        tree.write(configfile, pretty_print=True)
    except Exception:
        # Narrowed from a bare except: so SystemExit/KeyboardInterrupt
        # still propagate.
        logging.error("Failed to update config file")
def main():
    """Orchestrate one FreebieCopy run.

    Opens the main log, stamps run times into the XML, creates the lock
    file, then for each <server> element runs the bktype/getsource/
    gettarget/copy pipeline with a per-object log, and finally writes
    the updated XML and removes the lock file.
    """
    global e
    global currentfulldate
    global currentinc
    global objectname
    global logging
    global log
    global mainlog
    #create instance for the main log file
    mainlog = (os.path.join(logpath, datetime.datetime.strftime(rundate, shortformat)) + ".log")
    log = logger(mainlog)
    # and start the main log file
    log.info("Starting FreebieCopy for %s" % rundate.strftime(dateformat))
    log.info("===========================================")
    log.info(" ")
    currentfulldate = todayLong
    lastrundate = rundate.strftime(dateformat)
    for el in root.iter("lastrundate"):
        el.text = lastrundate
    lastrunstart = (rundate.strftime(dateformat) + sp + datetime.datetime.now().strftime(timeformat))
    for el in root.iter("lastrunstart"):
        el.text = lastrunstart
    # create a lock file so web admin doesn't write to the xml file during
    # running of this script
    if not os.path.isfile(lockfile):
        lock = open(lockfile, 'w')
        text = "blank"
        try:
            lock.write(text)
            log.info("Creating lockfile: %s" % lockfile)
        except:
            log.error("Failed to create lock file: %s" % lockfile)
        lock.close()
    # iterate thru all GW objects in the xml and
    # do the copy
    for e in root.findall("./server"):
        restorepath = e.find("restorepath").text
        starttime = (rundate.strftime(dateformat) + sp + datetime.datetime.now().strftime(timeformat))
        for el in e.iter("starttime"):
            el.text = starttime
        objectname = e.find("name").text
        #create instance for domain/po log file
        agentlogpath = os.path.join(logpath, objectname)
        if not os.path.isdir(agentlogpath):
            try:
                os.makedirs(agentlogpath)
            except:
                print("Unable to create log path %s" % agentlogpath)
        agentlog = (os.path.join(agentlogpath, datetime.datetime.strftime(rundate, shortformat)) + ".log")
        # 'logging' is for dom/po log, 'log' is for main log
        logging = logger(agentlog)
        log.info("Performing backup for %s" % objectname)
        logging.info("===========================================")
        logging.info(" ")
        logging.info("Starting backup for %s" % objectname)
        # add object name to lock file, so web admin can see what's currently running.
        rewritelock(objectname)
        # Pipeline: decide backup type, resolve source, resolve target, copy.
        bktype()
        getsource()
        gettarget()
        copy()
        endtime = rundate.strftime(dateformat) + sp + datetime.datetime.now().strftime(timeformat)
        for el in e.iter("endtime"):
            el.text = endtime
        logging.info("Backup for %s complete." % objectname)
        logging.info("-----------------------------------------------")
        log.info("Backup for %s complete." % objectname)
        log.info("-----------------------------------------------")
    lastrunend = rundate.strftime(dateformat) + sp + datetime.datetime.now().strftime(timeformat)
    for el in root.iter("lastrunend"):
        el.text = lastrunend
    log.info("Writing updates to config file")
    writexml()
    tarcleanup()
    # kill the lock file
    if os.path.isfile(lockfile):
        os.remove(lockfile)
        log.info("Removing lock file.")
    else:
        log.error("Unable to delete lock file!!")
    log.info(" ")
    log.info("To see log files for domains and post office,")
    log.info("Go to %s/ domainname/poname" % logpath)
    log.info(" ")
    log.info("========== FreebieCopy Finished ===========")
    log.info(" ")
    return
def sendlog():
    """Email the main log file to the configured admin recipient.

    Relies on module globals smtphost/recipient (from fbc.xml) and
    mainlog (set by main()).  Failure to send is logged, never fatal.
    """
    subject = 'FreebieCopy status for %s' % todayLong
    message = 'Attached is the log file for this run'
    sender = 'admin@freebie.blah.com'
    try:
        sendemail.send(smtphost, sender, recipient, subject, message, mainlog)
        log.info("Log file sent to %s" % recipient)
    except Exception:
        # Narrowed from a bare except: still best-effort, but no longer
        # swallows SystemExit/KeyboardInterrupt.
        log.info("Unable to send log file")
    log.info("==========================================")
# now everything's defined, let's run this puppy!
main()
# BUG FIX: the config value previously read into the name `sendlog` is
# shadowed by `def sendlog()` above, so `sendlog == "Yes"` compared a
# function to a string and the log was never mailed.  Re-read the flag
# from the parsed config tree instead.
if root.find("sendlog").text == "Yes":
    sendlog()
| Python |
#!/usr/bin/env python
# Copyright 2010 Morris Blackham
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import smtplib
import email.utils
from email.mime.text import MIMEText
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email import Utils, Encoders
import mimetypes, sys
def attachment(filename):
    """Build a MIME part for *filename*, guessing its content type.

    Text files become MIMEText parts; everything else is wrapped as a
    base64-encoded part of the guessed type (or application/octet-stream
    for unknown/compressed files) with a Content-Disposition header
    naming the file.
    """
    fd = open(filename, 'rb')
    mimetype, mimeencoding = mimetypes.guess_type(filename)
    # Compressed ("encoded") files and unknown types are treated as opaque.
    if mimeencoding or (mimetype is None):
        mimetype = 'application/octet-stream'
    maintype, subtype = mimetype.split('/')
    if maintype == 'text':
        retval = MIMEText(fd.read(), _subtype=subtype)
    else:
        retval = MIMEBase(maintype, subtype)
        # BUG FIX: was retval.set_paylod(...) -- an AttributeError for
        # every non-text attachment.
        retval.set_payload(fd.read())
        Encoders.encode_base64(retval)
    retval.add_header('Content-Disposition', 'attachment', filename = filename)
    fd.close()
    return retval
def send(smtphost, sender, recipient, subject, message, filename):
#print filename
if filename == 'blank':
msg = MIMEText(message)
body = MIMEText(message, _subtype='plain')
else:
msg = MIMEMultipart()
body = MIMEText(message, _subtype='plain')
msg.attach(body)
msg.attach(attachment(filename))
msg['TO'] = sender
msg['From'] = recipient
msg['Subject'] = subject
msg['Date'] = Utils.formatdate(localtime = 1)
msg['Message-ID'] = Utils.make_msgid()
#print msg.as_string()
server = smtplib.SMTP(smtphost)
#server.set_debuglevel(True)
try:
server.sendmail(sender, [recipient], msg.as_string())
except:
print "connection to %s failed" % smtphost
finally:
server.quit()
| Python |
#!/usr/bin/env python
# Copyright 2010 Morris Blackham
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lxml import etree
import os
import sys
import commands
# NOTE(review): `global` at module level is a no-op, and `type` shadows the
# builtin -- kept as-is; flagged for a later cleanup.
global path
global ipaddr
global type
#set variables from command line args
# usage: <script> <backup-path> <secondary-ip> <domain|po>
(path, ipaddr, type) = sys.argv[1:]
#set vars to GW agent binaries
gwpoa = "/opt/novell/groupwise/agents/bin/gwpoa";
if not os.path.isfile(gwpoa):
    print "GroupWise agents not found - Please install before continuing"
    quit()
gwmta = "/opt/novell/groupwise/agents/bin/gwmta";
if not os.path.isfile(gwmta):
    print "GroupWise agents not found - Please install before continuing"
    quit()
#load config file
configfile = "/etc/opt/fbc/fbc.xml"
try:
    tree = etree.parse(configfile)
    root = tree.getroot()
except:
    print("config file %s file not found" % configfile)
# get directory name for domain or po
name = os.path.basename(os.path.dirname(path))
print "name is %s" % name
def getsubnet():
global prefix
subcmd = "ip addr show eth0 | grep inet | awk '{ print substr($2, length($2)-1)}'"
r = commands.getstatusoutput(subcmd)
if r[0] != 0:
print "command failed"
else:
prefix = r[1]
print prefix
return(prefix)
def checkip():
# Ping ip address to see if we can add it as a secondary to this box.
pingcmd = 'ping -c 3 %s' % ipaddr
ping_result = commands.getstatusoutput(pingcmd)
if ping_result[0] == 0:
# Ping succesfull, but let's see if it's bound
# to local machine
ipgrep = "ip address show | grep %s" % ipaddr
if commands.getstatusoutput(ipgrep)[0] == 0:
pass
else:
print "Ping succeeded for %s" % ipaddr
print "and is not a local address, Can not add as a secondary ip."
quit()
else:
# add secondary IP address
getsubnet()
ipadd = 'ip address add %s/%s dev eth0 label eth0.sec' % (ipaddr, prefix)
ipadd_result = commands.getstatusoutput(ipadd)
if ipadd_result[0] != 0:
print "adding IP failed. exiting"
quit()
else:
cfgfile = "/var/fbc/secip.txt"
if not os.path.isfile(cfgfile):
filewrite = open(cfgfile, 'w')
text = ipaddr
try:
filewrite.write(text)
except:
print "could not create IP file!"
quit()
else:
print "file exists", cfgfile
return
def loadgw():
# get the directory to set a sym link for the restore area
#try:
# restorebase = root.find("offlinepath").text
# restorepath = restorebase + name
#except:
# print "offline path is not set in %s. Can not continue" % configfile
#print restorepath
# create the directory if it doesn't exist
#if not os.path.isdir(restorebase):
# os.makedirs(restorebase)
# delete the symlink as it may be for a
# different day's backup, then recreate it.
#if os.path.islink(restorepath):
# try:
# os.remove(restorepath)
# except:
# print "Failed to remove symlink to %s", restorepath
#try:
# os.symlink(path,restorepath)
#except:
# print "Creating simlink to %s failed" % path
# set some vars based on if it's a domain or a po
if type == "domain":
agent = gwmta
agentargs = " --home %s -ip %s >& /dev/null" % (restorepath, ipaddr)
elif type == "po":
agent = gwpoa
agentargs = " --home %s -ip %s --noconfig >& /dev/null" % (restorepath, ipaddr)
else:
print "Incorrect value for %s" % type
# build the command to load the agent, then run the command
agentcmd = "/sbin/startproc -f -t 2 %s %s" % (agent, agentargs)
print agentcmd
#load_result = commands.getstatusoutput(agentcmd)
#print load_result[0]
#if load_result != 0:
# print "Loading agent failed"
return
def main():
    """Entry point: bind the secondary IP, then launch the agent."""
    checkip()
    loadgw()

main()
| Python |
#!/usr/bin/env python
# Copyright 2010 Morris Blackham
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import commands
# Apache config file that receives the <Location /fbc> auth stanza below.
httpconf = "/etc/apache2/httpd.conf"
def getuser():
global passfile
passfile = raw_input("Enter path to Apache password file (do not put under htdoc dir): ")
user = raw_input("Enter FBC admin ID: ")
pwd = raw_input("Enter admin password: ")
if os.path.isfile(passfile):
cmd = ("/usr/bin/htpasswd2 -b " + passfile + " " + user + " " + pwd)
else:
cmd = ("/usr/bin/htpasswd2 -c -b " + passfile + " " + user + " " + pwd)
retcode = commands.getstatusoutput(cmd)
if retcode < 0:
print "Terminated signal", -retcode
else:
print "Ok", retcode
return(passfile)
def writefile(passfile):
    """Append a <Location /fbc> basic-auth stanza pointing at passfile to httpd.conf."""
    stanza = ["<Location /fbc>\n",
              " AuthType Basic\n",
              " AuthName \"Authentication Required\"\n",
              " AuthUserFile \"" + passfile + "\"\n",
              " Require valid-user\n",
              "</Location>\n"]
    conf = open(httpconf, 'a')
    conf.write("".join(stanza))
    conf.close()
def restart():
    """Restart Apache so the new auth configuration takes effect."""
    commands.getstatusoutput("/etc/init.d/apache2 restart")
# Scan the Apache module list and only wire up authentication when the
# auth_basic module is enabled (commented-out APACHE_MODULES lines skipped).
for line in open("/etc/sysconfig/apache2", "r"):
    if "APACHE_MODULES" in line:
        if line[0] == "#":
            pass
        else:
            if "auth_basic" in line:
                # Use getuser()'s return value instead of relying on the
                # 'passfile' global side effect.
                pwfile = getuser()
                writefile(pwfile)
                restart()
| Python |
# Django settings for FreeBouting project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',  # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'FreeBouts',                     # Or path to database file if using sqlite3.
        'USER': '',                              # Not used with sqlite3.
        'PASSWORD': '',                          # Not used with sqlite3.
        'HOST': '',                              # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '',                              # Set to empty string for default. Not used with sqlite3.
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = '/home/thedoink/Desktop/gSVN/trunk/Code/FreeBouting/Static/'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'q*om6#@-j==#81zff=+(*8$7*=a#y%7n9q_@n!9er1agsim#n3'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'FreeBouting.urls'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    # NOTE: the trailing comma below is required — without it this setting
    # was a bare string, not a tuple, and Django would iterate it per-character.
    "/home/thedoink/Desktop/gSVN/trunk/Code/FreeBouting/Templates",
)
INSTALLED_APPS = (
    # Fixed: 'django.contrib.auth' and 'django.contrib.contenttypes' were
    # each listed twice.
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'bouting',
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
| Python |
#!/usr/bin/env python
# Standard Django (pre-1.4) manage.py bootstrap: verify a settings module
# exists next to this file, then hand control to django-admin machinery.
from django.core.management import execute_manager
import imp
try:
    imp.find_module('settings')  # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
    sys.exit(1)
import settings
if __name__ == "__main__":
    execute_manager(settings)
| Python |
import datetime
def makeInsert(table, args):
    """Append an INSERT statement for table bouting_<table> to the global script.

    Empty-string args are emitted as SQL NULL; everything else is quoted.
    NOTE(review): values are spliced by string concatenation — fine for this
    trusted fixture generator, but not injection-safe for external input.
    """
    global everything
    printMe = "insert into bouting_" + table + " values("
    print(args)
    for arg in args:
        if arg == "":
            printMe = printMe + "NULL, "
        else:
            printMe = printMe + "'" + str(arg) + "', "
    # Both branches of the old 'lastNull' check stripped the same trailing
    # ", " — the flag was dead code, so strip unconditionally.
    printMe = printMe[:-2] + ");"
    everything = everything + printMe + "\n"
# Build a SQL fixture script in memory. Deletes first clear any previous
# fixture rows (child tables before the tables they reference).
everything = "delete from bouting_weaponrating;\n"
everything = everything + "delete from bouting_refrating;\n"
everything = everything + "delete from bouting_section;\n"
everything = everything + "delete from bouting_division;\n"
everything = everything + "delete from bouting_club;\n"
everything = everything + "delete from bouting_person;\n"
everything = everything + "delete from bouting_usfacard;\n"
# Insert all the ref ratings
# 'u' is the running primary key. chr(49)..chr(57) are the characters
# '1'..'9'; one row per rating per year 2007-2011.
u = 0
for i in range(49,58):
    for c in range(2007, 2012):
        makeInsert("refrating", (u,chr(i),c))
        u = u+1
# Rating "10" is two characters, so it can't come from the chr() loop above.
for c in range(2007, 2012):
    makeInsert("refrating", (u,"10",c))
    u = u+1
# Insert all the weapon ratings
# chr(65)..chr(69) are the letters 'A'..'E'.
#u = 0
for i in range(65,70):
    for c in range(2007, 2012):
        makeInsert("weaponrating", (u,chr(i),c))
        u = u+1
# 'U' (unrated) has no year; empty string becomes NULL in makeInsert.
makeInsert("weaponrating", ("25","U", ""))
# Insert a section
makeInsert("section", ["Mid-Atlantic"])
# Insert a division
makeInsert("division", ["Maryland"])
makeInsert("division", ["Virginia"])
# Insert a club
makeInsert("club", ("0", "Fencing Club A", "FCA", "Maryland", "Severn", "Maryland", "888 Nowhere Plaza", "http://www.website.net", "1", datetime.datetime.now(), "1", datetime.datetime.now()))
makeInsert("club", ("1", "Fencing Club B", "FCB", "Maryland", "Severn", "Maryland", "777 Unknown Road", "http://www.internet.org", "1", datetime.datetime.now(), "1", datetime.datetime.now()))
makeInsert("club", ("2", "Fencing Club C", "FCC", "Virginia", "Severn", "Maryland", "666 Circle Court", "http://www.fencing.org", "1", datetime.datetime.now(), "1", datetime.datetime.now()))
# Insert some people (id, first, last, gender, ..., primary club id, ...).
makeInsert("person", ("1", "Adam", "Adams", "M", "", "", "0", "", "", "1", datetime.datetime.now(), "1", datetime.datetime.now()))
makeInsert("person", ("2", "Bridgette", "Brown", "F", "", "", "0", "", "", "1", datetime.datetime.now(), "1", datetime.datetime.now()))
makeInsert("person", ("3", "Cathy", "Curuthers", "F", "", "", "1", "", "", "1", datetime.datetime.now(), "1", datetime.datetime.now()))
makeInsert("person", ("4", "Diana", "Dailey", "F", "", "", "2", "", "", "1", datetime.datetime.now(), "1", datetime.datetime.now()))
makeInsert("person", ("5", "Ethan", "Evers", "M", "", "", "1", "", "", "1", datetime.datetime.now(), "1", datetime.datetime.now()))
makeInsert("person", ("6", "Francis", "Foley", "M", "", "", "0", "", "", "1", datetime.datetime.now(), "1", datetime.datetime.now()))
# Insert some cards
makeInsert("usfacard", ("1", "111111112", "Maryland", "0", "2011-05-13", "2014-07-23", "53", "52", "64", "", "", "", "1", "1", datetime.datetime.now(), "1", datetime.datetime.now()))
makeInsert("usfacard", ("2", "111111113", "Maryland", "0", "2011-07-23", "2015-07-23", "63", "52", "73", "", "", "", "2", "1", datetime.datetime.now(), "1", datetime.datetime.now()))
makeInsert("usfacard", ("3", "111111114", "Virginia", "0", "2012-05-23", "2013-07-23", "53", "54", "74", "", "", "", "3", "1", datetime.datetime.now(), "1", datetime.datetime.now()))
makeInsert("usfacard", ("4", "111111115", "Maryland", "0", "2011-07-23", "2016-07-23", "55", "52", "64", "", "", "", "4", "1", datetime.datetime.now(), "1", datetime.datetime.now()))
makeInsert("usfacard", ("5", "111111116", "Maryland", "0", "2011-12-23", "2017-07-23", "53", "65", "52", "", "", "", "5", "1", datetime.datetime.now(), "1", datetime.datetime.now()))
makeInsert("usfacard", ("6", "111111117", "Maryland", "0", "2011-12-23", "2017-07-23", "53", "65", "52", "", "", "", "6", "1", datetime.datetime.now(), "1", datetime.datetime.now()))
# Write the accumulated SQL to disk.
f = open("InsertThis.sql", "w")
f.write(everything)
f.close()
| Python |
import json
import urllib2
import time
import datetime
def importEvent(item, tournament):
    """Parse one askFred event dict and print its normalized fields.

    Args:
        item: event dict from the askFred API (keys: gender, age_limit,
              close_of_reg, weapon, rating_limit, ...).
        tournament: tournament dict; only tournament["id"] is used here.

    Bug fix: the gender check tested `"men" in gender` before
    `"women" in gender`, and "women" contains "men" as a substring, so
    every women's event was classified as "Men". "women" is now tested
    first.
    """
    print("---EVENT---")
    gender = item['gender'].lower()
    if "women" in gender:
        gender = "Women"
    elif "men" in gender:
        gender = "Men"
    elif "mixed" in gender:
        gender = "Mixed"
    print("Gender: " + gender)
    age = item['age_limit'].lower()
    if age == "none":
        age = "open"
    print("Age: " + age)
    startDate = item['close_of_reg']
    # Example value: 2012-06-09T12:30:00-04:00 — the trailing "-04:00" is a
    # UTC offset that strptime can't parse, so strip from the last '-'.
    startDate = startDate[:startDate.rfind("-")]
    # Parse the data and convert it to a form django likes
    startDate = time.strptime(startDate, "%Y-%m-%dT%H:%M:%S")
    start = datetime.datetime.fromtimestamp(time.mktime(startDate))
    print("Start: " + str(start))
    weapon = item['weapon']
    print("Weapon: " + weapon)
    ratingLimit = item['rating_limit'].lower()
    # Translate FRED's rating limits to (upper, lower) rating-letter bounds;
    # anything unrecognized defaults to an open A-U event.
    limits = {
        "div3": ("D", "U"),
        "div2": ("C", "U"),
        "div1": ("A", "C"),
        "div1a": ("A", "U"),
        "open": ("A", "U"),
        "eunder": ("E", "U"),
        "dabove": ("A", "D"),
        "aonly": ("A", "A"),
        "babove": ("A", "B"),
        "unrated": ("U", "U"),
    }
    upperLimit, lowerLimit = limits.get(ratingLimit, ("A", "U"))
    print("Upper Limit: " + upperLimit)
    print("Lower Limit: " + lowerLimit)
    print("Tournament: " + str(tournament["id"]))
    print(item)
def importTournament(number):
    """Fetch tournament <number> from the askFred sandbox API and print its details and events."""
    myAPIKey = "You'll need to get one of these in order to run the script"
    base = 'https://apisandbox.askfred.net/v1/'
    tournamentData = json.load(urllib2.urlopen(base + 'tournament/' + str(number) + '?_api_key=' + myAPIKey))
    eventData = json.load(urllib2.urlopen(base + 'event/?tournament_id=' + str(number) + '&_api_key=' + myAPIKey))
    eventData2 = json.load(urllib2.urlopen(base + 'event/?roundresult=' + str(number) + '&_api_key=' + myAPIKey))
    print(eventData2)
    tournament = tournamentData['tournament']
    venue = tournament['venue']
    print("---Tournament---")
    print("Name: " + tournament['name'])
    print("Host: " + "N/A")
    print("City: " + venue['city'])
    print("State: " + venue['state'])
    print("Street: " + venue['address'])
    print("Details: " + tournament['comments'])
    for item in eventData['events']:
        importEvent(item, tournament)

importTournament(18573)
| Python |
# URLconf for the bouting app: routes are grouped by the entity they manage
# (people, clubs, tournaments, events, rounds/pools) and all resolve to
# view functions in bouting.views.
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('bouting.views',
    # Landing page and person management.
    (r'^bouting/$', 'index'),
    (r'^bouting/personList/$', 'personList'),
    (r'^bouting/(?P<person_id>\d+)/$', 'detail'),
    (r'^bouting/createPerson/$', 'createPerson'),
    (r'^bouting/(?P<person_id>\d+)/editPerson/$', 'editPerson'),
    (r'^bouting/(?P<tournament_id>\d+)/(?P<event_id>\d+)/(?P<person_id>\d+)/editPerson/$', 'editPerson'),
    # Club management.
    (r'^bouting/clubList/$', 'clubList'),
    (r'^bouting/myClubList/$', 'myClubList'),
    (r'^bouting/(?P<club_id>\d+)/clubDetail/$', 'clubDetail'),
    (r'^bouting/(?P<club_id>\d+)/editClub/$', 'editClub'),
    (r'^bouting/(?P<club_id>\d+)/deleteClub/$', 'deleteClub'),
    (r'^bouting/createClub/$', 'createClub'),
    # Tournament management.
    (r'^bouting/myTournamentList/$', 'myTournamentList'),
    (r'^bouting/tournamentList/$', 'tournamentList'),
    (r'^bouting/createTournament/$', 'createTournament'),
    (r'^bouting/(?P<tournament_id>\d+)/editTournament/$', 'editTournament'),
    (r'^bouting/(?P<tournament_id>\d+)/tournamentDetail/$', 'tournamentDetail'),
    (r'^bouting/(?P<tournament_id>\d+)/deleteTournament/$', 'deleteTournament'),
    #(r'^bouting/createTournament_Failure/$', 'createTournament_Failure'),
    # Event lifecycle: seeding, referees, entrants.
    (r'^bouting/(?P<tournament_id>\d+)/(?P<event_id>\d+)/(?P<round_id>\d+)/initialSeeding/$', 'initialSeeding'),
    (r'^bouting/(?P<tournament_id>\d+)/createEvent/$', 'createEvent'),
    #(r'^bouting/(?P<tournament_id>\d+)/createEvent_Response/$', 'createEvent_Response'),
    #(r'^bouting/(?P<tournament_id>\d+)/createEvent_Success/$', 'createEvent_Success'),
    (r'^bouting/(?P<tournament_id>\d+)/(?P<event_id>\d+)/editEvent/$', 'editEvent'),
    (r'^bouting/(?P<tournament_id>\d+)/(?P<event_id>\d+)/advanceRounds/$', 'advanceRounds'),
    (r'^bouting/(?P<tournament_id>\d+)/(?P<event_id>\d+)/confirmEntrants/$', 'confirmEntrants'),
    #(r'^bouting/(?P<tournament_id>\d+)/(?P<event_id>\d+)/editEvent_Response/$', 'editEvent_Response'),
    (r'^bouting/(?P<tournament_id>\d+)/(?P<event_id>\d+)/deleteEvent/$', 'deleteEvent'),
    (r'^bouting/(?P<tournament_id>\d+)/(?P<event_id>\d+)/createRef/$', 'createRef'),
    (r'^bouting/(?P<tournament_id>\d+)/(?P<event_id>\d+)/(?P<referee_id>\d+)/deleteRef/$', 'deleteRef'),
    (r'^bouting/(?P<tournament_id>\d+)/(?P<event_id>\d+)/createEntrant/$', 'createEntrant'),
    (r'^bouting/(?P<tournament_id>\d+)/(?P<event_id>\d+)/(?P<entrant_id>\d+)/deleteEntrant/$', 'deleteEntrant'),
    #(r'^bouting/(?P<tournament_id>\d+)/(?P<event_id>\d+)/editEvent_Success/$', 'editEvent_Success'),
    # Round / pool / DE views and bout entry.
    (r'^bouting/(?P<tournament_id>\d+)/(?P<event_id>\d+)/(?P<roundNum>\d+)/poolOptions/$', 'poolOptions'),
    (r'^bouting/(?P<tournament_id>\d+)/(?P<event_id>\d+)/(?P<round_id>\d+)/generatePools/$', 'generatePools'),
    (r'^bouting/(?P<tournament_id>\d+)/(?P<event_id>\d+)/(?P<roundNum>\d+)/roundView/$', 'roundView'),
    (r'^bouting/(?P<tournament_id>\d+)/(?P<event_id>\d+)/(?P<roundNum>\d+)/prePool/$', 'prePool'),
    (r'^bouting/(?P<tournament_id>\d+)/(?P<event_id>\d+)/(?P<round_id>\d+)/beginFencing/$', 'beginFencing'),
    (r'^bouting/(?P<tournament_id>\d+)/(?P<event_id>\d+)/(?P<round_id>\d+)/prepareBouts/$', 'prepareBouts'),
    (r'^bouting/(?P<tournament_id>\d+)/(?P<event_id>\d+)/(?P<round_id>\d+)/poolView/$', 'poolView'),
    (r'^bouting/(?P<tournament_id>\d+)/(?P<event_id>\d+)/(?P<round_id>\d+)/deView/$', 'deView'),
    (r'^bouting/(?P<tournament_id>\d+)/(?P<event_id>\d+)/(?P<round_id>\d+)/editBout/$', 'editBout'),
    (r'^bouting/(?P<tournament_id>\d+)/(?P<event_id>\d+)/(?P<round_id>\d+)/roundResults/$', 'roundResults'),
    (r'^bouting/(?P<tournament_id>\d+)/tournamentView/$', 'tournamentView'),
    (r'^bouting/(?P<tournament_id>\d+)/(?P<event_id>\d+)/eventView/$', 'eventView'),
    # Authentication.
    (r'^bouting/signIn/$', 'signIn'),
    (r'^bouting/signOut/$', 'signOut'),
    (r'^bouting/signUp/$', 'signUp'),
    #(r'^bouting/register/$', 'register'),
    #(r'^bouting/(?P<poll_id>\d+)/results/$', 'bouting.views.results'),
    #(r'^bouting/(?P<poll_id>\d+)/vote/$', 'bouting.views.vote'),
    (r'^admin/', include(admin.site.urls)),
)
| Python |
from django.db import models
import datetime
from django.forms import ModelForm, Textarea, ModelChoiceField
from django.contrib.auth.models import User
from django.core import validators
from django.forms import forms, ModelForm, Textarea, PasswordInput, TextInput
from django import forms
from django.db.models import Q
from django.db.models import F
import random
import string
import math
import re
# Rating letters ordered strongest (A) to unrated (U); unknown letters map to 0.
_RATING_VALUES = {"A": 6, "B": 5, "C": 4, "D": 3, "E": 2, "U": 1}

def letterToNum(letter):
    """Map a weapon-rating letter to a sortable integer (A=6 ... U=1, else 0)."""
    return _RATING_VALUES.get(letter, 0)

def compareRating(a, b):
    """cmp-style compare of two rating objects: by rating strength, ties broken by year."""
    keyA = (letterToNum(a.rating), a.year)
    keyB = (letterToNum(b.rating), b.year)
    if keyA > keyB:
        return 1
    if keyA < keyB:
        return -1
    return 0
class MetaInfo(models.Model):
    """Audit fields: who created/last edited a record and when."""
    createdBy = models.ForeignKey(User, related_name='_createdBy')
    createdOn = models.DateTimeField(auto_now_add=True)
    editedBy = models.ForeignKey(User, related_name='_editedBy')
    editedOn = models.DateTimeField(auto_now=True)

    def __str__(self):
        return "%s:%s" % (self.editedBy, self.editedOn)
class RefRating(models.Model):
    # A referee rating: a level ('1'-'10' or 'P') earned in a given year.
    REFEREE_RATING_CHOICES = (
        ('1', '1'),
        ('2', '2'),
        ('3', '3'),
        ('4', '4'),
        ('5', '5'),
        ('6', '6'),
        ('7', '7'),
        ('8', '8'),
        ('9', '9'),
        ('10', '10'),
        ('P', 'P')
    )
    # Year choices cover the current year and the four before it.
    # NOTE: evaluated once at class definition (import) time, so a
    # long-running process will not see the year roll over.
    thisYear = datetime.datetime.now().year
    YEAR_CHOICES = (
        (str(thisYear), str(thisYear)),
        (str(thisYear-1), str(thisYear-1)),
        (str(thisYear-2), str(thisYear-2)),
        (str(thisYear-3), str(thisYear-3)),
        (str(thisYear-4), str(thisYear-4)),
    )
    rating = models.CharField(max_length=2, choices=REFEREE_RATING_CHOICES)
    year = models.CharField(max_length=4, choices=YEAR_CHOICES, blank=True)
    def __str__(self):
        # e.g. "5(2011)"
        return self.rating+"("+self.year+")"
    # Ensures each rating and year combination is unique
    class Meta:
        unique_together = ('rating', 'year')
class WeaponRating(models.Model):
    # A fencer's weapon rating: letter A (highest) through E, or U (unrated),
    # earned in a given year.
    WEAPON_RATING_CHOICES = (
        ('A', 'A'),
        ('B', 'B'),
        ('C', 'C'),
        ('D', 'D'),
        ('E', 'E'),
        ('U', 'U')
    )
    # Year choices cover the current year and the four before it
    # (evaluated once at import time).
    thisYear = int(datetime.datetime.now().year)
    YEAR_CHOICES = (
        (str(thisYear), str(thisYear)),
        (str(thisYear-1), str(thisYear-1)),
        (str(thisYear-2), str(thisYear-2)),
        (str(thisYear-3), str(thisYear-3)),
        (str(thisYear-4), str(thisYear-4)),
    )
    rating = models.CharField(max_length=1, choices=WEAPON_RATING_CHOICES)
    # Year may be absent: an unrated ('U') fencer has no rating year.
    year = models.CharField(max_length=4, choices=YEAR_CHOICES, blank=True, null=True)
    def __str__(self):
        # "B2010" for rated fencers, plain "U" for unrated.
        if str(self.rating) != "U":
            ret = str(self.rating)+str(self.year)
        else:
            ret = str(self.rating)
        return ret
    # Ensures that each rating and year pair is unique
    class Meta:
        unique_together = ('rating', 'year')
        # Best rating first, most recent year first.
        ordering = ['rating', '-year']
class Section(models.Model):
    """A geographic section, keyed by its name."""
    name = models.CharField(primary_key=True, max_length=100)

    def __str__(self):
        return self.name
class Division(models.Model):
    """A division (e.g. a state), keyed by its name."""
    name = models.CharField(primary_key=True, max_length=50)
    #section = models.ForeignKey(Section)

    def __str__(self):
        return self.name
class Club(models.Model):
    """A fencing club: name, location, owning users, and audit fields."""
    name = models.CharField(unique=True, max_length=50)
    abbreviation = models.CharField(max_length=8)
    division = models.ForeignKey(Division)
    owners = models.ManyToManyField(User)
    city = models.CharField(max_length=50)
    state = models.CharField(max_length=25)
    street = models.CharField(max_length=100)
    webSite = models.URLField("Web Site", blank=True, null=True)
    createdBy = models.ForeignKey(User, related_name='club_createdBy')
    createdOn = models.DateTimeField(auto_now_add = True)
    editedBy = models.ForeignKey(User, related_name='club_editedBy')
    editedOn = models.DateTimeField(auto_now = True)

    def __str__(self):
        return self.name

    def mapifyAddress(self):
        """Return "street,city,state" with spaces replaced by '+' (URL-query friendly)."""
        fixMe = self.street + "," + self.city + "," + self.state
        # str.replace instead of the legacy string.replace() module function
        # (removed in Python 3); behavior is identical.
        return fixMe.replace(" ", "+")
class Person(models.Model):
    """A fencer: name, gender, club memberships, optional site account."""
    firstName = models.CharField(max_length=30)
    lastName = models.CharField(max_length=30)
    gender = models.CharField(max_length=6, choices=(('F', 'Female'),('M', 'Male')))
    birthday = models.DateField('Birthday', blank=True, null=True)
    #usfaNumber = models.ForeignKey(UsfaCard, related_name="UsfaNumber", verbose_name="USFA #", blank=True, null=True)
    email = models.EmailField(verbose_name="Email", unique=True, blank=True, null=True)
    primaryClub = models.ForeignKey(Club, related_name="PrimaryClub", verbose_name="Primary")
    secondaryClub = models.ForeignKey(Club, related_name="SecondaryClub", verbose_name="Secondary", blank=True, null=True)
    account = models.OneToOneField(User, blank=True, null=True)
    createdBy = models.ForeignKey(User, related_name='person_createdBy')
    createdOn = models.DateTimeField(auto_now_add = True)
    editedBy = models.ForeignKey(User, related_name='person_editedBy')
    editedOn = models.DateTimeField(auto_now = True)

    def printedName(self):
        """Return "Last, First"."""
        return self.lastName+", "+self.firstName

    def __str__(self):
        return self.printedName()

    def normalName(self):
        """Return "First Last"."""
        return self.firstName + " " + self.lastName

    def getFencingRating(self, weapon):
        """Return this person's rating for the given weapon name.

        Returns the card's WeaponRating for "Foil"/"Epee"/"Saber", the
        string "U" when no USFA card is on file, and None for an unknown
        weapon name. Bug fix: a failed card lookup was previously swallowed
        and 'card' referenced anyway, raising NameError.
        """
        try:
            card = UsfaCard.objects.all().get(person=self.id)
        except Exception as e:
            print(e)
            return "U"
        if weapon == "Foil":
            return card.fencingFoilRating
        elif weapon == "Epee":
            return card.fencingEpeeRating
        elif weapon == "Saber":
            return card.fencingSaberRating

    # The per-weapon getters share one lookup path instead of three copies
    # of the same try/except block.
    def getFoilRating(self):
        return self.getFencingRating("Foil")

    def getEpeeRating(self):
        return self.getFencingRating("Epee")

    def getSaberRating(self):
        return self.getFencingRating("Saber")

    class Meta:
        ordering = ['lastName', 'firstName']
class UsfaCard(models.Model):
    """A USFA membership card: number, validity window, and per-weapon
    fencing and refereeing ratings for one Person."""
    # Fix: removed invalid max_length=9 — IntegerField has no max_length
    # (Django ignores it at best, rejects it in newer versions).
    number = models.IntegerField("USFA #", unique = True, blank=True, null=True)
    division = models.ForeignKey(Division)
    verified = models.BooleanField()
    issueDate = models.DateField("Issued", blank=True, null=True,)
    expirationDate = models.DateField("Expires", blank=True, null=True,)
    fencingFoilRating = models.ForeignKey(WeaponRating, related_name="FencingFoilRating", verbose_name="Foil")
    fencingEpeeRating = models.ForeignKey(WeaponRating, related_name="FencingEpeeRating", verbose_name="Epee")
    fencingSaberRating = models.ForeignKey(WeaponRating, related_name="FencingSaberRating", verbose_name="Saber")
    refFoilRating = models.ForeignKey(RefRating, related_name="RefFoilRating", blank=True, null=True, verbose_name="Foil Reffing")
    refEpeeRating = models.ForeignKey(RefRating, related_name="RefEpeeRating", blank=True, null=True, verbose_name="Epee Reffing")
    refSaberRating = models.ForeignKey(RefRating, related_name="RefSaberRating", blank=True, null=True, verbose_name="Saber Reffing")
    person = models.ForeignKey(Person)
    createdBy = models.ForeignKey(User, related_name='usfaCard_createdBy')
    createdOn = models.DateTimeField(auto_now_add = True)
    editedBy = models.ForeignKey(User, related_name='usfaCard_editedBy')
    editedOn = models.DateTimeField(auto_now = True)

    def __str__(self):
        return str(self.number)
class Tournament(models.Model):
    # A tournament: venue, fees, and the users allowed to manage it.
    name = models.CharField(max_length=200)
    # Hosting club is optional (e.g. externally-hosted tournaments).
    hostClub = models.ForeignKey(Club, verbose_name="Host", blank=True, null=True)
    city = models.CharField(max_length=50)
    state = models.CharField(max_length=25)
    street = models.CharField(max_length=100)
    official = models.BooleanField("USFA Official")
    details = models.CharField(max_length=2000, blank=True, null=True)
    entryFee = models.FloatField("Entry Fee", blank=True, null=True)
    eventFee = models.FloatField("Event Fee", blank=True, null=True)
    # Users who may edit this tournament and its events.
    owners = models.ManyToManyField(User)
    createdBy = models.ForeignKey(User, related_name='tournament_createdBy')
    createdOn = models.DateTimeField(auto_now_add = True)
    editedBy = models.ForeignKey(User, related_name='tournament_editedBy')
    editedOn = models.DateTimeField(auto_now = True)
    def __str__(self):
        return self.name
class Event(models.Model):
    # One competition within a Tournament, restricted by gender, age,
    # weapon, and rating band.
    GENDER_CHOICES = (
        ("Mixed", "Mixed"),
        ("Women", "Women"),
        ("Men", "Men")
    )
    AGE_CHOICES = (
        ("open", "Open"),
        ("vet70", "Veteran 70"),
        ("vet60", "Veteran 60"),
        ("vet50", "Veteran 50"),
        ("vet40", "Veteran 40"),
        ("vetcombined", "Veteran"),
        ("senior", "Senior"),
        ("junior", "Junior"),
        ("cadet", "Cadet"),
        ("Y19", "Youth 19"),
        ("Y14", "Youth 14"),
        ("Y12", "Youth 12"),
        ("Y10", "Youth 10"),
        ("Y8", "Youth 8")
    )
    WEAPON_CHOICES = (
        ("Foil", "Foil"),
        ("Epee", "Epee"),
        ("Saber", "Saber"),
    )
    WEAPON_RATING_CHOICES = (
        ('A', 'A'),
        ('B', 'B'),
        ('C', 'C'),
        ('D', 'D'),
        ('E', 'E'),
        ('U', 'U')
    )
    limitGender = models.CharField(max_length=6, choices=GENDER_CHOICES, verbose_name="Gender")
    limitAge = models.CharField(max_length=25, choices=AGE_CHOICES, verbose_name="Age")
    limitWeapon = models.CharField(max_length=5, choices=WEAPON_CHOICES, verbose_name="Weapon")
    limitMaxRating = models.CharField(max_length=1, choices=WEAPON_RATING_CHOICES, verbose_name="Max Allowed Rating", blank=True, null=True)
    limitMinRating = models.CharField(max_length=1, choices=WEAPON_RATING_CHOICES, verbose_name="Min Allowed Rating", blank=True, null=True)
    """
    0 - Taking preregistrations. Event has not began.
    1 - Not taking preregistrations. Initial seeding has been set. Waiting on clusters to be built
    2 - Clusters have been built. Waiting to begin first round of fencing.
    3 - Fencing has begun. Waiting for all clusters to finish.
    4 - Round is completed. Waiting for data to be entered and to seed new round.
    5 - New round seedings have been posted. Fencing has not yet resumed again.
    6 - The event has been completed.
    """
    # Lifecycle state; the codes are documented in the string above.
    status = models.IntegerField(blank=True, null=True, default=0)
    start = models.DateTimeField("Start Day/Time")
    tournament = models.ForeignKey(Tournament)
    createdBy = models.ForeignKey(User, related_name='event_createdBy')
    createdOn = models.DateTimeField(auto_now_add = True)
    editedBy = models.ForeignKey(User, related_name='event_editedBy')
    editedOn = models.DateTimeField(auto_now = True)
def __str__(self):
ratingString = ""
if self.limitMaxRating == "A" and (self.limitMinRating == "U" or self.limitMinRating == None):
ratingString = ""
elif self.limitMaxRating == None and self.limitMinRating == None:
ratingString = ""
elif self.limitMaxRating == self.limitMinRating:
ratingString = self.limitMaxRating
elif self.limitMaxRating == "U" and self.limitMinRating == None:
ratingString = "U"
elif (self.limitMaxRating != "U" and self.limitMaxRating != None) and (self.limitMinRating == "U" or self.limitMinRating == None):
ratingString = self.limitMaxRating + " and Under"
elif (self.limitMaxRating == None) and (self.limitMinRating != "U" and self.limitMinRating != None):
ratingString = self.limitMinRating + " and Over"
else:
ratingString = self.limitMaxRating + "-" + self.limitMinRating
return self.limitGender + " " + self.limitAge + " " + ratingString + " " + self.limitWeapon
def eventName(self):
return self.tournament.name + " - " + str(self)
def getStatus(self):
if self.status == 0:
return "Taking preregistation"
elif self.status == 1:
return "Preregistration closed"
def getReferees(self):
return Referee.objects.all().filter(event=self.id)
def getPossibleEntrants(self):
if self.limitGender == "Mixed":
possible = Person.objects.all()
if self.limitGender == "Women":
possible = Person.objects.all().filter(gender="F")
if self.limitGender == "Men":
possible = Person.objects.all().filter(gender="M")
# Remove people who are already entered
alreadyEntered = Entrant.objects.all().filter(event=self.id)
for entrant in alreadyEntered:
possible = possible.exclude(id=entrant.identity.id)
# Need to implement rating checking
#min = self.limitMinRating
#max = self.limitMaxRating
#
#minNum = letterToNum(min)
#maxNum = letterToNum(max)
#
#print "min: " + str(minNum) + " max: " + str(maxNum)
return possible
def generateInitialSeeding(self, user):
# In the first round, we always seed by rating
if self.getCurrentRound().roundNum == 1:
entrants = self.getEntrants()
if self.limitWeapon == "Foil":
entrants = entrants.order_by('identity__usfacard__fencingFoilRating__rating', '-identity__usfacard__fencingFoilRating__year')
elif self.limitWeapon == "Epee":
entrants = entrants.order_by('identity__usfacard__fencingEpeeRating__rating', '-identity__usfacard__fencingEpeeRating__year')
elif self.limitWeapon == "Saber":
entrants = entrants.order_by('identity__usfacard__fencingSaberRating__rating', '-identity__usfacard__fencingSaberRating__year')
stuff = []
# Shuffle all people with the same ratings
for rating in WeaponRating.objects.all():
if self.limitWeapon == "Foil":
someEntrants = entrants.all().filter(identity__usfacard__fencingFoilRating=rating.id)
elif self.limitWeapon == "Epee":
someEntrants = entrants.all().filter(identity__usfacard__fencingEpeeRating=rating.id)
elif self.limitWeapon == "Saber":
someEntrants = entrants.all().filter(identity__usfacard__fencingSaberRating=rating.id)
someStuff = []
someStuff.extend(someEntrants)
random.shuffle(someStuff)
stuff.extend(someStuff)
# Otherwise, we sort by their place, how they did in the last round
else:
entrants = Fencer.objects.all().filter(round=self.getLastRound()).order_by('place')
stuff = []
stuff.extend(entrants)
thisRound = self.getCurrentRound()
Fencer.objects.all().filter(round=thisRound).delete()
c = 1
for entrant in stuff:
card = UsfaCard.objects.all().get(person=entrant.identity)
Fencer.objects.create(identity=entrant.identity,
editedBy=user,
createdBy=user,
number=c,
place=c,
round=thisRound,
leftHanded=False,
state=0,
yellowCarded=False,
redCardCount=0,
primaryClub=entrant.identity.primaryClub,
secondaryClub=entrant.identity.secondaryClub,
foilRating=card.fencingFoilRating,
epeeRating=card.fencingEpeeRating,
saberRating=card.fencingSaberRating)
c = c + 1
return stuff
def getEntrants(self):
x = Entrant.objects.all().filter(event=self)
return (x)
def getEntrantIdentities(self):
personList = []
for entrant in Entrant.objects.all().filter(event=self):
personList.append(entrant.identity.id)
return personList
def getLastRound(self):
try:
round = Round.objects.get(event=self, roundNum=self.getCurrentRound().roundNum-1)
except:
return self.getCurrentRound()
return round
def getCurrentRound(self):
rounds = Round.objects.all().filter(event=self).order_by("roundNum")
for round in rounds:
if not round.isFinished:
return round
return None
# This returns the newest object by date, this could be bad. We want the last object in the list
#return rounds.latest()
def getNextRound(self):
try:
round = Round.objects.get(event=self, roundNum=self.getCurrentRound().roundNum+1)
except:
return self.getCurrentRound()
return round
class Entrant(models.Model):
identity = models.ForeignKey(Person)
hasPaid = models.BooleanField("Paid")
event = models.ForeignKey(Event)
createdBy = models.ForeignKey(User, related_name='entrant_createdBy')
createdOn = models.DateTimeField(auto_now_add = True)
editedBy = models.ForeignKey(User, related_name='entrant_editedBy')
editedOn = models.DateTimeField(auto_now = True)
def __str__(self):
return self.identity.printedName()
# Ensures that each person can only enter an event once
class Meta:
unique_together = ('identity', 'event')
ordering = ['identity']
class Strip(models.Model):
tournament = models.ForeignKey(Tournament)
number = models.PositiveIntegerField("Number")
grounded = models.BooleanField()
outOfService = models.BooleanField(verbose_name="Broken")
createdBy = models.ForeignKey(User, related_name='strip_createdBy')
createdOn = models.DateTimeField(auto_now_add = True)
editedBy = models.ForeignKey(User, related_name='strip_editedBy')
editedOn = models.DateTimeField(auto_now = True)
def __str__(self):
return str(self.number)
class Meta:
unique_together = ('tournament', 'number')
class Referee(models.Model):
identity = models.ForeignKey(Person)
event = models.ForeignKey(Event)
strip = models.ForeignKey(Strip, blank=True, null=True)
createdBy = models.ForeignKey(User, related_name='referee_createdBy')
createdOn = models.DateTimeField(auto_now_add = True)
editedBy = models.ForeignKey(User, related_name='referee_editedBy')
editedOn = models.DateTimeField(auto_now = True)
def __str__(self):
return self.identity.printedName()
def normalName(self):
return self.identity.normalName()
class Round(models.Model):
ROUND_TYPE_CHOICES = (
(1, "Pools"),
(2, "DE's"),
)
event = models.ForeignKey(Event)
type = models.IntegerField("Round Type", choices=ROUND_TYPE_CHOICES)
roundNum = models.IntegerField("Round Number")
isFinished = models.BooleanField("Is Finished", blank=True, default=False)
# 1.0 = 100%
promotionRate = models.FloatField()
createdBy = models.ForeignKey(User, related_name='round_createdBy')
createdOn = models.DateTimeField(auto_now_add = True)
editedBy = models.ForeignKey(User, related_name='round_editedBy')
editedOn = models.DateTimeField(auto_now = True)
def __str__(self):
return str(self.type)
def getType(self):
if self.type == 1:
return "Pool"
if self.type == 2:
return "DE"
def generatePoolOptions(self):
numEntrants = Entrant.objects.all().filter(event=self.event)
numEntrants = numEntrants.count()
#numEntrants = 142
options = []
sizes = []
for i in range(5, numEntrants/2 + 2):
sizes.append(i)
for size in sizes:
i = numEntrants % size
bigPoolNum = math.floor(numEntrants / size)
for c in range(i, size-1):
bigPoolNum = bigPoolNum - 1
message = ""
bigPoolNum = int(bigPoolNum)
message = str(size-i+bigPoolNum) + " pools: "
if int(bigPoolNum) > 1:
message = message + str(bigPoolNum) + " pools of " + str(size) + " and "
elif int(bigPoolNum) == 1:
message = message + str(bigPoolNum) + " pool of " + str(size) + " and "
else:
pass
if size - i == 1:
message = message + str(size-i) + " pool of " + str(size-1)
else:
message = message + str(size-i) + " pools of " + str(size-1)
if ((size-i) * (size-1)) + (math.fabs(bigPoolNum) * size) == numEntrants:
options.append(message)
options.append("1 pool: 1 pool of " + str(numEntrants))
options.sort(self.organize)
return options
# TODO: EVENTUALLY TAKE INTO ACCOUNT TIES AT VARIOUS PLACES
def determineDePlacement(self):
clusters = Cluster.objects.all().filter(round=self).order_by('number')
fencers = Fencer.objects.all().filter(round=self).order_by('number')
# All the losers together
losers = []
# For each cluster...
for cluster in clusters:
loserList = []
bouts = Bout.objects.all().filter(cluster=cluster)
# And for each bout, get all the losers
for bout in bouts:
loserList.append(bout.getLoser())
# Sort the losers based on their numbers (initial seeding), descending
loserList.sort(key=lambda x: x.number, reverse=False)
losers = loserList + losers
# And eventually prepend the winner to the list
for fencer in fencers:
if fencer not in loserList:
losers.reverse()
losers.append(fencer)
losers.reverse()
break
# Return the list of everyone in placement order, because we're all losers deep down
return losers
def generateDeTable(self, user):
fencers = Fencer.objects.all().filter(round=self).order_by('place')
clusters = Cluster.objects.all().filter(round=self)
#clusters.delete()
#print "woooooo progress!"
# If the Table is already generated, don't do it again
try:
if len(clusters) != 0:
return
except:
pass
numFencers = len(fencers)
roundSize = numFencers
# Add one until we have a power of 2 number of fencers
while math.log(roundSize, 2) != math.floor(math.log(roundSize, 2)):
roundSize = roundSize + 1
# The total number of byes that will be handed out
numByes = roundSize - numFencers
# The total number of byes left to be assigned
byesToGo = numByes
# The total number of rounds of FENCING (a bracket of 2 people has a numRounds = 1)
numRounds = 0
tmp = roundSize
# Set the number of rounds
while tmp != 1:
tmp = tmp / 2
numRounds = numRounds + 1
# Build each of the cluster
newCluster = Cluster(round=self, number=1, createdBy=user, editedBy=user)
newCluster.save()
# Get the shiny new clusters
clusters = Cluster.objects.all().filter(round=self)
topHalf = fencers.filter(place__lte=roundSize/2 )
tableClusters = Cluster.objects.all().filter(round=self)
# The place of an opponent to someone in the top half
opponentPlace = roundSize
# For each of the fencers in the top half...
for fencer in topHalf:
# If a bye still has to be given out, give one
if byesToGo > 0:
byesToGo = byesToGo - 1
leftFencer = fencer
leftFencer.save()
rightFencer = None
# Otherwise, assign two fencers to a bout
else:
leftFencer = fencer
rightFencer = fencers.get(place=opponentPlace)
leftFencer.save()
rightFencer.save()
opponentPlace = opponentPlace - 1
try:
bout = Bout(leftFencer=leftFencer,
rightFencer=rightFencer,
cluster=newCluster,
createdBy=user,
editedBy=user,
maxScore=15)
bout.save()
except Exception as e:
print e
peeps = len(fencers)
sizeMult = 2
numBouts = len(Bout.objects.all().filter(cluster=newCluster))
# For each round (except the first)...
for i in range(2,numRounds+1):
lastCluster = newCluster
numBouts = numBouts/2 + 1
# Make the new round
try:
newCluster = Cluster(round=self, number=i, createdBy=user, editedBy=user)
newCluster.save()
except Exception as e:
print e
# Make the appropriate number of new bouts
for c in range(1, numBouts):
try:
lastBouts = Bout.objects.all().filter(cluster=lastCluster, nextBout=None)
print "last: " + str(lastBouts)
bout = Bout(leftFencer=None,
rightFencer=None,
cluster=newCluster,
createdBy=user,
editedBy=user,
maxScore=15)
bout.save()
# TODO: THIS LOGIC IS POTENTIALLY REALLY REALLY REALLY BAD
# THERE IS ABSOLUTELY NO GUARANTEE THAT THE FIRST TWO BOUTS I PICK OFF
# WILL BE THE CORRECT ONES!!! NEED TO FIND A WAY TO SEARCH BY NUMERS OR SOMETHING
lastBoutA = lastBouts[0]
lastBoutB = lastBouts[1]
lastBoutA.nextBout = bout
lastBoutB.nextBout = bout
lastBoutA.save()
lastBoutB.save()
except Exception as e:
print e
# Decrease the size by two
sizeMult = sizeMult * 2
def generatePools(self, totalPools, totalBig, bigSize, totalSmall, user, primary, secondary, name, division):
fencers = Fencer.objects.all().filter(round=self)
clusters = Cluster.objects.all().filter(round=self)
clusters.delete()
pools = []
# Make the new pools
for i in range (0, totalPools):
Cluster.objects.all().create(
round=self,
number=i+1,
createdBy=user,
editedBy=user
)
# Get all the clusters
clusters = Cluster.objects.all().filter(round=self)
ascending = True
lastCluster = 0
# For each fencer, work down the ordered list of pools, placing one fencer in each pool
for fencer in fencers:
if ascending:
lastCluster = lastCluster + 1
else:
lastCluster = lastCluster - 1
# Once we hit the end of the list of pools, set the current pool to the last/first one in the list, and then traverse in the opposite direction
try:
cluster = Cluster.objects.all().get(round=self, number=lastCluster)
except:
if ascending:
lastCluster = lastCluster - 1
else:
lastCluster = lastCluster + 1
ascending = not ascending
cluster = Cluster.objects.all().get(round=self, number=lastCluster)
# Set the fencer's pool, and save it
fencer.cluster = cluster
fencer.save()
#fencers = Fencer.objects.all().filter(round=self).order_by('?')
#weapon = self.event.limitWeapon
#intFencers = {}
#maxOfClub = {}
#minOfClub = {}
#clubDic = {}
#pools = []
#clubsInPools = []
#
#clubList = []
#clubList.extend(Club.objects.all().filter(PrimaryClub__fencer__round=self))
#for club in clubList:
# maxOfClub[club] = math.ceil(float(clubList.count(club))/totalPools)
# minOfClub[club] = math.floor(float(clubList.count(club))/totalPools)
# clubDic[club] = 0
#
## Remove all clusters that might already be in this round
#Cluster.objects.all().filter(round=self).delete()
#
#for fencer in Fencer.objects.all().filter(round=self):
# fencer.cluster = None
# fencer.save()
#
## Make the new pools
#for i in range (0, totalPools):
# clubsInPools.append(clubDic)
# pools.append([])
# Cluster.objects.all().create(
# round=self,
# number=i+1,
# createdBy=user,
# editedBy=user
# )
#
#clusters = Cluster.objects.all().filter(round=self)
#
## Turn the fencer's rating into a number, and associate them in the dictionary
#for fencer in fencers:
# rating = fencer.getFencingRating(weapon)
#
# # Only sort by ratings if it's the initial round
# if self.roundNum == 1:
# if rating.year == None:
# intFencers[fencer] = (letterToNum(rating.rating) * 10)
# else:
# intFencers[fencer] = (letterToNum(rating.rating) * 10) + (4 - (datetime.datetime.now().year - int(rating.year)))
# else:
# intFencers[fencer] = fencer.place
#
#for rating in WeaponRating.objects.all():
# if weapon == "Foil":
# fencers = Fencer.objects.all().filter(foilRating=rating.id, round=self).order_by('?')
# elif weapon == "Epee":
# fencers = Fencer.objects.all().filter(epeeRating=rating.id, round=self).order_by('?')
# elif weapon == "Saber":
# fencers = Fencer.objects.all().filter(saberRating=rating.id, round=self).order_by('?')
#
# # For each fencer... (Starting with the strongest)
# for fencer in fencers:
# poolRating = 0
# lowestPool = -1
# lowestPoolRating = 99999999999
#
# # Find the number of the weakest pool...
# for i in range(0, len(pools)):
#
# # Skip pools that are full
# if i < totalBig and len(pools[i]) == bigSize:
# continue
# elif i >= totalBig and len(pools[i]) == bigSize-1:
# continue
#
# # If there are people in the pool...
# if pools[i] != []:
# for person in pools[i]:
# poolRating = poolRating + intFencers[person]
# #print "rating " + str(i) + ": " + str(poolRating)
# if poolRating <= lowestPoolRating:
# lowestPool = i
# lowestPoolRating = poolRating
#
# poolRating = 0
# # If there is nobody in the pool...
# else:
# lowestPool = i
# lowestPoolRating = 99999999999
# break
#
# # And join that one
# pools[lowestPool].append(fencer)
# fencer.cluster = clusters[lowestPool]
# fencer.save()
# clubsInPools[lowestPool][fencer.primaryClub] = clubsInPools[lowestPool][fencer.primaryClub]+1
#
# # This controls swapping based on primary club
# if len(Fencer.objects.all().filter(cluster=fencer.cluster, identity__primaryClub=fencer.primaryClub)) > int(maxOfClub[fencer.primaryClub]):
#
# if weapon == "Foil":
# possibleSwaps = Fencer.objects.all().filter(round=self, foilRating=fencer.foilRating).exclude(cluster=fencer.cluster).exclude(cluster=None).exclude(identity__primaryClub=fencer.primaryClub)
# elif weapon == "Epee":
# possibleSwaps = Fencer.objects.all().filter(round=self, epeeRating=fencer.epeeRating).exclude(cluster=fencer.cluster).exclude(cluster=None).exclude(identity__primaryClub=fencer.primaryClub)
# elif weapon == "Saber":
# possibleSwaps = Fencer.objects.all().filter(round=self, saberRating=fencer.saberRating).exclude(cluster=fencer.cluster).exclude(cluster=None).exclude(identity__primaryClub=fencer.primaryClub)
#
#
# if possibleSwaps:
# for swap in possibleSwaps:
# if len(Fencer.objects.all().filter(identity__primaryClub=swap.identity.primaryClub, cluster=fencer.cluster))+1 <= int(maxOfClub[swap.identity.primaryClub]):
# switch = swap
# break
# try:
# tmp = fencer.cluster
# #switch = Fencer.objects.all().get(pk=possibleSwaps[0].id)
# fencer.cluster = switch.cluster
# switch.cluster = tmp
# switch.save()
# fencer.save()
# print "swapped " + str(fencer) + " and " + str(switch)
# except Exception as e:
# pass
#return pools
def organize(self, a, b):
c = re.match('.*? .*?', a)
u = re.match('.*? .*?', b)
c = int(c.group(0))
u = int(u.group(0))
if u < c:
return -1
elif u > c:
return 1
else:
return 0
return 1
def getBouts(self):
return Bout.objects.all().filter(cluster__round=self)
def getClusters(self):
return Cluster.objects.all().filter(round=self)
def printTable(self):
ret = ""
clusters = self.getClusters()
i = 0
# For each cluster...
for cluster in clusters:
# Make a table, one row long
ret = ret + "<table class='bracket' border=0>"
tableSize = cluster.getNumBouts() * 2
# TODO: Need to add a case statement for the ending parts ("semifinals, quarterfinals, etc")
ret = ret + "<th>Table of " + str(tableSize) + "</th>"
i = i + 1
bouts = cluster.getBouts()
c = 0
for bout in bouts:
if c < len(bouts)/2:
top = bout.leftFencer
bottom = bout.rightFencer
else:
top = bout.rightFencer
bottom = bout.leftFencer
ret = ret + "<tr><td>"+str(top)+"</td></tr>"
ret = ret + "<tr><td>"+str(bottom)+"</td></tr>"
c = c + 1
ret = ret + "</table>"
return ret
# A group of fencers in a round. This is like a single pool for Pools, a table set in DE's, and an iteration in Round Robin
class Cluster(models.Model):
round = models.ForeignKey(Round, verbose_name="Round")
# Akin to pool number in pools, round number in De's (table of 16, 8, etc)
number = models.PositiveIntegerField("Pool/Round #")
createdBy = models.ForeignKey(User, related_name='cluster_createdBy')
createdOn = models.DateTimeField(auto_now_add = True)
editedBy = models.ForeignKey(User, related_name='cluster_editedBy')
editedOn = models.DateTimeField(auto_now = True)
def __str__(self):
return str(self.round.getType()) + ":" + str(self.number)
def isEmpty(self):
if Fencer.objects.all().filter(cluster=self) == []:
return True
else:
return False
def getFencersForDe(self):
shouldSwap = False
print "jh" + str(Fencer.objects.all().filter(cluster=self).order_by("number"))
fencers = Fencer.objects.all().filter(cluster=self).order_by("number")
clone = fencers
# TODO: Ensure this algorithm is right! Guess and check != good testing
for i in range(0, len(clone)/2):
b = len(clone)-(i)
if shouldSwap:
fencers[i], fencers[b] = fencers[b], fencers[i]
i = i + 1
shouldSwap = not shouldSwap
return fencers
def getFencers(self):
fencers = Fencer.objects.all().filter(cluster=self).order_by("number")
return fencers
def getBouts(self):
bouts = list(Bout.objects.all().filter(cluster=self))
clone = bouts
shouldSwap = False
# TODO: Ensure this algorithm is right! Guess and check != good testing
for i in range(0, len(clone)/2):
b = len(clone)-(i)
if shouldSwap:
bouts[i], bouts[b] = bouts[b], bouts[i]
i = i + 1
shouldSwap = not shouldSwap
return bouts
def getNumBouts(self):
return len(self.getBouts())
def printDe(self):
biggestRound = Cluster.objects.all().get(round=self.round, number=1)
ret = "<table border=1>"
ret = ret + "<tr>"
ret = ret + "<td>"
ret = ret + "</td>"
ret = ret + "</tr>"
ret = ret + "</table>"
ret = "<html><h2>hi</h2></html>"
return ret
class Meta:
unique_together = ('round', 'number')
class Fencer(models.Model):
identity = models.ForeignKey(Person)
number = models.PositiveIntegerField()
leftHanded = models.BooleanField()
place = models.IntegerField()
round = models.ForeignKey(Round)
cluster = models.ForeignKey(Cluster, blank=True, null=True, on_delete=models.SET_NULL)
# Different numbers mean different states (fencing, injured, ondeck, etc)
state = models.IntegerField()
yellowCarded = models.BooleanField(default=False)
redCardCount = models.IntegerField(default=0)
createdBy = models.ForeignKey(User, related_name='fencer_createdBy')
createdOn = models.DateTimeField(auto_now_add = True)
editedBy = models.ForeignKey(User, related_name='fencer_editedBy')
editedOn = models.DateTimeField(auto_now = True)
# The ratings for each fencer at the time of the event. These should never change once the fencer is created
# These should also be what is referenced instead of the identity's usfa card, because that changes over time.
foilRating = models.ForeignKey(WeaponRating, related_name="foilRating", verbose_name="Foil")
epeeRating = models.ForeignKey(WeaponRating, related_name="epeeRating", verbose_name="Epee")
saberRating = models.ForeignKey(WeaponRating, related_name="saberRating", verbose_name="Saber")
# Again, these are copied from the originial person and should never be changed.
# This is so people can look at past tournaments and not have their information change
primaryClub = models.ForeignKey(Club, related_name="primaryClub", verbose_name="Club I")
secondaryClub = models.ForeignKey(Club, related_name="secondaryClub", verbose_name="Club II", blank=True, null=True)
def __str__(self):
return str(self.number) + ":" + self.identity.lastName
def getFencingRating(self, weapon):
if weapon == "Foil":
return self.foilRating
elif weapon == "Epee":
return self.epeeRating
elif weapon == "Saber":
return self.saberRating
def getVP(self):
totalBouts = float((len(Bout.objects.all().filter(leftFencer=self,finished=True)) + len(Bout.objects.all().filter(rightFencer=self,finished=True))))
if totalBouts < 1:
totalBouts = 1
return float(self.getV()) / totalBouts
def getV(self):
leftWins = len(Bout.objects.all().filter(finished=True, leftFencer=self).filter(leftScore__gte=F('rightScore')))
rightWins = len(Bout.objects.all().filter(finished=True, rightFencer=self).filter(rightScore__gte=F('leftScore')))
return leftWins + rightWins
def getD(self):
leftLosses = len(Bout.objects.all().filter(finished=True, leftFencer=self).filter(leftScore__lt=F('rightScore')))
rightLosses = len(Bout.objects.all().filter(finished=True, rightFencer=self).filter(rightScore__lt=F('leftScore')))
return leftLosses + rightLosses
def getB(self):
return self.getV() + self.getD()
def getInd(self):
return self.getTS() - self.getTR()
def getTS(self):
leftBouts = Bout.objects.all().filter(leftFencer=self)
rightBouts = Bout.objects.all().filter(rightFencer=self)
ind = 0
for bout in leftBouts:
ind = ind + bout.leftScore
for bout in rightBouts:
ind = ind + bout.rightScore
return ind
def getTR(self):
leftBouts = Bout.objects.all().filter(leftFencer=self)
rightBouts = Bout.objects.all().filter(rightFencer=self)
ind = 0
for bout in leftBouts:
ind = ind + bout.rightScore
for bout in rightBouts:
ind = ind + bout.leftScore
return ind
class Bout(models.Model):
# If missing a fencer, that means the bout is a bye
leftFencer = models.ForeignKey(Fencer, related_name="Left", blank=True, null=True)
rightFencer = models.ForeignKey(Fencer, related_name="Right", blank=True, null=True)
leftScore = models.PositiveIntegerField(default=0)
rightScore = models.PositiveIntegerField(default=0)
maxScore = models.PositiveIntegerField(default=5)
timer = models.IntegerField(default=180)
breakTimer = models.IntegerField(default=60)
leftInjuryTimer = models.IntegerField(default=600)
rightInjuryTimer = models.IntegerField(default=600)
period = models.IntegerField(default=1)
finished = models.BooleanField(default=False)
strip = models.ForeignKey(Strip, blank=True, null=True)
cluster = models.ForeignKey(Cluster)
nextBout = models.ForeignKey('self', blank=True, null=True)
createdBy = models.ForeignKey(User, related_name='bout_createdBy')
createdOn = models.DateTimeField(auto_now_add = True)
editedBy = models.ForeignKey(User, related_name='bout_editedBy')
editedOn = models.DateTimeField(auto_now = True)
def __str__(self):
try:
leftPrint = str(self.leftFencer.number)
except:
leftPrint = "?"
try:
rightPrint = str(self.rightFencer.number)
except:
rightPrint = "?"
return rightPrint + " - " + leftPrint
def formatLeftScore(self):
if self.rightScore != 0 and not self.finished:
return self.leftScore
if self.leftScore == 0 and not self.finished:
return ""
elif self.leftScore != 0 and not self.finished:
return str(self.leftScore)
elif self.finished and self.leftScore < self.rightScore:
return "D" + str(self.leftScore)
elif self.finished and self.leftScore > self.rightScore:
return "V" + str(self.leftScore)
def formatRightScore(self):
if self.leftScore != 0 and not self.finished:
return self.rightScore
if self.rightScore == 0 and not self.finished:
return ""
elif self.rightScore != 0 and not self.finished:
return str(self.rightScore)
elif self.finished and self.rightScore < self.leftScore:
return "D" + str(self.rightScore)
elif self.finished and self.rightScore > self.leftScore:
return "V" + str(self.rightScore)
def isInBout(self, a):
if len(Bout.objects.all().filter(leftFencer=a)) + len(Bout.objects.all().filter(rightFencer=a)) > 0:
return True
else:
return False
def prettyDePrint(self):
ret = ""
try:
ret = "(" + str(fencer.number) +")" + fencer.identity.printedName()
except:
ret = "Bye!"
return ret
def getNextBout(self):
print str(self.cluster.round) + "TACO CITY!!!"
def printForDe(self):
if self.leftFencer == None and self.rightFencer == None:
return "? - ?"
try:
leftPrint = self.prettyDePrint(self.leftFencer)
except:
leftPrint = "?"
try:
rightPrint = self.prettyDePrint(self.rightFencer)
except:
rightPrint = "?"
return leftPrint + " - " + rightPrint
def getWinner(self):
if self.finished:
if self.leftScore > self.rightScore:
return self.leftFencer
if self.leftScore < self.rightScore:
return self.rightFencer
else:
return None
def getLoser(self):
if self.finished:
if self.leftScore < self.rightScore:
return self.leftFencer
if self.leftScore > self.rightScore:
return self.rightFencer
else:
return None
class ListedReferee(models.Model):
identity = models.ForeignKey(Person)
event = models.ForeignKey(Event)
createdBy = models.ForeignKey(User, related_name='listedReferee_createdBy')
createdOn = models.DateTimeField(auto_now_add = True)
editedBy = models.ForeignKey(User, related_name='listedReferee_editedBy')
editedOn = models.DateTimeField(auto_now = True)
def __str__(self):
return self.identity.printedName()
class UsfaCardForm(ModelForm):
required_css_class = 'required'
error_css_class = 'error'
def clean_number(self):
data = self.cleaned_data['number']
if len(str(data)) != 9 and data != None:
raise forms.ValidationError("This number is not the correct length.")
return data
class Meta:
model = UsfaCard
exclude = {'createdBy', 'editedBy', 'issueDate', 'expirationDate', 'verified', 'person'}
class ClubForm(ModelForm):
required_css_class = 'required'
error_css_class = 'error'
class Meta:
model = Club
exclude = {'createdBy', 'editedBy'}
class TournamentForm(ModelForm):
required_css_class = 'required'
error_css_class = 'error'
class Meta:
model = Tournament
exclude = {'owners', 'createdBy', 'editedBy'}
class RoundForm(ModelForm):
required_css_class = 'required'
error_css_class = 'error'
class Meta:
model = Round
exclude = {'event', 'createdBy', 'editedBy', 'roundNum'}
class RefereeForm(ModelForm):
class Meta:
model = Referee
exclude = {'createdBy', 'editedBy', 'event', 'strip'}
class EntrantForm(ModelForm):
class Meta:
model = Entrant
exclude = {'hasPaid', 'createdBy', 'editedBy', 'event'}
class EventForm(ModelForm):
required_css_class = 'required'
error_css_class = 'error'
def clean_limitMinRating(self):
min = self.cleaned_data['limitMinRating']
max = self.cleaned_data['limitMaxRating']
if min == None or max == None:
return min
intMin = letterToNum(min)
intMax = letterToNum(max)
if intMin > intMax:
raise forms.ValidationError("The minimum rating must be below or equal to the maximum rating.")
return min
class Meta:
model = Event
exclude = {'createdBy', 'editedBy', 'tournament', 'status'}
class PersonForm(ModelForm):
required_css_class = 'required'
error_css_class = 'error'
def clean_firstName(self):
data = self.cleaned_data['firstName']
data = data.capitalize()
if not data.isalpha():
raise forms.ValidationError("Names must contain only letters.")
return data
def clean_lastName(self):
data = self.cleaned_data['lastName']
data = data.capitalize()
if not data.isalpha():
raise forms.ValidationError("Names must contain only letters.")
return data
def clean_secondaryClub(self):
data = self.cleaned_data['secondaryClub']
if self.cleaned_data['primaryClub'] == data:
raise forms.ValidationError("Primary and secondary clubs must be different.")
class Meta:
model = Person
exclude = {'account', 'createdBy', 'editedBy'}
class UserForm(ModelForm):
required_css_class = 'required'
error_css_class = 'error'
class Meta:
model = User
fields = ('username', 'password')
widgets = {
'username': TextInput(),
'password': PasswordInput(),
} | Python |
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.assertEqual(1 + 1, 2)
| Python |
from django.template import Context, loader
from bouting.models import *
from django.http import HttpResponse
from django.http import Http404
from django.shortcuts import render_to_response, get_object_or_404
from django.core.urlresolvers import reverse
from django.template import RequestContext
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.shortcuts import HttpResponseRedirect
from django.core.mail import send_mail
from django.utils import simplejson
from django.core import serializers
import datetime
import re
def notYours(request):
return HttpResponse("Hey! That's not yours to mess with!")
def signUp(request):
if request.method == "POST":
newUser = User()
userForm = UserForm(request.POST, instance=newUser)
person = Person(createdBy=newUser, editedBy=newUser)
personForm = PersonForm(request.POST, instance=person)
card = UsfaCard(createdBy=newUser, editedBy=newUser, person=person)
usfaCardForm = UsfaCardForm(request.POST, instance=card)
if personForm.is_valid() and usfaCardForm.is_valid() and userForm.is_valid():
newU = userForm.save(commit=False)
newU.set_password(userForm.cleaned_data['password'])
newU.save()
newPerson = personForm.save(commit=False)
newPerson.createdBy=newUser
newPerson.editedBy=newUser
newPerson.save()
newForm = usfaCardForm.save(commit=False)
newForm.person = person
newForm.createdBy=newUser
newForm.editedBy=newUser
newForm.save()
return HttpResponseRedirect('/bouting/')
else:
usfaCardForm = UsfaCardForm(request.POST)
else:
personForm = PersonForm()
usfaCardForm = UsfaCardForm()
userForm = UserForm()
return render_to_response('bouting/signUp.html', {'personForm' : personForm,
'usfaCardForm' : usfaCardForm,
'userForm' : userForm,
}, context_instance=RequestContext(request) )
def signOut(request):
logout(request)
return render_to_response('bouting/signOut.html', {}, context_instance=RequestContext(request) )
def signIn(request):
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
# Redirect to a success page.
else:
return HttpResponse("Hello, world. You're at the poll index.")
else:
return HttpResponse("Hello, world. You're at the poll index.")
return render_to_response('bouting/signIn.html', {}, context_instance=RequestContext(request) )
def index(request):
#t = loader.get_template('bouting/index.html')
#c = Context({})
#return HttpResponse(t.render(c))
return render_to_response('bouting/index.html', {}, context_instance=RequestContext(request) )
@login_required(redirect_field_name='/bouting/')
def myTournamentList(request):
tournamentList = Tournament.objects.all().filter(owners=request.user)
return render_to_response('bouting/tournamentList.html', {'tournamentList' : tournamentList}, context_instance=RequestContext(request) )
def tournamentList(request):
tournamentList = Tournament.objects.all()
return render_to_response('bouting/tournamentList.html', {'tournamentList' : tournamentList}, context_instance=RequestContext(request) )
@login_required(redirect_field_name='/bouting/')
def personList(request):
latest_person_list = Person.objects.all().order_by('firstName')
#t = loader.get_template('bouting/personList.html')
#c = Context({
# 'latest_person_list': latest_person_list,
#})
#return HttpResponse(t.render(c))
return render_to_response('bouting/personList.html', {'latest_person_list' : latest_person_list}, context_instance=RequestContext(request) )
def createClub(request):
if request.method == 'POST':
club = Club(createdBy=request.user, editedBy=request.user)
form = ClubForm(request.POST, instance=club)
if form.is_valid():
form.save()
return HttpResponseRedirect('/bouting/') # Redirect after POST
else:
form = ClubForm() # An unbound form
return render_to_response('bouting/createClub.html', {
'form': form,
}, context_instance=RequestContext(request))
def editClub(request, club_id):
    """Edit a club; only its owners may do so.

    After a successful save, every tournament hosted by the club has its
    owner set refreshed so newly added club owners also own those tournaments.
    """
    try:
        club = Club.objects.get(id=club_id)
    except Club.DoesNotExist:  # was a bare except, which also hid real errors
        raise Http404
    if not request.user in club.owners.all():
        return notYours(request)
    if request.method == 'POST':
        form = ClubForm(request.POST, instance=club)
        if form.is_valid():
            # Stamp the editor on the model instance; setting the attribute on
            # the *form* object (as before) never reached the database.
            form.instance.editedBy = request.user
            form.save()
            # Propagate (possibly new) club owners to hosted tournaments.
            for tournament in Tournament.objects.filter(hostClub=club):
                tournament.owners = club.owners.all()
                tournament.save()
            return HttpResponseRedirect('/bouting/clubList/')  # Redirect after POST
    else:
        form = ClubForm(instance=club)
    return render_to_response('bouting/editClub.html', {
        'form': form,
        'club': club,
    }, context_instance=RequestContext(request))
def clubDetail(request, club_id):
    """Show the public detail page for one club."""
    try:
        club = Club.objects.get(id=club_id)
    except Club.DoesNotExist:  # was a bare except, which also hid real errors
        raise Http404
    return render_to_response('bouting/clubDetail.html', {'club': club},
                              context_instance=RequestContext(request))
def clubList(request):
    """Public list of every club."""
    all_clubs = Club.objects.all()
    return render_to_response('bouting/clubList.html', {'clubList': all_clubs},
                              context_instance=RequestContext(request))
def myClubList(request):
    """List only the clubs owned by the current user."""
    owned = Club.objects.filter(owners=request.user)
    return render_to_response('bouting/clubList.html', {'clubList': owned},
                              context_instance=RequestContext(request))
def deleteClub(request, club_id):
    """Delete a club; only its owners may do so."""
    try:
        club = Club.objects.get(pk=club_id)
    except Club.DoesNotExist:
        raise Http404
    if not request.user in club.owners.all():
        return notYours(request)
    # Delete the object we already fetched instead of re-querying by pk.
    club.delete()
    return HttpResponseRedirect('/bouting/clubList/')
def editPerson(request, person_id, tournament_id="", event_id=""):
    """Edit a person and their USFA card on one combined page.

    When invoked from an event's entrant-confirmation page the tournament and
    event ids are supplied and the user is sent back there after saving;
    otherwise they return to the site root.
    """
    try:
        person = Person.objects.get(pk=person_id)
        card = UsfaCard.objects.get(person=person)
    except Exception:
        raise Http404
    # The names must exist for the render context below; the original code
    # raised NameError when tournament_id/event_id were both empty.
    tournament = None
    event = None
    if tournament_id == "" and event_id == "":
        redirect = '/bouting/'
    else:
        try:
            tournament = Tournament.objects.get(pk=tournament_id)
            event = Event.objects.get(pk=event_id)
            redirect = '/bouting/' + tournament_id + "/" + event_id + "/confirmEntrants/"
        except Exception:
            raise Http404
    if request.method == 'POST':
        formA = PersonForm(request.POST, instance=person)
        formB = UsfaCardForm(request.POST, instance=card)
        if formA.is_valid() and formB.is_valid():
            # Stamp the editor on the model; an attribute set on the form
            # object (as the original did) was silently discarded.
            formA.instance.editedBy = request.user
            formA.save()
            formB.instance.person = person
            formB.save()
            return HttpResponseRedirect(redirect)
        # invalid: fall through and re-render the bound forms with errors
    else:
        formA = PersonForm(instance=person)
        formB = UsfaCardForm(instance=card)
    return render_to_response('bouting/editPerson.html', {
        'formA': formA,
        'formB': formB,
        'person': person,
        'tournament': tournament,
        'event': event,
    }, context_instance=RequestContext(request))
def createPerson(request):
    """Create a Person together with their UsfaCard from one two-form page."""
    if request.method == 'POST':
        person = Person(createdBy=request.user, editedBy=request.user)
        formA = PersonForm(request.POST, instance=person)
        card = UsfaCard(createdBy=request.user, editedBy=request.user, person=person)
        formB = UsfaCardForm(request.POST, instance=card)
        if formA.is_valid() and formB.is_valid():
            formA.save()
            # Save the card with commit=False so the freshly saved person can
            # be attached before the card row is written.
            newForm = formB.save(commit=False)
            newForm.person = person
            newForm.save()
            return HttpResponseRedirect('/bouting/')
        else:
            # NOTE(review): rebinding formB without the instance drops the
            # preset createdBy/editedBy/person on the re-rendered card form —
            # confirm whether this is intentional.
            formB = UsfaCardForm(request.POST)
    else:
        formA = PersonForm()
        formB = UsfaCardForm()
    return render_to_response('bouting/createPerson2.html', {
        'formA': formA,
        'formB': formB,
    }, context_instance=RequestContext(request))
def deleteEntrant(request, tournament_id, event_id, entrant_id):
    """Remove an entrant from an event and return to the tournament detail page."""
    try:
        entrant = Entrant.objects.get(pk=entrant_id)
        tournament = Tournament.objects.get(pk=tournament_id)
        Event.objects.get(pk=event_id)  # 404 on a stale event id, as before
    except Exception:
        raise Http404
    # TODO(review): no ownership check — any authenticated user can delete.
    # The original had the owner check commented out; preserved that behavior.
    entrant.delete()
    return HttpResponseRedirect('/bouting/' + str(tournament.id) + '/tournamentDetail/')
def createEntrant(request, tournament_id, event_id):
try:
event = Event.objects.get(pk=event_id)
tournament = Tournament.objects.get(pk=tournament_id)
except Exception as e:
print e
raise Http404
#if not request.user in tournament.owners.all():
# return notYours(request)
if request.method == 'POST':
entrant = Entrant(createdBy=request.user, editedBy=request.user, event=event)
form = EntrantForm(request.POST, instance=entrant)
if form.is_valid():
# If the entrant is not allowed to enter the tournament
if not form.cleaned_data['identity'] in event.getPossibleEntrants():
return HttpResponseRedirect('/bouting/'+str(tournament.id)+'/tournamentView/')
form.save()
return HttpResponseRedirect('/bouting/'+str(tournament.id)+'/tournamentView/') # Redirect after POST
else:
pass
return HttpResponseRedirect('/bouting/'+str(tournament.id)+'/tournamentDetail/') # Redirect after POST
def deleteRef(request, tournament_id, event_id, referee_id):
    """Delete a referee from an event; requires ownership and the delete permission."""
    try:
        event = Event.objects.get(pk=event_id)
        referee = Referee.objects.get(pk=referee_id)
        t = Tournament.objects.get(pk=tournament_id)
    except Exception as e:
        raise Http404
    if request.user not in t.owners.all():
        return notYours(request)
    if not request.user.has_perm('bouting.delete_referee'):
        return notYours(request)
    Referee.objects.filter(pk=referee_id).delete()
    events = Event.objects.filter(tournament=tournament_id)
    personList = Person.objects.all()
    return render_to_response('bouting/createRef_Success.html',
                              {'tournament': t, 'eventList': events, 'personList': personList},
                              context_instance=RequestContext(request))
def createRef(request, tournament_id, event_id):
try:
event = Event.objects.get(pk=event_id)
tournament = Tournament.objects.get(pk=tournament_id)
except Exception as e:
print e
raise Http404
if not request.user in tournament.owners.all():
return notYours(request)
if request.method == 'POST':
ref = Referee(createdBy=request.user, editedBy=request.user, event=event)
form = RefereeForm(request.POST, instance=ref)
if form.is_valid():
form.save()
return HttpResponseRedirect('/bouting/'+str(tournament.id)+'/tournamentDetail/') # Redirect after POST
else:
pass
return HttpResponseRedirect('/bouting/'+str(tournament.id)+'/tournamentDetail/') # Redirect after POST
def tournamentView(request, tournament_id):
    """Public view of a tournament and its events."""
    try:
        tournament = Tournament.objects.get(pk=tournament_id)
        activeEventList = Event.objects.filter(tournament=tournament)
    except Exception:
        raise Http404
    context = {
        'tournament': tournament,
        'eventList': activeEventList,
    }
    return render_to_response('bouting/tournamentView.html', context,
                              context_instance=RequestContext(request))
def roundView(request, tournament_id, event_id, roundNum):
    """Show one round of an event, looked up by round number within the event."""
    try:
        tournament = Tournament.objects.get(pk=tournament_id)
        event = Event.objects.get(pk=event_id)
        round = Round.objects.get(event=event, roundNum=roundNum)
    except Exception:
        raise Http404
    context = {
        'event': event,
        'tournament': tournament,
        'round': round,
    }
    return render_to_response('bouting/roundView.html', context,
                              context_instance=RequestContext(request))
def eventView(request, tournament_id, event_id):
    """Show an event together with its current round and full round list."""
    try:
        tournament = Tournament.objects.get(pk=tournament_id)
        event = Event.objects.get(pk=event_id)
        round = event.getCurrentRound()
        roundList = Round.objects.filter(event=event)
    except Exception:
        raise Http404
    context = {
        'event': event,
        'tournament': tournament,
        'round': round,
        'roundList': roundList,
    }
    return render_to_response('bouting/eventView.html', context,
                              context_instance=RequestContext(request))
#('/bouting/' +str(tournament.id)+ '/' +str(event.id) + '/eventView/')
def roundResults(request, tournament_id, event_id, round_id):
    """Show placement results for one round of an event.

    DE rounds (type 2) have their final placements computed before display.
    """
    try:
        tournament = Tournament.objects.get(pk=tournament_id)
        event = Event.objects.get(pk=event_id)
        round = Round.objects.get(pk=round_id)
    except Exception:
        raise Http404
    # For DE's, compute the finished fencers' positions first.  The queryset
    # below is lazy, so it sees the freshly written places either way; the
    # original issued the identical query twice.
    if round.type == 2:
        round.determineDePlacement()
    fencerList = Fencer.objects.filter(round__event=event, round=round).order_by('place')
    return render_to_response('bouting/roundResults.html', {
        'event': event,
        'tournament': tournament,
        'round': round,
        'fencerList': fencerList
    }, context_instance=RequestContext(request))
def editBout(request, tournament_id, event_id, round_id):
    """Record a bout score and, when every bout in the event is finished,
    close out the round (placing fencers for pools, advancing for DEs)."""
    try:
        tournament = Tournament.objects.get(pk=tournament_id)
        event = Event.objects.get(pk=event_id)
        round = Round.objects.get(pk=round_id)
    except Exception:
        raise Http404
    if not request.user in tournament.owners.all():
        return notYours(request)
    # Get the bout and the necessary data, then save it.
    # Score inputs are named "<tournament_id>-<event_id>-<roundNum>-leftScore" etc.
    if request.method=="POST":
        bout = Bout.objects.get(pk=request.POST["bout"])
        bout.leftScore = int(request.POST[str(tournament.id)+"-"+str(event.id)+"-"+str(round.roundNum)+"-"+"leftScore"])
        bout.rightScore = int(request.POST[str(tournament.id)+"-"+str(event.id)+"-"+str(round.roundNum)+"-"+"rightScore"])
        bout.finished = request.POST["done"]
        bout.save()
        # DE only: push this bout's winner forward into the next bracket bout.
        if bout.nextBout != None:
            nextBout = bout.nextBout
            if nextBout != None:
                if bout.getWinner() == bout.leftFencer:
                    nextBout.leftFencer = bout.leftFencer
                elif bout.getWinner() == bout.rightFencer:
                    nextBout.rightFencer = bout.rightFencer
                nextBout.save()
                print "wooo we done!"
    # Get all the bouts...
    boutList = Bout.objects.all().filter(cluster__round__event=event)
    print "bouts to go: " + str(boutList)
    # TODO: There is a better way to do this by checking if the boutList is empty and returning only unfinished bouts
    # And see if they are all done...
    done = True
    for bout in boutList:
        if not bout.finished:
            done = False
            print "NOT DONE: " + str(bout)
            break
    # If they are, set the status of the event...
    if done and round.type == 1:
        event.status = 4
        round.isFinished = True
        round.save()
        event.save()
        # And then set each fencer's rank for each cluster...
        for cluster in Cluster.objects.all().filter(round=round):
            # TODO: Eventually will have to go back and account for ties.
            fencerList = Fencer.objects.all().filter(cluster=cluster)
            # Repeated stable sorts: the LAST applied key dominates, so the final
            # order is victory-% desc, then indicator, V, D, TS, TR as tiebreakers.
            fencerList = sorted(fencerList, cmp=lambda x,y: cmp(y.getTR(), x.getTR()))
            fencerList = sorted(fencerList, cmp=lambda x,y: cmp(y.getTS(), x.getTS()))
            fencerList = sorted(fencerList, cmp=lambda x,y: cmp(y.getD(), x.getD()))
            fencerList = sorted(fencerList, cmp=lambda x,y: cmp(y.getV(), x.getV()))
            fencerList = sorted(fencerList, cmp=lambda x,y: cmp(y.getInd(), x.getInd()))
            fencerList = sorted(fencerList, cmp=lambda x,y: cmp(y.getVP(), x.getVP()))
            i = 1
            for fencer in fencerList:
                fencer.place = i
                i = i + 1
                fencer.save()
        event.generateInitialSeeding(request.user)
    if done and round.type == 2:
        event.status = 4
        round.isFinished = True
        round.save()
        event.save()
    return HttpResponseRedirect('/bouting/'+str(tournament.id)+'/tournamentView/')
    #return HttpResponseRedirect('/bouting/'+str(tournament.id) +'/' + str(event.id) +'/' + str(round.id) +'/poolView/')
def poolView(request, tournament_id, event_id, round_id):
    """Render the pool sheet for a round (serves both AJAX and full page loads)."""
    try:
        tournament = Tournament.objects.get(pk=tournament_id)
        event = Event.objects.get(pk=event_id)
        round = Round.objects.get(pk=round_id)
        clusterList = Cluster.objects.filter(round=round_id)
    except Exception:
        raise Http404
    # Owners may edit unless the round has already finished.
    canEdit = request.user in tournament.owners.all() and not round.isFinished
    # The original duplicated this render for the AJAX branch and wrapped the
    # resulting HttpResponse in *another* HttpResponse; returning the rendered
    # response directly serves both cases.
    return render_to_response('bouting/poolView.html', {
        'event': event,
        'tournament': tournament,
        'round': round,
        'poolList': clusterList,
        'canEdit': canEdit,
    }, context_instance=RequestContext(request))
def prepareBouts(request, tournament_id, event_id, round_id):
    """Create the round-robin bouts for every pool of a pool round (type 1)."""
    try:
        tournament = Tournament.objects.get(pk=tournament_id)
        event = Event.objects.get(pk=event_id)
        round = Round.objects.get(pk=round_id)
        clusterList = Cluster.objects.all().filter(round=round_id)
    except Exception:
        raise Http404
    if not request.user in tournament.owners.all():
        return notYours(request)
    # TODO: THIS IS BAAAAAAAD
    #Bout.objects.all().filter(round=round).delete()
    # If preparing for pools...
    if round.type == 1:
        for pool in clusterList:
            # Clean out old bouts that might exist
            i = 0
            fencersInPool = Fencer.objects.all().filter(cluster=pool)
            # Number each fencer 1..n and pair them only against later fencers
            # in the pool (the slice [i:] plus the != guard), so every pairing
            # is created exactly once.
            for fencerA in fencersInPool:
                fencerA.number = i+1
                fencerA.save()
                for fencerB in fencersInPool[i:]:
                    if fencerA != fencerB:
                        bout = Bout(leftFencer=fencerB,
                                    rightFencer=fencerA,
                                    cluster=pool,
                                    createdBy=request.user,
                                    editedBy=request.user)
                        bout.save()
                i = i + 1
    boutList = Bout.objects.all()
    return HttpResponseRedirect('/bouting/'+str(tournament.id) + '/tournamentView/')
def beginFencing(request, tournament_id, event_id, round_id):
    """Move an event into the actively-fencing state (status 3); owners only."""
    try:
        tournament = Tournament.objects.get(pk=tournament_id)
        event = Event.objects.get(pk=event_id)
        round = Round.objects.get(pk=round_id)
    except Exception:
        raise Http404
    if request.user not in tournament.owners.all():
        return notYours(request)
    event.status = 3
    event.save()
    context = {'event': event, 'tournament': tournament, 'round': round}
    return render_to_response('bouting/beginFencing.html', context,
                              context_instance=RequestContext(request))
def prePool(request, tournament_id, event_id, roundNum):
    """Show the pools for a round, creating the initial seeding on first visit."""
    try:
        tournament = Tournament.objects.get(pk=tournament_id)
        event = Event.objects.get(pk=event_id)
        round = Round.objects.get(event=event, roundNum=roundNum)
        clusterList = Cluster.objects.filter(round=round.id)
    except Exception:
        raise Http404
    if not request.user in tournament.owners.all():
        return notYours(request)
    poolList = [Fencer.objects.filter(cluster=pool) for pool in clusterList]
    fencerList = Fencer.objects.filter(round__event=event, round=round)
    # First visit: no fencers seeded yet, so build the initial seeding.
    # (The original tested `str(fencerList) == "[]"` — a fragile repr check.)
    if not fencerList:
        event.generateInitialSeeding(request.user)
        event.status = 1
        event.editedBy = request.user
        event.save()
    return render_to_response('bouting/prePool.html', {
        'event': event,
        'tournament': tournament,
        'round': round,
        'poolList': poolList,
    }, context_instance=RequestContext(request))
def deView(request, tournament_id, event_id, round_id):
try:
tournament = Tournament.objects.get(pk=tournament_id)
event = Event.objects.get(pk=event_id)
round = Round.objects.get(pk=round_id)
clusterList = Cluster.objects.all().filter(round=round_id)
except Exception:
raise Http404
# TODO: Need to go back and change the canedit variable to include other times you can't edit (like the pool is finished)
if not request.user in tournament.owners.all():
canEdit = False
else:
canEdit = True
#if len(clusterList) == 0:
# round.generateDeTable(request.user)
round.generateDeTable(request.user)
#if len(clusterList) == 0:
# round.generateDeTable(request.user)
#else:
# print "We got clusters!"
try:
firstCluster = clusterList.get(number=1)
print "woo2"
except Exception as e:
print e
print event
print tournament
print round
print clusterList
print firstCluster
print canEdit
return render_to_response('bouting/deView.html', {
'event' : event,
'tournament' : tournament,
'round' : round,
'clusterList' : clusterList,
'firstCluster' : firstCluster,
'canEdit' : canEdit,
}, context_instance=RequestContext(request))
def generatePools(request, tournament_id, event_id, round_id):
    """Display the generated pools (one fencer list per cluster) for a round."""
    try:
        tournament = Tournament.objects.get(pk=tournament_id)
        event = Event.objects.get(pk=event_id)
        round = Round.objects.get(pk=round_id)
        clusterList = Cluster.objects.filter(round=round_id)
    except Exception:
        raise Http404
    if request.user not in tournament.owners.all():
        return notYours(request)
    poolList = [Fencer.objects.filter(cluster=pool) for pool in clusterList]
    return render_to_response('bouting/generatePools.html', {
        'event': event,
        'tournament': tournament,
        'round': round,
        'poolList': poolList,
    }, context_instance=RequestContext(request))
def poolOptions(request, tournament_id, event_id, roundNum):
    """Choose a pool format for a round and create the pools on POST.

    The submitted ``format`` string must be one of the options produced by
    ``round.generatePoolOptions()``; it is then split on spaces and parsed
    positionally back into pool counts and sizes.
    """
    try:
        tournament = Tournament.objects.get(pk=tournament_id)
        event = Event.objects.get(pk=event_id)
        round = Round.objects.get(event=event,roundNum=roundNum)
    except Exception:
        raise Http404
    if not request.user in tournament.owners.all():
        return notYours(request)
    if request.method == 'POST':
        format = request.POST['format']
        # The grouping options are absent from the POST data when unchecked,
        # hence the KeyError fallbacks to False.
        try:
            primary = request.POST['primary']
        except:
            primary = False
        try:
            secondary = request.POST['secondary']
        except:
            secondary = False
        try:
            name = request.POST['name']
        except:
            name = False
        try:
            division = request.POST['division']
        except:
            division = False
        # Protect against sending strange answers, basically serves as "is_valid"
        options = round.generatePoolOptions()
        if format in options:
            # NOTE(review): words at indices 0, 2, 5 and 7 are assumed to be the
            # pool counts/sizes — confirm against generatePoolOptions' layout.
            m = re.split(" ", format)
            totalPools = int(m[0])
            totalBigPools = int(m[2])
            bigPoolSize = int(m[5])
            # If there are no small pools, this would normally cause an error
            try:
                totalSmallPools = int(m[7])
            except:
                totalSmallPools = 0
            round.generatePools(totalPools, totalBigPools, bigPoolSize, totalSmallPools, request.user, primary, secondary, name, division)
            event.status = 2
            event.editedBy = request.user
            event.save()
            return HttpResponseRedirect('/bouting/'+str(tournament.id)+'/tournamentView/') # Redirect after POST
        else:
            raise Http404
    options = round.generatePoolOptions()
    return render_to_response('bouting/poolOptions.html', {
        'event' : event,
        'tournament' : tournament,
        'optionList' : options,
        'round' : round,
    }, context_instance=RequestContext(request))
def initialSeeding(request, tournament_id, event_id, round_id):
    """Show the initial seeding for a round, or regenerate it on POST."""
    try:
        tournament = Tournament.objects.get(pk=tournament_id)
        event = Event.objects.get(pk=event_id)
        round = Round.objects.get(pk=round_id)
    except Exception:
        raise Http404
    isCurrent = (event.getCurrentRound() == round)
    if request.method == 'POST':
        event.generateInitialSeeding(request.user)
        event.status = 1
        event.editedBy = request.user
        event.save()
        return HttpResponseRedirect('/bouting/' + str(tournament.id) + '/' +
                                    str(event.id) + '/' + str(round.id) + '/prePool/')
    fencerList = Fencer.objects.filter(round__event=event, round=round)
    return render_to_response('bouting/initialSeeding.html', {
        'event': event,
        'tournament': tournament,
        'fencerList': fencerList,
        'round': round,
        'isCurrent': isCurrent,
    }, context_instance=RequestContext(request))
def confirmEntrants(request, tournament_id, event_id):
    """Owner-only page listing an event's entrants for confirmation."""
    try:
        tournament = Tournament.objects.get(pk=tournament_id)
        event = Event.objects.get(pk=event_id)
    except Exception:
        raise Http404
    if request.user not in tournament.owners.all():
        return notYours(request)
    entrantList = event.getEntrants()
    return render_to_response('bouting/confirmEntrants.html', {
        'event': event,
        'tournament': tournament,
        'entrantList': entrantList,
    }, context_instance=RequestContext(request))
def createEvent(request, tournament_id):
    """Create an event in a tournament together with its two rounds."""
    try:
        tournament = Tournament.objects.get(pk=tournament_id)
    except Tournament.DoesNotExist:
        raise Http404
    if not request.user in tournament.owners.all():
        return notYours(request)
    if request.method == 'POST':
        event = Event(createdBy=request.user, editedBy=request.user, tournament=tournament)
        eventForm = EventForm(request.POST, instance=event)
        # Two prefixed RoundForms ("0-..." and "1-...") come from the same POST.
        roundForms = [RoundForm(request.POST, prefix=str(x)) for x in range(0,2)]
        # If the values from the forms are good, update the round so that the event is set
        if eventForm.is_valid() and all([roundForm.is_valid() for roundForm in roundForms]):
            eventForm.save()
            c = 1
            for roundForm in roundForms:
                # Attach each round to the new event and number them 1 and 2.
                newRound = roundForm.save(commit=False)
                newRound.event = event
                newRound.roundNum = c
                c = c + 1
                newRound.createdBy = request.user
                newRound.editedBy = request.user
                newRound.save()
            return HttpResponseRedirect('/bouting/'+str(tournament.id)+'/tournamentView/') # Redirect after POST
    else:
        roundForms = [RoundForm(prefix=str(x)) for x in range(0,2)]
        eventForm = EventForm()
    return render_to_response('bouting/createEvent.html', {
        'eventForm': eventForm,
        'roundForms': roundForms,
        'tournament' : tournament,
    }, context_instance=RequestContext(request))
def deleteEvent(request, tournament_id, event_id):
    """Delete an event; requires tournament ownership and the delete permission."""
    try:
        tournament = Tournament.objects.get(pk=tournament_id)
        event = Event.objects.get(pk=event_id)
    except (Tournament.DoesNotExist, Event.DoesNotExist):
        # The original `except A, B:` used Python 2's `except type, target`
        # form, which bound the exception to Event.DoesNotExist instead of
        # catching both types.
        raise Http404
    if not request.user in tournament.owners.all():
        return notYours(request)
    if request.user.has_perm('bouting.delete_event'):
        event.delete()
    return render_to_response('bouting/deleteEvent_Success.html',
                              {'tournament': tournament},
                              context_instance=RequestContext(request))
def advanceRounds(request, tournament_id, event_id):
    """Edit an event and its two rounds (owners only)."""
    try:
        tournament = Tournament.objects.get(pk=tournament_id)
        event = Event.objects.get(pk=event_id)
        rounds = Round.objects.filter(event=event)
    except (Tournament.DoesNotExist, Event.DoesNotExist):
        # The Event lookup can also fail; the original only caught the
        # Tournament case and let Event.DoesNotExist escape as a 500.
        raise Http404
    if not request.user in tournament.owners.all():
        return notYours(request)
    if request.method == 'POST':
        eventForm = EventForm(request.POST, instance=event)
        roundForms = [RoundForm(request.POST, prefix=str(x), instance=rounds[x])
                      for x in range(0, 2)]
        # If the values from the forms are good, update the round so that the event is set
        if eventForm.is_valid() and all(f.is_valid() for f in roundForms):
            # Stamp the editor on the model instances; the original set the
            # attribute on the form objects, which never persisted.
            eventForm.instance.editedBy = request.user
            eventForm.save()
            for roundForm in roundForms:
                roundForm.instance.editedBy = request.user
                roundForm.save()
            return HttpResponseRedirect('/bouting/' + str(tournament.id) + '/tournamentDetail/')
    else:
        roundForms = [RoundForm(prefix=str(x), instance=rounds[x]) for x in range(0, 2)]
        eventForm = EventForm(instance=event)
    return render_to_response('bouting/editEvent.html', {
        'eventForm': eventForm,
        'roundForms': roundForms,
        'tournament': tournament,
        'event': event,
    }, context_instance=RequestContext(request))
def editEvent(request, tournament_id, event_id):
    """Edit an event and its two rounds (owners only)."""
    try:
        tournament = Tournament.objects.get(pk=tournament_id)
        event = Event.objects.get(pk=event_id)
        rounds = Round.objects.filter(event=event)
    except (Tournament.DoesNotExist, Event.DoesNotExist):
        # The Event lookup can also fail; the original only caught the
        # Tournament case and let Event.DoesNotExist escape as a 500.
        raise Http404
    if not request.user in tournament.owners.all():
        return notYours(request)
    if request.method == 'POST':
        eventForm = EventForm(request.POST, instance=event)
        roundForms = [RoundForm(request.POST, prefix=str(x), instance=rounds[x])
                      for x in range(0, 2)]
        # If the values from the forms are good, update the round so that the event is set
        if eventForm.is_valid() and all(f.is_valid() for f in roundForms):
            # Stamp the editor on the model instances; the original set the
            # attribute on the form objects, which never persisted.
            eventForm.instance.editedBy = request.user
            eventForm.save()
            for roundForm in roundForms:
                roundForm.instance.editedBy = request.user
                roundForm.save()
            return HttpResponseRedirect('/bouting/' + str(tournament.id) + '/tournamentDetail/')
    else:
        roundForms = [RoundForm(prefix=str(x), instance=rounds[x]) for x in range(0, 2)]
        eventForm = EventForm(instance=event)
    return render_to_response('bouting/editEvent.html', {
        'eventForm': eventForm,
        'roundForms': roundForms,
        'tournament': tournament,
        'event': event,
    }, context_instance=RequestContext(request))
def createTournament(request):
    """Create a tournament and seed its owner set with the creator and the host club's owners."""
    if request.method != 'POST':
        form = TournamentForm()  # unbound form for the initial GET
    else:
        tournament = Tournament(createdBy=request.user, editedBy=request.user)
        form = TournamentForm(request.POST, instance=tournament)
        if form.is_valid():
            form.save()
            # The creator plus every owner of the host club own this tournament.
            tournament.owners.add(request.user)
            for clubOwner in tournament.hostClub.owners.all():
                tournament.owners.add(clubOwner)
            return HttpResponseRedirect('/bouting/' + str(tournament.id) + '/tournamentView/')
    return render_to_response('bouting/createTournament.html', {
        'form': form,
    }, context_instance=RequestContext(request))
def editTournament(request, tournament_id):
    """Edit a tournament; only its owners may do so."""
    try:
        tournament = Tournament.objects.get(pk=tournament_id)
    except Tournament.DoesNotExist:
        raise Http404
    if not request.user in tournament.owners.all():
        return notYours(request)
    if request.method == 'POST':
        form = TournamentForm(request.POST, instance=tournament)
        if form.is_valid():
            # Stamp the editor on the model instance; setting it on the form
            # object (as the original did) was never persisted.
            form.instance.editedBy = request.user
            form.save()
            return HttpResponseRedirect('/bouting/' + str(tournament.id) + '/tournamentDetail/')
    else:
        form = TournamentForm(instance=tournament)
    return render_to_response('bouting/editTournament.html', {
        'form': form,
        'tournament': tournament,
    }, context_instance=RequestContext(request))
def deleteTournament(request, tournament_id):
    """Delete a tournament; requires ownership and the delete permission."""
    try:
        tournament = Tournament.objects.get(pk=tournament_id)
    except Tournament.DoesNotExist:
        raise Http404
    if not request.user in tournament.owners.all():
        return notYours(request)
    # Check permissions
    if request.user.has_perm('bouting.delete_tournament'):
        # Delete the fetched object directly instead of re-querying by pk.
        tournament.delete()
    return render_to_response('bouting/deleteTournament_Success.html', {},
                              context_instance=RequestContext(request))
def tournamentDetail(request, tournament_id):
    """Tournament dashboard: events plus blank entrant/referee forms."""
    try:
        t = Tournament.objects.get(pk=tournament_id)
    except Tournament.DoesNotExist:
        raise Http404
    events = Event.objects.filter(tournament=tournament_id)
    entrantForm = EntrantForm()
    refForm = RefereeForm(auto_id='ref-%s')
    # Best effort: the template tolerates an empty person.  This was a bare
    # except; Exception still covers DoesNotExist/MultipleObjectsReturned
    # without swallowing KeyboardInterrupt/SystemExit.
    try:
        person = Person.objects.get(account=request.user)
    except Exception:
        person = ""
    return render_to_response('bouting/tournamentDetail.html', {
        'tournament': t,
        'eventList': events,
        'entrantForm': entrantForm,
        'refForm': refForm,
        'person': person,
    }, context_instance=RequestContext(request))
def detail(request, person_id):
    """Show a person's detail page together with their USFA card (or "N/A")."""
    try:
        p = Person.objects.get(pk=person_id)
    except Person.DoesNotExist:
        raise Http404
    # The card is optional; fall back to a placeholder (this was a bare except
    # nested inside the person lookup).
    try:
        c = UsfaCard.objects.get(person=p)
    except Exception:
        c = "N/A"
    return render_to_response('bouting/detail.html', {'person': p, 'card': c},
                              context_instance=RequestContext(request))
| Python |
# Admin registrations for the bouting app.
from bouting.models import *
from django.contrib import admin
# Models registered with the default ModelAdmin.  Club, Person and UsfaCard
# (commented out here) are registered with customized admin classes instead.
admin.site.register(WeaponRating)
admin.site.register(RefRating)
admin.site.register(Tournament)
#admin.site.register(Club)
admin.site.register(Division)
admin.site.register(Section)
#admin.site.register(UsfaCard)
#admin.site.register(Person)
admin.site.register(Event)
admin.site.register(Entrant)
admin.site.register(ListedReferee)
admin.site.register(Referee)
admin.site.register(Round)
admin.site.register(Cluster)
admin.site.register(Bout)
admin.site.register(Strip)
admin.site.register(Fencer)
class ClubAdmin(admin.ModelAdmin):
    """Admin customization for Club: show name and abbreviation in the changelist."""
    list_display = ('name', 'abbreviation', )
admin.site.register(Club, ClubAdmin)
class PersonAdmin(admin.ModelAdmin):
    """Admin customization for Person: grouped fieldsets and name columns."""
    fieldsets = [
        (None, {'fields': ['firstName', 'lastName', 'gender', 'email', 'birthday', ]}),
        ('Clubs', {'fields': ['primaryClub', 'secondaryClub']}),
        ('MetaData', {'fields': ['account', 'createdBy', 'editedBy']}),
        #('Referee Ratings', {'fields': ['refFoilRating', 'refEpeeRating', 'refSaberRating']}),
    ]
    # Changelist columns.
    list_display = ('lastName', 'firstName', )
admin.site.register(Person, PersonAdmin)
class UsfaCardAdmin(admin.ModelAdmin):
    """Admin customization for UsfaCard: group dates and ratings into fieldsets."""
    fieldsets = [
        (None, {'fields': ['person', 'number', 'division', 'verified']}),
        ('Dates', {'fields': ['issueDate', 'expirationDate']}),
        ('Fencing Ratings', {'fields': ['fencingFoilRating', 'fencingEpeeRating', 'fencingSaberRating']}),
        ('Referee Ratings', {'fields': ['refFoilRating', 'refEpeeRating', 'refSaberRating']}),
        ('MetaData', {'fields': ['createdBy', 'editedBy']}),
    ]
# The original register line had "| Python |" table residue fused onto it,
# which is a syntax error; the call itself is unchanged.
admin.site.register(UsfaCard, UsfaCardAdmin)
#!/usr/bin/env python
# Legacy Django (pre-1.4) management script: verify settings.py is importable
# from this directory, then hand control to execute_manager.
from django.core.management import execute_manager
import imp
try:
    imp.find_module('settings') # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
    sys.exit(1)
import settings
if __name__ == "__main__":
    execute_manager(settings)
| Python |
# freeband.py
# Copyright (C) 2007 Sergio Perez Camacho - BRUE
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# freeband entry point: set up the soya scene, load the song named on the
# command line, spawn the first MAX_OBJ_CACHE beat objects for each of the
# five drum tracks, then start the music and the render loop.
import sys, os, os.path
import soya, soya.sdlconst
import basic3d
import basicsfx

# Maximum number of beat objects kept alive per track at once.
MAX_OBJ_CACHE = 20

if __name__ == "__main__":
    # Initializes soya (3D engine) and points it at the bundled data directory.
    soya.init("FreeBand", 640, 480, 0)
    soya.path.append(os.path.join(os.path.dirname(sys.argv[0]), "data"))
    # Creates the scene
    scene = soya.World()
    light = soya.Light(scene)
    light.set_xyz(0.5, 2.0, 2.0)
    # Creates a camera looking down at the play field.
    camera = soya.Camera(scene)
    camera.z = 7
    camera.y = 9
    camera.rotate_x(-40)
    soya.set_root_widget(camera)
    basicsfx.init()
    s = basicsfx.BaseSong(sys.argv[1])
    # One (mesh, x-position) pair per drum track.  This replaces five
    # copy-pasted while-loops that differed only in these two values (and
    # which used bitwise & on booleans as a conjunction).
    track_layout = [
        ("BasicDrum1", -6),
        ("BasicDrum2", -3),
        ("BasicDrum1", 0),
        ("BasicDrum1", 3),
        ("BasicDrum1", 6),
    ]
    rBeats = []
    for track, (mesh, x) in enumerate(track_layout):
        beats = s.beats[track]
        # Cache at most MAX_OBJ_CACHE objects per track.
        le = min(MAX_OBJ_CACHE, len(beats))
        for c in range(le):
            rBeats.append(basic3d.BasicElement(mesh, scene, x, -4, -beats[c], s, le, track))
    p = basic3d.Plane("DrumPlane", scene, 0, -5, -7.5, rBeats)
    s.play()
    soya.MainLoop(scene).main_loop()
| Python |
# sngeditor.py
# Copyright (C) 2007 Sergio Perez Camacho - BRUE
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import pygame
from pygame.locals import *
import sys
pygame.mixer.init()
pygame.mixer.music.load(sys.argv[1])
l1 = []
l2 = []
l3 = []
l4 = []
l5 = []
pygame.init()
screen = pygame.display.set_mode((320,240), HWSURFACE|DOUBLEBUF)
surf = pygame.Surface(screen.get_size())
screen.blit(surf, (0,0))
pygame.display.flip()
pygame.mixer.music.play()
while float(pygame.mixer.music.get_pos()) > 0:
for event in pygame.event.get():
if (event.type == KEYDOWN):
if (event.key==K_q):
print "added 1"
l1.append(pygame.mixer.music.get_pos())
if (event.key==K_w):
print "added 2"
l2.append(pygame.mixer.music.get_pos())
if (event.key==K_e):
print "added 3"
l3.append(pygame.mixer.music.get_pos())
if (event.key==K_r):
print "added 4"
l4.append(pygame.mixer.music.get_pos())
if (event.key==K_t):
print "added 5"
l5.append(pygame.mixer.music.get_pos())
f = open(sys.argv[1][:-4]+".sng","w")
f.write("[BPM]\n")
f.write("140\n")
f.write("[1]\n")
for l in l1:
value = str(l) + "\n"
f.write(value)
f.write("[2]\n")
for l in l2:
value = str(l) + "\n"
f.write(value)
f.write("[3]\n")
for l in l3:
value = str(l) + "\n"
f.write(value)
f.write("[4]\n")
for l in l4:
value = str(l) + "\n"
f.write(value)
f.write("[5]\n")
for l in l5:
value = str(l) + "\n"
f.write(value)
f.close()
| Python |
# basicsfx.py
# Copyright (C) 2007 Sergio Perez Camacho - BRUE
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import pygame
from pygame.locals import *
class BaseSong:
def __init__(self, fSong):
self.beats=[]
# Load the song
print "[FreeBand] Loading " + fSong
pygame.mixer.music.load(fSong)
self.loadsng(fSong[:-4])
print self.bps
print (1000/self.bps)
print 429 % (1000/self.bps)
def loadsng(self, fsng):
# A very ugly and simple parser
f = open(fsng+".sng", "r")
ind = 0
l1 = []
l2 = []
l3 = []
l4 = []
l5 = []
for line_ in f:
line = line_[0:-1]
if line == "[BPM]":
ind=0
continue
if line == "[1]":
ind=1
continue
if line == "[2]":
ind=2
continue
if line == "[3]":
ind=3
continue
if line == "[4]":
ind=4
continue
if line == "[5]":
ind=5
continue
print line
if ind == 0:
self.bpm = float(line)
self.bps = self.bpm / 60
elif ind == 1: l1.append(int(line))
elif ind == 2: l2.append(int(line))
elif ind == 3: l3.append(int(line))
elif ind == 4: l4.append(int(line))
elif ind == 5: l5.append(int(line))
self.beats.append(l1)
self.beats.append(l2)
self.beats.append(l3)
self.beats.append(l4)
self.beats.append(l5)
f.close()
def play(self):
pygame.mixer.music.play()
def pause(self):
pygame.mixer.music.pause()
def stop(self):
pygame.mixer.music.stop()
class SoundKit:
def __init__(self, sound_files):
self.kit = []
for file in sound_files:
self.kit.append(pygame.mixer.Sound(file))
def play(self, index):
if index in range(0, len(self.kit)):
for c in channels:
if not c.get_busy():
c.play(self.kit[index])
break
else:
print "No kit number avaiable"
def stop(self, index):
if index in range(0, len(self.kit)):
self.kit[index].stop()
else:
print "No kit number avaiable"
def init_mixer(n):
lchannels = []
pygame.mixer.init()
pygame.mixer.set_num_channels(n)
print "[FreeBand] Initializing mixer channels to: " + str(n)
for i in range(0,n):
lchannels.append(pygame.mixer.Channel(i))
return lchannels
def init():
    """Set up the module-level `channels` list used by SoundKit.play().

    Bug fix: the result of init_mixer() was previously bound to a local
    variable and immediately discarded, so the module-level `channels`
    that SoundKit.play() reads was never defined (NameError at play
    time).  Declare it global so the channels are actually shared.
    """
    global channels
    channels = init_mixer(20)
| Python |
# basic3d.py
# Copyright (C) 2007 Sergio Perez Camacho - BRUE
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os, os.path
import soya
import basicsfx
import soya.label3d
import pygame
# Current music playback position in ms (synced from
# pygame.mixer.music.get_pos() each frame by Plane.proccess()).
time = 0.0
# Per-track index of the next beat each recycled BasicElement will take.
ii = [0,0,0,0,0]
# Hit-window radius (scene z units) around the target plane.
GR = 0.5
# Player score, shown by Plane's 3D label.
p = 0
""" class BasicElement
This class represents one playable beat """
class BasicElement(soya.Body):
    """One falling beat marker for a single drum track.

    Markers are recycled: once one scrolls past the hit point (z > 5)
    it is re-timed to the next beat of its track, using the shared
    per-track counters in the module-level `ii` list.
    """
    def __init__(self, blenderfile,scene, x,y,z, s, mObj, inst):
        # blenderfile: model name; s: the BaseSong; mObj: number of
        # cached markers for this track; inst: track index (0-4).
        global ii
        model = soya.Model.get(blenderfile)
        self.x = x
        self.y = y
        # Base z offset scaled by BPM; actual z is recomputed per frame.
        self.zp = z/s.bpm
        soya.Body.__init__(self, scene, model)
        self.song = s
        self.mObj = mObj
        self.inst = inst
        # Recycling for this track starts after the cached markers.
        ii[inst] = mObj
    def begin_round(self):
        global time
        pass
    def advance_time(self,proportion):
        global time
        global ii
        if self.z > 0:
            # Past the hit plane: sink the marker out of the lane.
            self.y = self.y - 0.3
        if (self.z > 5) & (ii[self.inst] < len(self.song.beats[self.inst])-1):
            # Fully scrolled out: reuse this marker for the next beat
            # of the same track.
            self.y = -4
            self.zp = -self.song.beats[self.inst][ii[self.inst]]/(self.song.bpm)
            ii[self.inst] = ii[self.inst] + 1
            print "->" + str(ii[self.inst]) + "zp: " + str(self.zp)
        # Position follows the shared music clock set by Plane.proccess().
        self.z = self.zp + time/(self.song.bpm)
        #soya.Body.advance_time(self, proportion)
class Plane(soya.Body):
    """The lane surface under the beats; also the game controller.

    Each frame it syncs the shared music clock, handles the q/w/e/r/t
    hit keys (tracks 0-4), updates the score label, and stops the main
    loop on Escape or when the music ends.
    """
    def __init__(self, blenderfile,scene, x, y, z, rBeats):
        model = soya.Model.get(blenderfile)
        self.x = x
        self.y = y
        self.z = z
        soya.Body.__init__(self, scene, model)
        self.rBeats = rBeats  # all BasicElement beat markers
        # 3D score label in the top-left corner.
        self.points = soya.label3d.Label3D(scene, "0")
        self.points.set_xyz(-5.0,4.0,0.0)
        self.points.lit = 0
    def proccess(self,proportion):
        # Here we should proccess the object
        global time
        global p
        # Sync the shared clock; get_pos() returns -1 once playback ends.
        time = float(pygame.mixer.music.get_pos())
        if (time == -1): soya.MAIN_LOOP.stop()
        # One entry per drum track (replaces five copy-pasted branches
        # that differed only in key constant and track index).
        track_keys = {
            soya.sdlconst.K_q: 0,
            soya.sdlconst.K_w: 1,
            soya.sdlconst.K_e: 2,
            soya.sdlconst.K_r: 3,
            soya.sdlconst.K_t: 4,
        }
        for event in soya.process_event():
            if event[0] == soya.sdlconst.KEYDOWN:
                if event[1] in track_keys:
                    track = track_keys[event[1]]
                    # Score every marker of this track inside the hit
                    # window and pop it up out of the lane.
                    for c in self.rBeats:
                        if (c.z > -GR) & (c.z < GR) & (c.inst==track):
                            p = p + 1
                            c.y = 1
                if event[1] == soya.sdlconst.K_ESCAPE:
                    soya.MAIN_LOOP.stop()
        self.points.text = str(p)
    def advance_time(self, proportion):
        soya.Body.advance_time(self, proportion)
        self.proccess(proportion)
class Instrument(soya.Body):
    """A static instrument model placed in the scene at (x, y, z)."""
    def __init__(self, blenderfile,scene, x, y, z):
        mesh = soya.Model.get(blenderfile)
        # Position is assigned before Body.__init__, matching the other
        # scene classes in this file.
        self.x, self.y, self.z = x, y, z
        soya.Body.__init__(self, scene, mesh)
    def proccess(self,proportion):
        # No per-frame logic yet.
        pass
    def advance_time(self, proportion):
        soya.Body.advance_time(self, proportion)
        self.proccess(proportion)
class Drummer:
    """Groups the scene pieces that make up one drummer lane."""
    def __init__(self, lBeats, plane, instrument, scene):
        # NOTE(review): `scene` is accepted but never stored or used.
        self.lBeats = lBeats # The list of beats
        self.plane = plane # The plane under the beats
        self.instrument = instrument # The instrument at the end of
        # the plane
| Python |
# mini taeko ... test for pygame music
# Copyright (C) 2007 Sergio Perez Camacho - BRUE
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import pygame
from pygame.locals import *
import sys
WIN_X = 640  # window width in pixels
WIN_Y = 480  # window height in pixels
ER = 10      # NOTE(review): defined but never used in this script
GR = 20      # "good" hit window radius in ms (white circle in beat.draw)
MR = 30      # match/miss window radius in ms
# NOTE(review): Python 2 integer division -> 2; presumably the
# beats-per-second of a 140 BPM song (140/60 = 2.33) -- confirm intended.
BPS= 140/60
class sound_kit:
def __init__(self, sound_files):
self.kit = []
for file in sound_files:
self.kit.append(pygame.mixer.Sound(file))
def play(self, index):
if index in range(0, len(self.kit)):
for c in channels:
if not c.get_busy():
c.play(self.kit[index])
break
else:
print "No kit number avaiable"
def stop(self, index):
if index in range(0, len(self.kit)):
self.kit[index].stop()
else:
print "No kit number avaiable"
class beat:
    """A single recorded beat at `time` milliseconds into the song."""
    def __init__(self, time):
        self.time = time
    def draw(self, musictime,s):
        """Draw this beat on surface `s`, scrolled relative to `musictime` (ms).

        Only beats between musictime-MR and musictime+1000 are drawn;
        those inside the +/-GR hit window are shown larger and white.
        """
        # Direct comparisons instead of `in range(...)`: in Python 2,
        # range() builds a full list and membership is O(n) per frame.
        if musictime - MR <= self.time < musictime + 1000:
            if musictime - GR <= self.time < musictime + GR:
                pygame.draw.circle(s, (255,255,255), (30+((self.time-musictime)/BPS),40), 20, 0)
            else:
                pygame.draw.circle(s, (100,100,100), (30+((self.time-musictime)/BPS),40), 18, 0)
def init_mixer(n):
lchannels = []
pygame.mixer.init()
pygame.mixer.set_num_channels(n)
for i in range(0,n):
print i
lchannels.append(pygame.mixer.Channel(i))
return lchannels
if __name__ == "__main__":
    # Set up audio: 40 mixer channels, a two-sound kit, background music.
    channels = init_mixer(40)
    sk = sound_kit(["snare.ogg","kick.ogg"])
    pygame.mixer.music.load("Access_Denied.ogg")
    # Set up the display and a back buffer surface.
    pygame.init()
    screen = pygame.display.set_mode((WIN_X,WIN_Y), HWSURFACE|DOUBLEBUF)
    surf = pygame.Surface(screen.get_size())
    screen.blit(surf, (0,0))
    pygame.display.flip()
    # Loop the music indefinitely.
    pygame.mixer.music.play(-1)
    # Read the recorded beat timestamps (one integer ms value per line).
    l = open("test_l", "r")
    m = []
    for linea in l:
        m.append(beat(int(linea[:-1])))
    # New timestamps recorded this session are appended to "test".
    f = open("test", "w")
    done = False
    while not done:
        # Redraw all beats scrolled against the current music position.
        surf.fill((0,0,0))
        for j in m:
            j.draw(pygame.mixer.music.get_pos(),surf)
        screen.blit(surf, (0,0))
        pygame.display.flip()
        for event in pygame.event.get():
            if (event.type == KEYUP):
                pass
            if (event.type == KEYDOWN):
                # Space: record the current position as a new beat.
                if (event.key==K_SPACE):
                    print "added!"
                    value = str(pygame.mixer.music.get_pos())+"\n"
                    f.write(value)
                if (event.key==K_ESCAPE): done=True
                # Q: play the snare and check for a hit within +/-MR ms.
                if (event.key==K_q):
                    sk.play(0)
                    for j in m:
                        if j.time in range(pygame.mixer.music.get_pos()-MR,pygame.mixer.music.get_pos()+MR):
                            print "BUENA!"
                # P: just play the kick.
                if (event.key==K_p):
                    sk.play(1)
    f.close()
    pygame.mixer.quit()
    pygame.quit()
| Python |
import re
import sys
import math
import os.path
from time import time
import maya.OpenMaya as om
import maya.cmds as mc
import maya.OpenMayaAnim as omanim
import maya.OpenMayaMPx as OpenMayaMPx
import pymel.core.datatypes
import pyawd
from pyawd.core import *
from pyawd.anim import *
from pyawd.scene import *
from pyawd.geom import *
from pyawd.material import *
from pyawd.utils.math import *
# Wall-clock timestamp of the most recent benchmark_start() call.
b_start = 0.0

def benchmark_start():
    """Mark the current time as the start of a benchmark interval."""
    global b_start
    b_start = time()

def benchmark_stop():
    """Return seconds elapsed since benchmark_start() and reset the mark."""
    global b_start
    elapsed = time() - b_start
    b_start = 0.0
    return elapsed

def benchmark_print():
    """Stop the running benchmark and print its duration."""
    print('Duration: %fs' % benchmark_stop())
class MayaAWDFileTranslator(OpenMayaMPx.MPxFileTranslator):
    """Maya file translator plug-in that exports the scene as AWD2."""

    def writer(self, file, options, mode):
        """Entry point invoked by Maya's export machinery.

        `file` is Maya's MFileObject for the chosen output path,
        `options` the option string assembled by the MEL UI, `mode`
        the export mode (unused here).
        """
        file_path = file.resolvedFullName()
        base_path = os.path.dirname(file_path)
        opts = self.parse_opts(options)
        def o(key, defval=None):
            'Get option or default value'
            if key in opts:
                return opts[key]
            else:
                return defval
        # NOTE(review): `file` here shadows both the builtin and the
        # MFileObject parameter of the same name.
        with open(file_path, 'wb') as file:
            # Map the UI's compression choice onto pyawd constants.
            comp_str = o('compression', 'none')
            compression = UNCOMPRESSED
            if comp_str == 'deflate':
                compression = DEFLATE
            elif comp_str == 'lzma':
                compression = LZMA
            wide_mtx = int(o('wide_mtx', False))
            wide_geom = int(o('wide_geom', False))
            exporter = MayaAWDExporter(file, compression, wide_geom=wide_geom, wide_mtx=wide_mtx)
            # NOTE(review): option values arrive as strings, so
            # bool('0') is True -- these flags are truthy for any
            # non-empty option value; confirm against the UI encoding.
            exporter.include_geom = bool(o('inc_geom', False))
            exporter.include_scene = bool(o('inc_scene', False))
            exporter.flatten_untransformed = bool(o('flatten_untransformed', False))
            exporter.replace_exrefs = bool(o('replace_exrefs', False))
            exporter.include_uvanim = bool(o('inc_uvanim', False))
            exporter.include_skelanim = bool(o('inc_skelanim', False))
            exporter.include_skeletons = bool(o('inc_skeletons', False))
            exporter.include_materials = bool(o('inc_materials', False))
            exporter.include_cameras = bool(o('inc_cams', False))
            exporter.include_lights = bool(o('inc_lights', False))
            exporter.embed_textures = bool(o('embed_textures', False))
            exporter.alpha_blending = bool(o('alpha_blending', False))
            exporter.alpha_threshold = float(o('alpha_threshold', 0.0))
            exporter.include_attr = bool(o('inc_attr', False))
            if exporter.include_attr:
                exporter.user_ns = AWDNamespace(str(o('attrns', '')))
            if exporter.include_skelanim:
                exporter.animation_sequences = self.read_sequences(o('seqsrc'), base_path)
            exporter.joints_per_vert = int(o('jointspervert', 3))
            exporter.export(None)
        # Copy viewer if preview should be created
        create_preview = int(o('create_preview', 0))
        if create_preview:
            import shutil
            import subprocess
            pyawd_path = pyawd.__path__[0]
            viewer_path = os.path.normpath(os.path.join(pyawd_path, '..', 'mayaawd'))
            out_path = os.path.dirname(file_path)
            out_name = os.path.basename(os.path.splitext(file_path)[0])
            # Pick the right SWF file depending on desired sandbox model
            prev_sandbox = int(o('preview_sandbox', 1))
            if prev_sandbox == 1:
                viewer_name = 'viewer_l.swf'
            else:
                viewer_name = 'viewer_n.swf'
            shutil.copyfile(os.path.join(viewer_path, viewer_name), os.path.join(out_path, 'viewer.swf'))
            shutil.copyfile(os.path.join(viewer_path, 'swfobject.js'), os.path.join(out_path, 'swfobject.js'))
            # Instantiate the HTML template next to the exported file.
            html_template = os.path.join(viewer_path, 'template.html')
            html_output = os.path.splitext(file_path)[0] + '.html'
            # TODO: Fetch color from options
            bg_color = '000000'
            with open(html_template, 'r') as html_in:
                with open(html_output, 'w') as html_out:
                    for line in html_in:
                        line = line.replace('%NAME%', out_name)
                        line = line.replace('%COLOR%', bg_color)
                        html_out.write(line)
            try:
                # Windows?
                os.startfile(html_output)
            except AttributeError:
                # Mac OS X
                subprocess.call(['open', html_output])

    def defaultExtension(self):
        """File extension Maya appends to exported files."""
        return 'awd'

    def haveWriteMethod(self):
        """Tell Maya this translator supports export (writer())."""
        return True

    def parse_opts(self, opt_str):
        """Parse Maya's 'key=val&key=val' option string into a dict.

        Backslash-escaped '&'/'=' are not treated as separators.
        NOTE(review): raises IndexError if `opt_str` is empty.
        """
        if opt_str[0]==';':
            opt_str=opt_str[1:]
        fields = re.split('(?<!\\\)&', str(opt_str))
        return dict([ re.split('(?<!\\\)=', pair) for pair in fields ])

    def read_sequences(self, seq_path, base_path):
        """Read an animation sequence file into (name, start, end) tuples.

        Relative paths are resolved against the Maya workspace roots and
        `base_path`; returns [] if the file cannot be found.
        """
        sequences = []
        if seq_path is not None:
            if not os.path.isabs(seq_path):
                # Look for this file in a list of different locations,
                # and use the first one in which it exists.
                existed = False
                bases = [
                    mc.workspace(q=True, rd=True),
                    os.path.join(mc.workspace(q=True, rd=True), mc.workspace('mayaAscii', q=True, fre=True)),
                    os.path.join(mc.workspace(q=True, rd=True), mc.workspace('AWD2', q=True, fre=True)),
                    base_path
                ]
                for base in bases:
                    new_path = os.path.join(base, seq_path)
                    print('Looking for sequence file in %s' % new_path)
                    if os.path.exists(new_path) and os.path.isfile(new_path):
                        existed = True
                        seq_path = new_path
                        break
                if not existed:
                    mc.warning('Could not find sequence file "%s. Will not export animation."' % seq_path)
                    return []
            # NOTE(review): bare except silently ignores any parse/IO
            # error and returns whatever was collected so far.
            try:
                with open(seq_path, 'r') as seqf:
                    lines = seqf.readlines()
                    for line in lines:
                        # Skip comments
                        if line[0] == '#':
                            continue
                        line_fields = re.split('[^a-zA-Z0-9]', line.strip())
                        sequences.append((line_fields[0], int(line_fields[1]), int(line_fields[2])))
            except:
                pass
        return sequences
def ftCreator():
    """Factory used by Maya to instantiate the translator plug-in."""
    return OpenMayaMPx.asMPxPtr( MayaAWDFileTranslator() )
def initializePlugin(mobject):
    """Maya plug-in hook: register the AWD2 file translator."""
    mplugin = OpenMayaMPx.MFnPlugin(mobject, 'Away3D', '1.0')
    stat = mplugin.registerFileTranslator('AWD2', 'none', ftCreator, 'MayaAWDExporterUI')
    return stat
def uninitializePlugin(mobject):
    """Maya plug-in hook: deregister the AWD2 file translator."""
    mplugin = OpenMayaMPx.MFnPlugin(mobject)
    stat = mplugin.deregisterFileTranslator('AWD2')
    return stat
class MayaAWDBlockCache:
    '''A cache of already created AWD blocks, and their connection to
    nodes in the Maya DAG. The cache should always be checked before
    creating a block, so that blocks can be reused within the file
    when possible.'''
    def __init__(self):
        # path -> block.  A dict gives O(1) get()/add(); the original
        # list of (path, block) pairs was scanned linearly per lookup.
        self.__cache = {}
    def get(self, path):
        """Return the block cached under `path`, or None if absent."""
        return self.__cache.get(path)
    def add(self, path, block):
        """Cache `block` under `path`; first insertion wins."""
        if path not in self.__cache:
            self.__cache[path] = block
class MayaAWDExporter:
    def __init__(self, file, compression, wide_geom=False, wide_mtx=False):
        """Create an exporter that writes AWD data to the open `file`.

        `compression` is one of the pyawd compression constants; the
        include_* flags are toggled by the caller before export().
        """
        self.file = file
        self.block_cache = MayaAWDBlockCache()
        # Roots of exported skeletons (consumed by export_animation()).
        self.skeleton_paths = []
        # Joint name -> index in its skeleton's flattened joint list.
        self.joint_indices = {}
        # Mesh path -> original vertex index per condensed vertex
        # (used to reorder skin weights in export_skeletons()).
        self.mesh_vert_indices = {}
        # Export feature toggles -- all off by default.
        self.include_attr = False
        self.include_geom = False
        self.include_scene = False
        self.flatten_untransformed = False
        self.replace_exrefs = False
        self.include_uvanim = False
        self.include_skelanim = False
        self.include_skeletons = False
        self.include_materials = False
        self.include_cameras = False
        self.include_lights = False
        self.embed_textures = False
        self.animation_sequences = []
        # Set True once any skin weights are exported.
        self.has_skelanim = False
        self.awd = AWD(compression=compression, wide_geom=wide_geom, wide_mtx=wide_mtx)
    def export(self, selection):
        """Run the full export and flush the AWD document to the file.

        NOTE(review): `selection` is currently ignored -- the whole
        scene is always exported.
        """
        # Assume that bind pose is on frame 1
        om.MGlobal.viewFrame(0)
        self.export_scene()
        if self.include_skeletons:
            self.export_skeletons()
        if self.include_skelanim and self.has_skelanim:
            self.export_animation(self.animation_sequences)
        self.awd.flush(self.file)
    def export_scene(self):
        """Walk the Maya DAG depth-first and export visible transforms.

        Meshes, cameras and plain containers become AWD scene blocks;
        invisible branches are pruned entirely.
        """
        dag_it = om.MItDag(om.MItDag.kDepthFirst)
        while not dag_it.isDone():
            visible = False
            try:
                # Both the visibility attribute and its override must be on.
                attr0 = '%s.visibility' % dag_it.partialPathName()
                attr1 = '%s.ovv' % dag_it.partialPathName()
                visible = mc.getAttr(attr0) and mc.getAttr(attr1)
            except:
                # Nodes without these attributes are treated as invisible.
                pass
            if visible:
                if dag_it.currentItem().hasFn(om.MFn.kTransform):
                    transform = dag_it.fullPathName()
                    print('')
                    print('================================================')
                    print('export %s' % dag_it.fullPathName())
                    print('================================================')
                    def find_nearest_cached_ancestor(child_dag_fn):
                        # Walk up the DAG until an already-exported AWD
                        # parent block is found (or the root is reached).
                        if child_dag_fn.parentCount() > 0:
                            parent_dag_fn = om.MFnDagNode(child_dag_fn.parent(0))
                            print('looking in cache for "%s"' % parent_dag_fn.fullPathName())
                            awd_parent = self.block_cache.get(parent_dag_fn.fullPathName())
                            if awd_parent is not None:
                                return awd_parent
                            else:
                                return find_nearest_cached_ancestor(parent_dag_fn)
                        else:
                            return None
                    dag_fn = om.MFnDagNode(dag_it.currentItem())
                    awd_parent = find_nearest_cached_ancestor(dag_fn)
                    shapes = mc.listRelatives(transform, s=True, f=True)
                    if shapes is not None:
                        shape = shapes[0]
                        api_type = mc.nodeType(shape, api=True)
                        if api_type == 'kMesh':
                            self.export_mesh(transform, shape, awd_parent)
                        elif api_type == 'kCamera' and self.include_cameras:
                            # Cameras for some reason are "shapes" in Maya
                            self.export_camera(transform, awd_parent)
                    elif not dag_it.currentItem().hasFn(om.MFn.kJoint):
                        # Container!
                        mtx = mc.xform(transform, q=True, m=True)
                        #Skip this container if untransformed and transformation is identity
                        id_mtx = [1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1]
                        if not (self.flatten_untransformed and mtx == id_mtx):
                            ctr = AWDContainer(name=dag_it.partialPathName(), transform=self.mtx_list2awd(mtx))
                            print('saving in cache with id %s' % transform)
                            self.block_cache.add(transform, ctr)
                            if awd_parent is not None:
                                awd_parent.add_child(ctr)
                            else:
                                self.awd.add_scene_block(ctr)
                            self.set_attributes(transform, ctr)
            else:
                if dag_it.fullPathName(): # Not root
                    # Stop iterating this branch of the tree
                    dag_it.prune()
                    print('skipping invisible %s' % dag_it.fullPathName())
            dag_it.next()
def export_camera(self, transform, awd_parent):
mtx = mc.xform(transform, q=True, m=True)
cam = AWDCamera(self.get_name(transform), AWDMatrix3x4(mtx))
cam.type = CAM_FREE
cam.lens = LENS_PERSPECTIVE
cam.fov = mc.camera(transform, q=True, vfv=True)
if awd_parent is not None:
awd_parent.add_child(cam)
else:
self.awd.add_scene_block(cam)
    def export_skeletons(self):
        """Export skin weights for every skin cluster bound to an
        already-exported skeleton and mesh.

        For each vertex the `joints_per_vert` strongest joint weights
        are kept, normalized, and appended to the mesh's AWD streams.
        """
        dag_it = om.MItDependencyNodes(om.MFn.kSkinClusterFilter)
        while not dag_it.isDone():
            obj = dag_it.thisNode()
            joints = om.MDagPathArray()
            skin_fn = omanim.MFnSkinCluster(obj)
            num_joints = skin_fn.influenceObjects(joints)
            # Loop through joints and look in block cache whether
            # a skeleton for this joint has been exported. If not,
            # we will ignore this binding altogether.
            skel = None
            for i in range(num_joints):
                skel = self.block_cache.get(self.get_skeleton_root(joints[i].fullPathName()))
                if skel is not None:
                    break
            # Skeleton was found
            if skel is not None:
                # Loop through meshes that are influenced by this
                # skeleton, and add weight stream to their mesh data
                num_geoms = skin_fn.numOutputConnections()
                for i in range(num_geoms):
                    skin_path = om.MDagPath()
                    skin_fn.getPathAtIndex(i, skin_path)
                    vert_it = om.MItMeshVertex(skin_path)
                    # Check whether a mesh data for this geometry has
                    # been added to the block cache. If not, bindings
                    # for this mesh can be ignored.
                    md = self.block_cache.get(self.get_name(skin_path.fullPathName()))
                    if md is not None:
                        weight_data = []
                        index_data = []
                        self.has_skelanim = True
                        while not vert_it.isDone():
                            comp = vert_it.currentItem()
                            weights = om.MDoubleArray()
                            weight_objs = []
                            # Collect (joint index, weight) per influence.
                            for ii in range(num_joints):
                                skin_fn.getWeights(skin_path, comp, ii, weights)
                                joint_name = joints[ii].fullPathName()
                                # NOTE(review): keyed by short joint name --
                                # assumes short names are unique in the scene.
                                joint_idx = self.joint_indices[joint_name.split('|')[-1]]
                                weight_objs.append( (joint_idx, weights[0]) )
                            # Sort strongest-first (descending weight).
                            def comp_weight_objs(wo0, wo1):
                                if wo0[1] > wo1[1]: return -1
                                else: return 1
                            weight_objs.sort(comp_weight_objs)
                            # Normalize top weights
                            weight_objs = weight_objs[0:self.joints_per_vert]
                            sum_obj = reduce(lambda w0,w1: (0, w0[1]+w1[1]), weight_objs)
                            if sum_obj[1] > 0.0:
                                weight_objs = map(lambda w: (w[0], w[1] / sum_obj[1]), weight_objs)
                            # Add more empty weight objects if too few
                            if len(weight_objs) != self.joints_per_vert:
                                weight_objs.extend([(0,0)] * (self.joints_per_vert - len(weight_objs)))
                            for w_obj in weight_objs:
                                index_data.append(w_obj[0])
                                weight_data.append(w_obj[1])
                            vert_it.next()
                        weight_stream = []
                        index_stream = []
                        # This list contains the old-index of each vertex in the AWD vertex stream
                        vert_indices = self.mesh_vert_indices[skin_path.fullPathName()]
                        # Reorder the per-vertex tuples to match the
                        # condensed AWD vertex order.
                        for idx in vert_indices:
                            start_idx = idx*self.joints_per_vert
                            end_idx = start_idx + self.joints_per_vert
                            w_tuple = weight_data[start_idx:end_idx]
                            i_tuple = index_data[start_idx:end_idx]
                            weight_stream.extend(w_tuple)
                            index_stream.extend(i_tuple)
                        if len(md) == 1:
                            print('Setting streams!')
                            sub = md[0]
                            sub.add_stream(pyawd.geom.STR_JOINT_WEIGHTS, weight_stream)
                            sub.add_stream(pyawd.geom.STR_JOINT_INDICES, index_stream)
                        else:
                            print('skinning not implemented for meshes with <> 1 sub-mesh')
            dag_it.next()
    def export_animation(self, sequences):
        """Sample skeleton poses (and optional UV anims) per sequence.

        `sequences` is a list of (name, start_frame, end_frame) tuples;
        every frame of each sequence is sampled by stepping the Maya
        timeline.
        """
        #TODO: Don't hard-code these.
        #animated_materials = [ 'MAT_BlueEye_L', 'MAT_BlueEye_R' ]
        #animated_materials = [ 'MAT_BrownEye_L', 'MAT_BrownEye_R' ]
        animated_materials = []
        for seq in sequences:
            frame_idx = seq[1]
            end_frame = seq[2]
            print('exporting sequence "%s" (%d-%d)' % seq)
            if len(self.skeleton_paths) > 0:
                anim = AWDSkeletonAnimation(seq[0])
                self.awd.add_skeleton_anim(anim)
            # One UV animation block per animated material.
            uvanims = []
            for mat in animated_materials:
                uvanim = AWDUVAnimation(mat.replace('MAT', 'UVANIM')+'_'+seq[0])
                uvanims.append(uvanim)
                self.awd.add_uv_anim(uvanim)
            while frame_idx <= end_frame:
                # Step the timeline, then sample everything at this frame.
                om.MGlobal.viewFrame(frame_idx)
                self.sample_materials(animated_materials, uvanims)
                for skeleton_path in self.skeleton_paths:
                    def get_all_transforms(joint_path, list):
                        # Depth-first collection of every joint's local
                        # transform, in export_skeleton()'s joint order.
                        mtx_list = mc.xform(joint_path, q=True, m=True)
                        list.append( self.mtx_list2awd(mtx_list))
                        children = mc.listRelatives(joint_path, type='joint')
                        if children is not None:
                            for child in children:
                                get_all_transforms(child, list)
                    skel_pose = AWDSkeletonPose()
                    all_transforms = []
                    get_all_transforms(skeleton_path, all_transforms)
                    for tf in all_transforms:
                        skel_pose.add_joint_transform(tf)
                    #TODO: Don't hard-code duration
                    anim.add_frame(skel_pose, 40)
                    self.awd.add_skeleton_pose(skel_pose)
                # Move to next frame
                frame_idx += 1
def export_mesh(self, transform, shape, awd_ctr):
try:
mtx = mc.xform(transform, q=True, m=True)
except:
print('skipping invalid %s' % transform)
tf_name = self.get_name(transform)
sh_name = self.get_name(shape)
tf_is_ref = mc.referenceQuery(transform, inr=True)
sh_is_ref = mc.referenceQuery(shape, inr=True)
if (tf_is_ref or sh_is_ref) and self.replace_exrefs:
# This is an external reference, and it should be
# replaced with an empty container in the AWD file
ctr = AWDContainer(name=tf_name, transform=AWDMatrix3x4(mtx))
self.set_attributes(transform, ctr)
self.block_cache.add(transform, ctr)
if awd_ctr is not None:
awd_ctr.add_child(ctr)
else:
self.awd.add_scene_block(ctr)
else:
md = self.block_cache.get(sh_name)
if md is None:
print('Creating mesh data %s' % sh_name)
md = AWDTriGeom(sh_name)
md.bind_matrix = AWDMatrix3x4(mtx)
self.export_mesh_data(md, shape)
self.awd.add_tri_geom(md)
self.block_cache.add(sh_name, md)
inst = AWDMeshInst(md, tf_name, self.mtx_list2awd(mtx))
self.set_attributes(transform, inst)
# Look for materials
if self.include_materials:
self.export_materials(transform, inst)
self.block_cache.add(transform, inst)
if awd_ctr is not None:
awd_ctr.add_child(inst)
else:
self.awd.add_scene_block(inst)
if self.include_skeletons:
history = mc.listHistory(transform)
clusters = mc.ls(history, type='skinCluster')
if len(clusters) > 0:
#TODO: Deal with multiple clusters?
sc = clusters[0]
influences = mc.skinCluster(sc, q=True, inf=True)
if len(influences) > 0:
skel_path = self.get_skeleton_root(influences[0])
if self.block_cache.get(skel_path) is None:
self.export_skeleton(skel_path)
    def export_materials(self, transform, awd_inst):
        """Export the shading of `transform` onto the AWD mesh instance.

        Walks each shading engine's history to pick up the lambert
        material, its first file texture and its place2dTexture node.
        """
        sets = mc.listSets(object=transform, t=1, ets=True)
        if sets is not None:
            for set in sets:
                if mc.nodeType(set)=='shadingEngine':
                    tex = None
                    mat = None
                    mat_his = mc.listHistory(set)
                    for state in mat_his:
                        state_type = mc.nodeType(state)
                        if state_type == 'lambert':
                            mat = self.block_cache.get(state)
                            if mat is None:
                                mat = AWDMaterial(AWDMaterial.BITMAP, name=self.get_name(state))
                                self.awd.add_material(mat)
                                self.block_cache.add(state, mat)
                                print('created material')
                                if self.alpha_blending or self.alpha_threshold > 0.0:
                                    # Check if transparency is an input (rather than scalars)
                                    # in which case the material needs to be marked as transparent,
                                    # to indicate that the texture's alpha channel should be used.
                                    tr_input = mc.connectionInfo('%s.it' % state, isDestination=True)
                                    if tr_input:
                                        if self.alpha_threshold > 0.0:
                                            mat.alpha_threshold = self.alpha_threshold
                                        else:
                                            mat.alpha_blending = True
                            awd_inst.materials.append(mat)
                            print('adding material ' + state)
                        # Only check the first file, which will likely be the color input.
                        # TODO: This needs to be solved in a prettier way for normal maps
                        # and other inputs like that.
                        elif state_type == 'file' and tex is None:
                            tex = self.block_cache.get(state)
                            if tex is None:
                                tex_abs_path = str(mc.getAttr(state+'.fileTextureName'))
                                if self.embed_textures:
                                    tex = AWDBitmapTexture(AWDBitmapTexture.EMBED, name=self.get_name(state))
                                    tex.embed_file(tex_abs_path)
                                    print('embedding %s' % tex_abs_path)
                                else:
                                    # External texture: store a workspace-relative URL.
                                    tex = AWDBitmapTexture(AWDBitmapTexture.EXTERNAL, name=self.get_name(state))
                                    tex.url = mc.workspace(pp=tex_abs_path)
                                self.awd.add_texture(tex)
                                self.block_cache.add(state, tex)
                                print('created texture')
                            if mat is not None:
                                mat.texture = tex
                        elif state_type == 'place2dTexture' and mat is not None:
                            # Determine from place2dTexture node whether
                            # this material should repeat/wrap
                            rep_uv = mc.getAttr('%s.re' % state)[0]
                            if rep_uv[0] != 1.0 or rep_uv[1] != 1.0:
                                mat.repeat = True
                            elif mc.getAttr(state+'.wu') or mc.getAttr(state+'.wv'):
                                mat.repeat = True
    def sample_materials(self, animated_materials, uvanims):
        """Sample each animated material's UV translation at this frame.

        `animated_materials` and `uvanims` are parallel lists; each
        material's most recent place2dTexture translate value becomes
        one frame of the corresponding AWDUVAnimation.
        """
        idx = 0
        for mat in animated_materials:
            pt = None
            mat_his = mc.listHistory(mat)
            uvanim = uvanims[idx]
            # Find most recent place2DTexture
            for state in mat_his:
                if mc.nodeType(state) == 'place2dTexture':
                    pt = state
                    break
            # NOTE(review): raises TypeError if no place2dTexture is
            # found (pt stays None).
            t = mc.getAttr(pt+'.tf')[0]
            #TODO: Don't hard-code duration
            uvanim.add_frame( AWDMatrix2x3([ 1, 0, 0, 1, -t[0], t[1] ]), 40)
            idx += 1
    def export_skeleton(self, root_path):
        """Export the joint hierarchy rooted at `root_path` as an
        AWDSkeleton, recording each joint's index for skinning."""
        skel = AWDSkeleton(name=root_path)
        joints = []
        def create_joint(joint_path, world_mtx=None):
            # Build one AWDSkeletonJoint (inverse bind matrix in world
            # space) and recurse into child joints.
            dag_path = self.get_dag_from_path(joint_path)
            tf_fn = om.MFnTransform(dag_path.node())
            tf = tf_fn.transformation()
            joint_wm = tf.asMatrix()
            if world_mtx is not None:
                joint_wm = joint_wm * world_mtx
            ibm = joint_wm.inverse()
            awd_mtx = self.mtx_maya2awd(ibm)
            name = self.get_name(joint_path)
            joint = AWDSkeletonJoint(name=name, inv_bind_mtx=awd_mtx)
            # Depth-first numbering; export_skeletons() looks indices up
            # by short name, while children here come from listRelatives
            # (short names) -- TODO confirm they always agree.
            self.joint_indices[joint_path] = len(joints)
            print('added joint %s as idx %d' % (joint_path, len(joints)))
            joints.append(name)
            children = mc.listRelatives(joint_path, type='joint')
            # NOTE(review): two-argument print() -- in Python 2 without
            # print_function this prints a tuple.
            print('JOINT CHILDREN: %s', str(children))
            if children is not None:
                for child_path in children:
                    joint.add_child_joint( create_joint(child_path, joint_wm) )
            return joint
        skel.root_joint = create_joint(root_path)
        self.awd.add_skeleton(skel)
        self.block_cache.add(root_path, skel)
        self.skeleton_paths.append(root_path)
    def get_skeleton_root(self, joint_path):
        """Return (as a string) the topmost ancestor of `joint_path`.

        Walks up via listRelatives until a node with no parent is found.
        """
        current = joint_path
        parent = mc.listRelatives(current, p=True)
        while parent:
            # listRelatives returns a list; it is unwrapped below.
            current = parent
            parent = mc.listRelatives(current, p=True)
        if isinstance(current, list):
            current = current[0]
        return str(current)
def get_dag_from_path(self, path):
list = om.MSelectionList()
list.add(path)
dag_path = om.MDagPath()
list.getDagPath(0, dag_path, om.MObject())
return dag_path
def export_mesh_data(self, md, shape_path):
    '''Extract triangulated geometry for the mesh shape at `shape_path`
    and append it to `md` as a single AWDSubGeom.

    Expands every triangle corner to (position, uv, per-face normal),
    condenses identical corners back into shared vertices, flips the
    Z axis / V coordinate / triangle winding for AWD conventions, and
    records the original Maya vertex index of each merged vertex so
    skin-binding data can be remapped later.
    '''
    dag_path = self.get_dag_from_path(shape_path)
    if dag_path.hasFn(om.MFn.kMesh):
        exp_vert_list = []

        def get_uvs(vert_it, face_idx):
            # Return the (u, v) used by the current vertex on face_idx.
            us = om.MFloatArray()
            vs = om.MFloatArray()
            uvis = om.MIntArray()
            # TODO: Deal with this failing (missing UVs)
            vert_it.getUVs(us, vs, uvis)
            for i in range(len(uvis)):
                if uvis[i] == face_idx:
                    return (us[i], vs[i])
            print('NO UV FOUND!!!!! WHY!!!!!??')
            return (0, 0)

        def get_vnormal(shape, vert_itx, face_idx):
            # Per-vertex-per-face normal queried through polyNormalPerVertex.
            attr = '%s.vtxFace[%d][%d]' % (shape, vert_itx, face_idx)
            vec = mc.polyNormalPerVertex(attr, q=True, xyz=True)
            return vec

        benchmark_start()
        print('getting mesh data for %s' % dag_path.fullPathName())
        print('type: %s' % dag_path.node().apiTypeStr())
        vert_it = om.MItMeshVertex(dag_path.node())
        poly_it = om.MItMeshPolygon(dag_path.node())
        while not poly_it.isDone():
            tri_inds = om.MIntArray()
            tri_points = om.MPointArray()
            poly_index = poly_it.index()
            idx_triple = []
            poly_it.getTriangles(tri_points, tri_inds)
            for i in range(tri_inds.length()):
                vert_index = tri_inds[i]
                pidx_util = om.MScriptUtil()
                vert_it.setIndex(vert_index, pidx_util.asIntPtr())
                u, v = get_uvs(vert_it, poly_index)
                normal = get_vnormal(shape_path, vert_index, poly_index)
                pos = vert_it.position()
                exp_vert_list.append(
                    [vert_index, poly_index, pos[0], pos[1], pos[2], u, v, normal[0], normal[1], normal[2]])
            poly_it.next()
        print('- Raw (expanded) data list created')
        benchmark_print()

        # Store this so binding (joint index) data can be
        # put into the right place of the new vertex list
        vert_indices = []
        self.mesh_vert_indices[dag_path.fullPathName()] = vert_indices

        vertices = []
        indices = []
        uvs = []
        normals = []
        exp_vert_inds = {}

        def has_vert(haystack, needle):
            # Return the index in `haystack` of a vertex whose attributes
            # (fields 2..9: position, uv, normal) match `needle`, or -1.
            if needle[0] in exp_vert_inds:
                for v_idx in exp_vert_inds[needle[0]]:
                    v = haystack[v_idx]
                    correct = True
                    for prop in range(2, 10):
                        if needle[prop] != v[prop]:
                            correct = False
                            break
                    # Bug fix: the match flag was computed but never used
                    # and -1 was returned unconditionally, so identical
                    # vertices were never merged.
                    if correct:
                        return v_idx
            return -1

        merged_vertices = []
        print('- Creating condensed list')
        benchmark_start()
        for v in exp_vert_list:
            idx = has_vert(merged_vertices, v)
            if idx >= 0:
                # Already has vertex
                indices.append(idx)
            else:
                # Store this for binding data
                vert_indices.append(v[0])
                # This vertex will be added into the expanded list of vertices,
                # which can get very large. To enable fast look-up, we map its
                # original index to that in the expanded list
                vert_index = v[0]
                if vert_index not in exp_vert_inds:
                    exp_vert_inds[vert_index] = []
                exp_vert_inds[vert_index].append(len(merged_vertices))
                indices.append(len(merged_vertices))
                merged_vertices.append(v)
        for v in merged_vertices:
            # Add vertex and index (Z, V and normal Z are flipped for
            # AWD's left-handed, V-down conventions)
            vertices.append(v[2])   # X
            vertices.append(v[3])   # Y
            vertices.append(-v[4])  # Z (inverted)
            uvs.append(v[5])        # U
            uvs.append(1 - v[6])    # V
            normals.append(v[7])    # Normal X
            normals.append(v[8])    # Normal Y
            normals.append(-v[9])   # Normal Z (inverted)
        benchmark_print()
        print('- DONE! Flipping windings')
        benchmark_start()
        # Flip windings: swap the 2nd and 3rd corner of every triangle.
        for idx in range(1, len(indices), 3):
            indices[idx], indices[idx + 1] = indices[idx + 1], indices[idx]
        benchmark_print()
        print('- Creating sub-mesh')
        sub = AWDSubGeom()
        sub.add_stream(pyawd.geom.STR_VERTICES, vertices)
        sub.add_stream(pyawd.geom.STR_TRIANGLES, indices)
        sub.add_stream(pyawd.geom.STR_UVS, uvs)
        sub.add_stream(pyawd.geom.STR_VERTEX_NORMALS, normals)
        print('- Adding sub-mesh')
        md.add_sub_geom(sub)
        # Store mesh data block to block cache
def set_attributes(self, dag_path, awd_elem):
    '''Copy user-defined Maya attributes of the node at `dag_path` onto
    the AWD element's user-attribute namespace, when enabled.'''
    if not self.include_attr:
        return
    extra_attributes = mc.listAttr(dag_path, ud=True)
    for attr in (extra_attributes or []):
        val = mc.getAttr('%s.%s' % (dag_path, attr))
        awd_elem.attributes[self.user_ns][str(attr)] = val
def get_name(self, dag_path):
    '''Return the short node name: the segment after the last "|".

    TODO: Deal with unicode names. In pyawd?
    '''
    return str(dag_path.rpartition('|')[2])
def mtx_list2awd(self, mtx):
    '''Convert a 16-element row-major 4x4 matrix list to an AWDMatrix3x4,
    negating the Z-related terms to flip handedness.'''
    # (source index in the 4x4 list, sign) for each of the 12 outputs:
    # 3x3 rotation rows followed by the translation row.
    layout = ((0, 1), (1, 1), (2, -1),
              (4, 1), (5, 1), (6, -1),
              (8, -1), (9, -1), (10, 1),
              (12, 1), (13, 1), (14, -1))
    mtx_list = [mtx[src] * sign for src, sign in layout]
    return AWDMatrix3x4(mtx_list)
def mtx_maya2awd(self, mtx):
    '''Flatten a Maya matrix (callable as mtx(row, col)) into a
    16-element row-major list and convert it via mtx_list2awd.'''
    mtx_list = [mtx(row, col) for row in range(4) for col in range(4)]
    return self.mtx_list2awd(mtx_list)
| Python |
import bpy
import bpy.path
from bpy.props import *
from bpy_extras.io_utils import ExportHelper
__all__ = ['awd_export']
# Blender add-on registration metadata (targets the Blender 2.5.9 API).
bl_info = {
    'name': 'Away3D AWD2 format',
    'author': 'Richard Olsson',
    'blender': (2,5,9),
    'api': 35622,
    'location': 'File > Export',
    'description': 'Export AWD2 files',
    'warning': '',
    'category': 'Import-Export'
}
class ExportAWD(bpy.types.Operator, ExportHelper):
    '''Blender export operator for the Away3D AWD2 format.'''
    bl_idname = 'away3d.awd_export'
    bl_label = 'Export AWD'
    bl_options = {'PRESET'}
    filename_ext = '.awd'
    # Hide non-AWD files in the file selector.
    filter_glob = StringProperty(
        default = '*.awd',
        options = {'HIDDEN'})
    include_materials = BoolProperty(
        name = 'Include materials',
        default = True)
    embed_textures = BoolProperty(
        name = 'Embed textures',
        default = True)
    include_attr = BoolProperty(
        name = 'Include attributes',
        description = 'Export Blender custom properties as AWD user attributes',
        default = True)
    def draw(self, context):
        # Options panel shown in the file browser sidebar.
        layout = self.layout
        layout.prop(self, 'include_materials')
        layout.prop(self, 'embed_textures')
        layout.prop(self, 'include_attr')
    def execute(self, context):
        # Imported at call time so add-on registration does not require
        # the exporter module (and its pyawd dependency) to be importable.
        from . import awd_export
        # Forward all operator properties (filepath, include_materials,
        # embed_textures, include_attr) as keyword arguments.
        kwds = self.as_keywords(ignore=('check_existing','filter_glob'))
        print(kwds)
        exporter = awd_export.AWDExporter()
        exporter.export(context, **kwds)
        return {'FINISHED'}
def menu_func_export(self, context):
    '''Add the AWD entry to the File > Export menu.'''
    layout = self.layout
    layout.operator(ExportAWD.bl_idname, text="Away3D (.awd)")
def register():
    # Register every class in this module with Blender, then hook the
    # export operator into the File > Export menu.
    bpy.utils.register_module(__name__)
    bpy.types.INFO_MT_file_export.append(menu_func_export)
def unregister():
    # Undo register(): remove the menu entry and unregister the classes.
    bpy.utils.unregister_module(__name__)
    bpy.types.INFO_MT_file_export.remove(menu_func_export)
| Python |
import re
import os.path
import functools
import mathutils
from math import radians
import bpy
import pyawd
from pyawd.core import *
from pyawd.anim import *
from pyawd.scene import *
from pyawd.geom import *
from pyawd.material import *
from pyawd.utils.math import *
from pyawd.utils.geom import AWDGeomUtil
class AWDBlockCache(object):
    '''A cache of already created AWD blocks keyed by their source node.

    Always check the cache before creating a block so blocks can be
    reused within the file when possible. A plain list of (key, block)
    pairs is used because keys need not be hashable.
    '''
    def __init__(self):
        self.__cache = []

    def get(self, path):
        '''Return the block cached under `path`, or None.'''
        for cached_path, cached_block in self.__cache:
            if cached_path == path:
                return cached_block
        return None

    def add(self, path, block):
        '''Cache `block` under `path` unless an entry already exists.'''
        if self.get(path) is None:
            self.__cache.append((path, block))
class AWDExporter(object):
    '''Exports a Blender scene to an AWD2 document.'''
    def __init__(self):
        # Maps Blender data (objects, meshes, materials, textures, bones)
        # to their already-created AWD blocks.
        self.block_cache = AWDBlockCache()
        self.exported_skeletons = []
        self.animation_sequences = []
        self.exported_objects = []
        # NOTE(review): extract_joint_weights() reads self.vertex_indices,
        # but its initializer is commented out here and nothing else in
        # this file assigns it — verify where it should be populated.
        #self.vertex_indices = {}
        # TODO: Don't hard code these
        self.compression = DEFLATE
        self.user_ns = AWDNamespace('default')
    def export(self, context, filepath='',
            include_materials = True,
            embed_textures = True,
            include_attr = True):
        '''Export all supported objects of the scene to `filepath`.

        Empties become containers, meshes become geometry + instances,
        armatures become skeletons; the scene hierarchy is then rebuilt
        from the cached blocks and the document is flushed to disk.
        '''
        self.context = context
        self.include_materials = include_materials
        self.embed_textures = embed_textures
        self.include_attr = include_attr
        self.awd = AWD(self.compression)
        # First pass: create a block for every supported object.
        for o in self.context.scene.objects:
            if o.type == 'EMPTY':
                self.export_container(o)
            elif o.type == 'MESH':
                self.export_mesh(o)
            elif o.type == 'ARMATURE':
                self.export_skeleton(o)
        # Loop through scene objects again and add either directly
        # to the AWD document root or to it's parent if one exists.
        # At this point, an AWD representation of the parent is
        # guaranteed to have been created if included in the export.
        for o in self.exported_objects:
            block = self.block_cache.get(o)
            if o.parent is not None:
                if o.parent.type == 'ARMATURE':
                    # Skinned mesh: emit weights, then attach to the
                    # armature's parent (or the root) since the armature
                    # itself has no scene block.
                    self.extract_joint_weights(o)
                    if o.parent.parent is not None:
                        par_block = self.block_cache.get(o.parent.parent)
                        par_block.add_child(block)
                    else:
                        self.awd.add_scene_block(block)
                else:
                    par_block = self.block_cache.get(o.parent)
                    par_block.add_child(block)
            else:
                self.awd.add_scene_block(block)
        # Export animation sequences
        # self.export_animation()
        with open(filepath, 'wb') as f:
            self.awd.flush(f)
def extract_joint_weights(self, o):
armature = o.parent
geom = o.data
skel = self.block_cache.get(armature)
# TODO: Don't hard code
joints_per_vert = 3
joint_weights = []
joint_indices = []
vert_indices = self.vertex_indices[geom.name]
for bl_vidx in vert_indices:
v = geom.vertices[bl_vidx]
weight_objs = []
for ge in v.groups:
group = o.vertex_groups[ge.group]
j_idx = skel.joint_index(name=group.name)
if j_idx is not None:
weight_objs.append((j_idx, ge.weight))
else:
weight_objs.append((0, 0))
# Normalize weights by slicing to the desired length, calculating
# the sum of all weights and then dividing all weights by that sum.
weight_objs = weight_objs[0:joints_per_vert]
sum_obj = functools.reduce(lambda w0,w1: (0, w0[1]+w1[1]), weight_objs)
weight_objs = [(w[0], w[1]/sum_obj[1]) for w in weight_objs]
# Add more empty weight objects if too few
if len(weight_objs) != joints_per_vert:
weight_objs.extend([(0,0)] * (joints_per_vert-len(weight_objs)))
for w_obj in weight_objs:
joint_indices.append(w_obj[0])
joint_weights.append(w_obj[1])
# Add newly assembled streams
md = self.block_cache.get(geom)
md[0].add_stream(STR_JOINT_WEIGHTS, joint_weights)
md[0].add_stream(STR_JOINT_INDICES, joint_indices)
    def export_container(self, o):
        # Create an AWDContainer block for an EMPTY object and cache it
        # so the hierarchy pass can look it up.
        mtx = self.mtx_bl2awd(o.matrix_local)
        ctr = AWDContainer(name=o.name, transform=mtx)
        self.block_cache.add(o, ctr)
        self.exported_objects.append(o)
        if self.include_attr:
            self.set_attributes(o, ctr)
    def export_animation(self):
        '''Sample each configured sequence frame-by-frame into skeleton
        poses and per-skeleton animation clips.'''
        # Unlock from bind pose
        for o in self.exported_skeletons:
            o.data.pose_position = 'POSE'
        # Each sequence is a (name, start_frame, end_frame) tuple.
        for seq in self.animation_sequences:
            skel_anims = {}
            for o in self.exported_skeletons:
                skel_anim = AWDSkeletonAnimation(seq[0])
                skel_anims[o.name] = skel_anim
                self.awd.add_skeleton_anim(skel_anim)
            print('Exporting sequences %s (%d-%d)' % seq)
            for frame in range(seq[1], seq[2]):
                self.context.scene.frame_set(frame)
                for o in self.exported_skeletons:
                    skel_pose = AWDSkeletonPose()
                    for bp in o.pose.bones:
                        mtx = self.mtx_bl2awd(bp.matrix_basis)
                        skel_pose.add_joint_transform(mtx)
                    # Pad with an identity transform to match the number
                    # of joints (for first joint both head and tail were
                    # included when skeleton was created.)
                    skel_pose.add_joint_transform(
                        self.mtx_bl2awd(mathutils.Matrix()))
                    self.awd.add_skeleton_pose(skel_pose)
                    # NOTE(review): frame duration hard-coded to 40
                    # (25 fps?) — confirm against the scene frame rate.
                    skel_anims[o.name].add_frame(skel_pose, 40)
    def export_mesh(self, o):
        '''Create (or reuse) the mesh data block for `o.data`, create a
        mesh instance for `o`, and export its diffuse material/texture.'''
        md = self.block_cache.get(o.data)
        if md is None:
            print('Creating mesh %s' % o.data.name)
            # If bound to a skeleton, set that skeleton in bind pose
            # to make sure that the geometry is defined in that state
            if o.parent is not None and o.parent.type == 'ARMATURE':
                o.parent.data.pose_position = 'REST'
            md = self.build_mesh_data(o.data)
            # NOTE(review): the pyawd AWD class in this package exposes
            # add_tri_geom(); confirm add_mesh_data exists in the pyawd
            # version this targets.
            self.awd.add_mesh_data(md)
            self.block_cache.add(o.data, md)
        mtx = self.mtx_bl2awd(o.matrix_local)
        # NOTE(review): AWDMeshInst in this package takes the geometry as
        # 'geom', not 'data' — verify this keyword against pyawd.scene.
        inst = AWDMeshInst(data=md, name=o.name, transform=mtx)
        self.block_cache.add(o, inst)
        if self.include_materials:
            print('Checking materials for %s' % o.name)
            awd_mat = None
            for ms in o.material_slots:
                awd_tex = None
                bl_mat = ms.material
                if bl_mat is None or bl_mat.type != 'SURFACE':
                    continue # Ignore non-surface materials for now
                awd_mat = self.block_cache.get(bl_mat)
                if awd_mat is None:
                    # Look for the first diffuse-color texture slot and
                    # turn it into an AWD texture (embedded or external).
                    for ts in bl_mat.texture_slots:
                        # Skip empty slots
                        if ts is None:
                            continue
                        if ts.use_map_color_diffuse:
                            # Main input!
                            bl_tex = ts.texture
                            awd_tex = self.block_cache.get(bl_tex)
                            if awd_tex is None:
                                if bl_tex.type == 'IMAGE' and bl_tex.image is not None:
                                    bl_img = bl_tex.image
                                    # BitmapMaterial
                                    if self.embed_textures:
                                        # NOTE(review): tex_type stays None
                                        # for formats other than PNG/JPG —
                                        # verify AWDTexture handles that.
                                        tex_type = None
                                        if bl_img.file_format == 'PNG':
                                            tex_type = AWDTexture.EMBED_PNG
                                        elif bl_img.file_format == 'JPG':
                                            tex_type = AWDTexture.EMBED_JPG
                                        awd_tex = AWDTexture(tex_type, name=bl_tex.name)
                                        awd_tex.embed_file( bpy.path.abspath( bl_img.filepath ))
                                    else:
                                        awd_tex = AWDTexture(AWDTexture.EXTERNAL, name=bl_tex.name)
                                        awd_tex.url = bl_img.filepath
                                    self.block_cache.add(bl_tex, awd_tex)
                                    self.awd.add_texture(awd_tex)
                            break
                    print('Found texture to create material?')
                    if awd_tex is not None:
                        awd_mat = AWDMaterial(AWDMaterial.BITMAP, name=bl_mat.name)
                        awd_mat.texture = awd_tex
                        if self.include_attr:
                            self.set_attributes(bl_mat, awd_mat)
                        print('Yes! Created material!')
                        self.block_cache.add(bl_mat, awd_mat)
                        self.awd.add_material(awd_mat)
                if awd_mat is not None:
                    inst.materials.append(awd_mat)
        if self.include_attr:
            self.set_attributes(o, inst)
        self.exported_objects.append(o)
    def export_skeleton(self, o):
        '''Convert an armature object into an AWDSkeleton block.

        A synthetic 'root' joint is created from the first bone's head;
        each bone contributes one joint bound at its tail.
        '''
        root_joint = None
        # Use bind pose
        o.data.pose_position = 'REST'
        for b in o.data.bones:
            joint = AWDSkeletonJoint(b.name)
            joint.inv_bind_mtx = self.mtx_bl2awd(
                mathutils.Matrix.Translation(b.tail_local).inverted())
            if root_joint is None:
                # First bone: synthesize the root joint from its head.
                root_joint = AWDSkeletonJoint('root')
                root_joint.add_child_joint(joint)
                root_joint.inv_bind_mtx = self.mtx_bl2awd(
                    mathutils.Matrix.Translation(b.head_local).inverted())
            else:
                # Attach under the already-created joint of the parent bone.
                p_block = self.block_cache.get(b.parent)
                if p_block is not None:
                    p_block.add_child_joint(joint)
            self.block_cache.add(b, joint)
        if root_joint is not None:
            skel = AWDSkeleton(name=o.name)
            skel.root_joint = root_joint
            self.awd.add_skeleton(skel)
            self.block_cache.add(o, skel)
            self.exported_skeletons.append(o)
    def build_mesh_data(self, geom):
        '''Build an AWDMeshData block from a Blender mesh datablock by
        feeding expanded, triangulated per-corner vertices to AWDGeomUtil.'''
        vertex_edges = {}
        geom_util = AWDGeomUtil()
        # Create lookup table for edges by vertex, to use
        # when determining if a vertex is on a hard edge
        for e in geom.edges:
            for v in e.vertices:
                if v not in vertex_edges:
                    vertex_edges[v] = []
                vertex_edges[v].append(e)
        tex_data = None
        has_uvs = False
        if len(geom.uv_textures):
            has_uvs = True
            tex_data = geom.uv_textures[0].data
        # Generate expanded list of vertices. Corner order (0,2,1) and
        # (0,3,2) triangulates tris/quads with reversed winding.
        for f in geom.faces:
            inds_in_face = [0,2,1]
            if len(f.vertices)==4:
                inds_in_face.extend((0,3,2))
            for idx in inds_in_face:
                vert = geom.vertices[f.vertices[idx]]
                edges = vertex_edges[vert.index]
                has_hard_edge = False
                for e in edges:
                    if e.use_edge_sharp:
                        has_hard_edge = True
                        break
                uv = None
                if tex_data is not None and len(tex_data)>0:
                    # TODO: Implement secondary UV sets?
                    tex_face = tex_data[f.index]
                    uv = [tex_face.uv[idx][0], 1.0-tex_face.uv[idx][1]]
                # Y and Z swapped: Blender is Z-up, AWD is Y-up.
                v = [vert.co.x, vert.co.z, vert.co.y]
                n = [f.normal.x, f.normal.z, f.normal.y]
                # NOTE(review): AWDGeomUtil.append_vert_data() in this
                # package takes joint weights/indices before force_hard —
                # verify the fifth positional argument really lands on the
                # hard-edge flag.
                geom_util.append_vert_data(vert.index, v, uv, n, has_hard_edge)
        # Find influences for all vertices
        #for v0 in expanded_vertices:
        #    for v1 in expanded_vertices:
        #        angle = degrees(v0['normal'].angle(v1['normal']))
        #        if angle <= geom.auto_smooth_angle:
        #            v0['normal_influences'].append(v1['f'])
        #            v1['normal_influences'].append(v0['f'])
        md = AWDMeshData(geom.name)
        if geom.use_auto_smooth:
            geom_util.normal_threshold = geom.auto_smooth_angle
        geom_util.build_geom(md)
        #md.add_sub_mesh(AWDSubMesh())
        #md[0].add_stream(STR_VERTICES, vertices)
        #md[0].add_stream(STR_TRIANGLES, indices)
        #md[0].add_stream(STR_VERTEX_NORMALS, normals)
        return md
def set_attributes(self, ob, awd_elem):
for key in ob.keys():
if (key != '_RNA_UI'):
print('setting prop %s.%s=%s' % (ob.name, key, ob[key]))
awd_elem.attributes[self.user_ns][str(key)] = str(ob[key])
    def mtx_bl2awd(self, mtx):
        '''Convert a Blender 4x4 matrix to an AWDMatrix4x4, swapping the
        Y/Z axes of the decomposed transform to go from Blender's Z-up
        space to AWD's Y-up space.'''
        # Decompose matrix
        pos, rot, scale = mtx.decompose()
        # Swap translation axes
        tmp = pos.y
        pos.y = pos.z
        pos.z = tmp
        # Swap rotation axes
        tmp = rot.y
        rot.x = -rot.x
        rot.y = -rot.z
        rot.z = -tmp
        # Recompose matrix
        mtx = mathutils.Matrix.Translation(pos).to_4x4() * rot.to_matrix().to_4x4()
        # Create list from rows
        rows = list(mtx)
        mtx_list = []
        mtx_list.extend(list(rows[0]))
        mtx_list.extend(list(rows[1]))
        mtx_list.extend(list(rows[2]))
        mtx_list.extend(list(rows[3]))
        # Apply swapped-axis scale
        # NOTE(review): scale is applied to the diagonal only, which is
        # exact only when the rotation keeps the axes aligned — verify
        # for rotated, non-uniformly scaled objects.
        mtx_list[0] *= scale.x
        mtx_list[5] *= scale.y
        mtx_list[10] *= scale.z
        #print(mtx_list[0:4])
        #print(mtx_list[4:8])
        #print(mtx_list[8:12])
        #print(mtx_list[12:])
        return AWDMatrix4x4(mtx_list)
if __name__ == '__main__':
    def read_sequences(seq_path, base_path):
        '''Read (name, start, end) animation sequences from a text file,
        searching relative paths against the .blend directory and
        base_path. Returns [] when the file cannot be found.'''
        sequences = []
        if seq_path is not None:
            if not os.path.isabs(seq_path):
                # Look for this file in a list of different locations,
                # and use the first one in which it exists.
                existed = False
                bases = [
                    bpy.path.abspath('//'),
                    base_path
                ]
                for base in bases:
                    new_path = os.path.join(base, seq_path)
                    print('Looking for sequence file in %s' % new_path)
                    if os.path.exists(new_path) and os.path.isfile(new_path):
                        existed = True
                        seq_path = new_path
                        break
                if not existed:
                    #mc.warning('Could not find sequence file "%s. Will not export animation."' % seq_path)
                    return []
            try:
                with open(seq_path, 'r') as seqf:
                    lines = seqf.readlines()
                    for line in lines:
                        # Skip comments
                        if line[0] == '#':
                            continue
                        # Fields: name, start frame, end frame.
                        line_fields = re.split('[^a-zA-Z0-9]', line.strip())
                        sequences.append((line_fields[0], int(line_fields[1]), int(line_fields[2])))
            except:
                # NOTE(review): the bare re-raise makes the 'pass' below
                # unreachable — decide whether errors should propagate or
                # be swallowed.
                raise
                pass
        return sequences
    # NOTE(review): BlenderAWDExporter is not defined anywhere in this
    # module (the class above is AWDExporter, whose constructor takes no
    # arguments and whose export() requires a context) — this entry point
    # raises NameError as written. Verify intended class/signature.
    exporter = BlenderAWDExporter(bpy.path.abspath('//blendout.awd'))
    exporter.animation_sequences = read_sequences('sequences.txt', '.')
    exporter.export()
| Python |
#!/usr/bin/env python
import sys
import getopt
import struct
import zlib
#from pyawd import core
# Bit flags selecting which block categories get a full dump.
BLOCKS = 0x1
GEOMETRY = 0x2
SCENE = 0x4
ANIMATION = 0x8
# Module-level state shared by the print helpers below; `include` and
# `offset` are (re)assigned in the __main__ section.
include = 0
offset = 0
indent_level = 0
wide_geom = False
# NOTE(review): this second assignment makes the line above dead code.
# Both are overwritten from the file header in __main__ — confirm which
# default is intended for direct use of the helpers.
wide_geom = True
# AWD block type identifiers.
BT_MESH_DATA = 1
BT_CONTAINER = 22
BT_MESH_INST = 23
BT_SKELETON = 101
BT_SKELPOSE = 102
BT_SKELANIM = 103
def printl(str=''):
    '''Print a line prefixed with two spaces per global indent level.'''
    global indent_level
    print('%s%s' % ('  ' * indent_level, str))
def print_header(data):
    # Decode and print the file header that follows the 3-byte magic:
    # <major><minor><flags:16><compression><body-length:32>, little-endian.
    compressions = ('uncompressed', 'deflate (file-level)', 'lzma (file-level)')
    header = struct.unpack_from('<BBHBI', data, 3)
    if header[3] < len(compressions):
        compression = compressions[header[3]]
    else:
        compression = '<error> %s' % hex(header[3])
    printl('version: %d.%d' % (header[0], header[1]))
    printl('compression: %s' % compression)
    printl('body size: %d (%s)' % (header[4], hex(header[4])))
    printl()
    # Returns (compression id, wide-geometry flag bit, wide-matrix flag
    # bit); the flag values are the raw masked bits (0/2 and 0/4).
    return (header[3], header[2] & 2, header[2] & 4)
def read_var_str(data, offs=0):
    '''Read a length-prefixed string (uint16 length followed by that many
    bytes) starting at `offs` and return it as bytes.

    The locals were renamed from 'len' and 'str', which shadowed the
    builtins of the same name.
    '''
    str_len = struct.unpack_from('<H', data, offs)[0]
    return struct.unpack_from('%ds' % str_len, data, offs + 2)[0]
def print_properties(data):
    # Print a block's property list (uint32 total length, then repeated
    # <key:16><len:16><payload> entries, payload shown as hex bytes).
    # Returns the number of bytes consumed, including the length field.
    global indent_level
    offs = 0
    printl()
    props_len = struct.unpack_from('<I', data, offs)[0]
    offs += 4
    if props_len > 0:
        printl('PROPERTIES: (%db)' % props_len)
        props_end = offs + props_len
        indent_level += 1
        while (offs < props_end):
            prop_key, prop_len = struct.unpack_from('<HH', data, offs)
            offs += 4
            prop_end = offs + prop_len
            val_str = ''
            while (offs < prop_end):
                val_str += '%02x ' % struct.unpack_from('<B', data, offs)[0]
                offs += 1
            printl('%d: %s' % (prop_key, val_str))
        indent_level -= 1
    return offs
def print_user_attributes(data):
    '''Print the size of a trailing user-attribute list.

    Returns the number of bytes consumed. Note that only the 4-byte
    length field is consumed; the attribute payload itself is not
    parsed (or skipped) yet.
    '''
    attr_len = struct.unpack_from('<I', data, 0)[0]
    if attr_len > 0:
        printl('USER ATTRIBUTES (%db)' % attr_len)
    return 4
def print_skeleton(data):
    '''Dump a Skeleton block body: name, joint count, properties, then
    one <id><parent-id><name><matrix> record per joint.'''
    global indent_level
    name = read_var_str(data)
    offs = 2+len(name)
    num_joints = struct.unpack_from('<H', data, offs)[0]
    offs += 2
    printl('NAME: %s' % name)
    printl('JOINTS: %d' % num_joints)
    offs += print_properties(data[offs:])
    indent_level += 1
    joints_printed = 0
    while offs < len(data) and joints_printed < num_joints:
        joint_id, parent_id = struct.unpack_from('<HH', data, offs)
        offs += 4
        joint_name = read_var_str(data, offs)
        printl('JOINT %s (id=%d, parent=%d)' % (
            joint_name, joint_id, parent_id))
        offs += (2 + len(joint_name))
        mtx = read_mtx(data, offs)
        indent_level += 1
        print_matrix(mtx)
        # NOTE(review): the fixed 48-byte skip assumes narrow (12-float)
        # matrices; wide-matrix files would need 96 — verify.
        offs += 48
        offs += 4 # (No properties)
        offs += print_user_attributes(data[offs:])
        indent_level -= 1
        joints_printed += 1
    indent_level -= 1
    offs += print_user_attributes(data[offs:])
def print_skelpose(data):
    '''Dump a SkeletonPose block body: name, transform count, then one
    optional matrix per joint (a zero flag byte means no transform).'''
    global indent_level
    offs = 0
    pose_name = read_var_str(data, offs)
    offs += (2 + len(pose_name))
    num_joints = struct.unpack_from('<H', data, offs)[0]
    offs += 2
    printl('NAME: %s' % pose_name)
    printl('NUM TRANSFORMS: %d' % num_joints)
    offs += print_properties(data[offs:])
    indent_level += 1
    for j_idx in range(num_joints):
        has_transform = struct.unpack_from('B', data, offs)[0]
        printl('Transform')
        indent_level += 1
        offs += 1
        if has_transform == 1:
            mtx = read_mtx(data, offs)
            print_matrix(mtx)
            # NOTE(review): assumes narrow (12-float) matrices; wide
            # matrices would need a 96-byte skip — verify.
            offs += 48
        else:
            indent_level += 1
            printl('No transformation of this joint')
            indent_level -= 1
        indent_level -= 1
    indent_level -= 1
    offs += print_user_attributes(data[offs:])
def read_scene_data(data):
    '''Parse the common scene-block prelude: uint32 parent id, a
    transform matrix and a length-prefixed name.

    Returns (parent_id, matrix, name, bytes_consumed). The fixed
    4 + 48 + 2 layout assumes narrow (float) matrices.
    '''
    parent_id = struct.unpack_from('<I', data)[0]
    mtx = read_mtx(data, 4)
    block_name = read_var_str(data, 52)
    return (parent_id, mtx, block_name, 54 + len(block_name))
def print_container(data):
    '''Dump a Container block body.'''
    parent, matrix, name, _ = read_scene_data(data)
    printl('NAME: %s' % name)
    printl('PARENT ID: %d' % parent)
    printl('TRANSFORM MATRIX:')
    print_matrix(matrix)
def print_mesh_instance(data):
    '''Dump a MeshInst block body: scene prelude plus the uint32 id of
    the referenced mesh data block.'''
    parent, matrix, name, offs = read_scene_data(data)
    data_id = struct.unpack_from('<I', data, offs)[0]
    printl('NAME: %s' % name)
    printl('DATA ID: %d' % data_id)
    printl('PARENT ID: %d' % parent)
    printl('TRANSFORM MATRIX:')
    print_matrix(matrix)
def read_mtx(data, offset):
    # Read a 12-element transform matrix: doubles when the file's
    # wide-matrix flag is set, floats otherwise.
    # NOTE(review): relies on the module global `wide_mtx`, which is only
    # assigned in the __main__ section — calling this after importing the
    # module directly would raise NameError.
    if wide_mtx:
        matrix = struct.unpack_from('<12d', data, offset)
    else:
        matrix = struct.unpack_from('<12f', data, offset)
    return matrix
def print_matrix(matrix):
    '''Print a 12-element matrix as four rows of three values.'''
    for row_start in (0, 3, 6, 9):
        printl('%f %f %f' % tuple(matrix[row_start:row_start + 3]))
def print_mesh_data(data):
    '''Dump a MeshData block body: name, sub-mesh count, properties,
    then every data stream of every sub-mesh, element by element.'''
    global indent_level
    name = read_var_str(data)
    offs = (2 + len(name)) # var str
    num_subs = struct.unpack_from('<H', data, offs)[0]
    offs += 2
    printl('NAME: %s' % name)
    printl('SUB-MESHES: %d' % num_subs)
    offs += print_properties(data[offs:])
    printl()
    subs_printed = 0
    indent_level += 1
    while offs < len(data) and subs_printed < num_subs:
        length = struct.unpack_from('<I', data, offs)[0]
        offs += 4
        printl('SUB-MESH')
        indent_level += 1
        printl('Length: %d' % length)
        indent_level -= 1
        offs += 4 # TODO: Read numeric properties
        sub_end = offs + length
        indent_level += 1
        while offs < sub_end:
            stream_types = ('', 'VERTEX', 'TRIANGLE', 'UV', 'VERTEX_NORMALS', 'VERTEX_TANGENTS', 'JOINT_INDICES', 'VERTEX_WEIGHTS')
            # Stream header: <type:8><data-type:8><byte-length:32>.
            type, data_type, str_len = struct.unpack_from('<BBI', data, offs)
            offs += 6
            if type < len(stream_types):
                stream_type = stream_types[type]
                # Float streams (vertices, uvs, normals, tangents, weights)
                # widen to doubles under the wide-geometry flag; index
                # streams (triangles, joint indices) widen to uint32.
                if type == 1 or type == 3 or type==4 or type==5 or type==7:
                    if wide_geom:
                        elem_data_format = 'd'
                    else:
                        elem_data_format = 'f'
                    elem_print_format = '%f'
                elif type == 2 or type == 6:
                    if wide_geom:
                        elem_data_format = 'I'
                    else:
                        elem_data_format = 'H'
                    elem_print_format = '%d'
            else:
                stream_type = '<error> %x' % type
            # NOTE(review): for stream type 0 or any type >= 8,
            # elem_data_format/elem_print_format are left over from the
            # previous stream (or unbound on the first) — verify such
            # types cannot occur, or add a guard.
            printl('STREAM (%s)' % stream_type)
            indent_level += 1
            printl('Length: %d' % str_len)
            str_end = offs + str_len
            while offs < str_end:
                element = struct.unpack_from('<%s' % elem_data_format, data, offs)
                printl(elem_print_format % element[0])
                offs += struct.calcsize(elem_data_format)
            printl()
            indent_level -= 1
        subs_printed += 1
        indent_level -= 1
    indent_level -= 1
    offs += print_user_attributes(data[offs:])
def print_next_block(data):
    # Print the block header found at the module-global `offset` and,
    # when the block's category is selected in `include`, dump its body.
    # Returns the total block size (11-byte header + body length).
    global indent_level
    block_types = {}
    block_types[BT_MESH_DATA] = 'MeshData'
    block_types[BT_CONTAINER] = 'Container'
    block_types[BT_MESH_INST] = 'MeshInst'
    block_types[BT_SKELETON] = 'Skeleton'
    block_types[BT_SKELPOSE] = 'SkeletonPose'
    block_types[BT_SKELANIM] = 'SkeletonAnimation'
    # Header layout: <id:32><namespace:8><type:8><flags:8><length:32>.
    block_header = struct.unpack_from('<IBBBI', data, offset)
    type = block_header[2]
    flags = block_header[3]
    length = block_header[4]
    if type in block_types:
        block_type = block_types[type]
    else:
        block_type = '<error> %s (%x)' % (type, type)
    printl('BLOCK %s' % block_type)
    indent_level += 1
    printl('NS: %d, ID: %d' % (block_header[1], block_header[0]))
    printl('Flags: %x' % flags)
    printl('Length: %d' % length)
    # Body dumps are gated on the category flags parsed from argv.
    if type == BT_MESH_INST and include&SCENE:
        printl()
        print_mesh_instance(data[offset+11 : offset+11+length])
    elif type == BT_CONTAINER and include &SCENE:
        printl()
        print_container(data[offset+11 : offset+11+length])
    elif type == BT_MESH_DATA and include&GEOMETRY:
        printl()
        print_mesh_data(data[offset+11 : offset+11+length])
    elif type == BT_SKELETON and include&ANIMATION:
        printl()
        print_skeleton(data[offset+11 : offset+11+length])
    elif type == BT_SKELPOSE and include&ANIMATION:
        printl()
        print_skelpose(data[offset+11 : offset+11+length])
    printl()
    indent_level -= 1
    return 11 + length
if __name__ == '__main__':
    # Flags: -b blocks, -g geometry, -s scene, -a animation, -x everything.
    opts, files = getopt.getopt(sys.argv[1:], 'bgsax')
    for opt in opts:
        if opt[0] == '-b':
            include |= BLOCKS
        elif opt[0] == '-g':
            include |= (GEOMETRY | BLOCKS)
        elif opt[0] == '-s':
            include |= (SCENE | BLOCKS)
        elif opt[0] == '-a':
            include |= (ANIMATION | BLOCKS)
        elif opt[0] == '-x':
            include = 0xffff
    for file in files:
        # NOTE(review): the file handle is never closed — consider 'with'.
        f = open(file, 'rb')
        data = f.read()
        printl(file)
        indent_level += 1
        compression, wide_geom, wide_mtx = print_header(data)
        uncompressed_data = None
        if compression == 0:
            # Body follows the 12-byte header directly.
            offset = 12
            uncompressed_data = data
        elif compression == 1:
            # Deflate: decompress everything after the header.
            offset = 0
            data = data[12:]
            uncompressed_data = zlib.decompress(data)
        elif compression == 2:
            # LZMA: a uint32 uncompressed length precedes the body.
            # Requires the third-party pylzma module.
            import pylzma
            offset = 0
            uncompressed_len = struct.unpack_from('<I', data, 12)[0]
            data = data[16:]
            uncompressed_data = pylzma.decompress(data, uncompressed_len, uncompressed_len)
        else:
            print('unknown compression: %d' % compression)
            sys.exit(-1)
        if include & BLOCKS:
            while offset < len(uncompressed_data):
                offset += print_next_block(uncompressed_data)
| Python |
#!/usr/bin/env python
import os
import sys
import getopt
from distutils.core import setup, Extension
# Expose the running Python major version to the C++ extension build.
version_macro = ('PYTHON_VERSION', sys.version_info[0])
# C++ sources of the optional libawd-backed accelerator module.
source_files = [
    'src/util.cc',
    'src/bcache.cc',
    'src/prepgeom.cc',
    'src/prepscene.cc',
    'src/prepmaterial.cc',
    'src/prepanim.cc',
    'src/prepattr.cc',
    'src/cpyawd.cc',
    'src/AWDWriter.cc',
    'src/utils/build_geom.cc'
]
def parse_bool(str):
    '''Interpret a command-line option value as a boolean.

    Only '0' and any capitalization of 'false' yield False; every other
    value yields True.
    '''
    return not (str == '0' or str.lower() == 'false')
use_libawd = True
# Parse the optional --use-libawd=<bool> flag. Bug fix: shortopts must be
# a string per the getopt API ('' = no short options); passing None broke
# option parsing whenever a short option appeared on the command line.
opts, args = getopt.getopt(sys.argv[1:], '', ['use-libawd='])
for opt in opts:
    if opt[0] == '--use-libawd':
        use_libawd = parse_bool(opt[1])
ext_modules = []
if use_libawd:
    # Build the C++ accelerator extension linked against libawd.
    ext_modules.append(Extension('pyawd.cpyawd',
        libraries = [ 'awd' ],
        include_dirs = [ 'include', '/usr/local/include' ],
        define_macros = [ version_macro ],
        sources = source_files ))
setup(name = 'pyawd',
    version = '0.1.0',
    description = 'Python package for dealing with Away3D AWD files',
    ext_modules = ext_modules,
    package_dir = { 'pyawd': 'src/pyawd', 'pyawd.utils': 'src/pyawd/utils' },
    packages = [ 'pyawd', 'pyawd.utils' ]
    )
| Python |
from pyawd import core
# AWD sub-geometry data stream type identifiers.
STR_VERTICES = 1
STR_TRIANGLES = 2
STR_UVS = 3
STR_VERTEX_NORMALS = 4
STR_VERTEX_TANGENTS = 5
STR_JOINT_INDICES = 6
STR_JOINT_WEIGHTS = 7
class AWDSubGeom:
    '''A single sub-geometry: an ordered collection of (type, data)
    streams, indexable and sized like a sequence.'''
    def __init__(self):
        self.__data_streams = []

    def add_stream(self, type, data):
        '''Append one data stream of the given stream type.'''
        self.__data_streams.append((type, data))

    def __len__(self):
        return len(self.__data_streams)

    def __getitem__(self, index):
        return self.__data_streams[index]
class AWDTriGeom(core.AWDAttrElement, core.AWDBlockBase):
    '''Triangle geometry block: a named, sequence-like list of
    AWDSubGeom objects.'''
    def __init__(self, name=''):
        super(AWDTriGeom, self).__init__()
        self.name = name
        self.__sub_geoms = []

    def add_sub_geom(self, sub):
        '''Append one sub-geometry to this block.'''
        self.__sub_geoms.append(sub)

    def __len__(self):
        return len(self.__sub_geoms)

    def __getitem__(self, index):
        return self.__sub_geoms[index]
| Python |
import struct
import sys
# Compression constants
# File-level compression identifiers as stored in the AWD header.
UNCOMPRESSED = 0
DEFLATE = 1
LZMA = 2
class AWDBlockBase(object):
    '''Common base class for all AWD file blocks.'''
    def __init__(self, type=0):
        super(AWDBlockBase, self).__init__()
        # Bug fix: the passed-in block type was previously discarded
        # (the attribute was hard-coded to 0 regardless of `type`).
        self.__type = type

    def write_block(self, file, addr):
        '''Write this block's header (address, namespace, type, length)
        to `file`.

        NOTE(review): '>' (big-endian) disagrees with the little-endian
        '<' packing used throughout the rest of the package — verify the
        intended byte order and field layout.
        '''
        file.write(struct.pack('>IBBI', addr, 0, self.__type, 0))

    def write_body(self, file):
        pass # To be overridden
class AWDNamespace(object):
    '''A user-attribute namespace identified by its string handle.'''
    def __init__(self, handle):
        self.__handle = handle

    def get_handle(self):
        '''Return the namespace handle.'''
        return self.__handle
class AWDMetaData(object):
    # Document metadata: encoder and generator identification strings.
    def __init__(self):
        # Imported inside the constructor to avoid a circular import at
        # module load time.
        import pyawd
        # pyawd.version / pyawd.backend are (name, major, minor, patch,
        # suffix) tuples; the encoder string combines both versions.
        self.encoder = 'PyAWD (%s)' % pyawd.backend[0]
        self.encoder_version = '%d.%d.%d%s (%d.%d.%d%s)' % (
            pyawd.version[1], pyawd.version[2], pyawd.version[3], pyawd.version[4],
            pyawd.backend[1], pyawd.backend[2], pyawd.backend[3], pyawd.backend[4])
        # Filled in by the application embedding pyawd, if at all.
        self.generator = None
        self.generator_version = None
class AWDAttrElement(object):
    '''Mix-in giving a block a per-namespace user-attribute dictionary,
    accessed as element.attributes[namespace][key].'''
    class AWDAttrDict(object):
        '''Lazily creates one attribute dict per namespace handle.'''
        def __init__(self):
            self.__nsdict = {}

        def __getitem__(self, ns):
            # Create the namespace's dict on first access.
            return self.__nsdict.setdefault(ns.get_handle(), {})

    def __init__(self):
        super(AWDAttrElement, self).__init__()
        self.attributes = self.AWDAttrDict()
class AWD(object):
    '''An in-memory AWD document: header options plus one list per block
    category, flushed to a file via the C or pure-python writer.'''
    def __init__(self, compression=0, streaming=False, wide_geom=False, wide_mtx=False):
        self.compression = compression
        # Fold the boolean options into the header flag bits.
        self.flags = 0
        for enabled, bit in ((streaming, 1), (wide_geom, 2), (wide_mtx, 4)):
            if enabled:
                self.flags |= bit
        self.metadata = None
        self.texture_blocks = []
        self.material_blocks = []
        self.tri_geom_blocks = []
        self.uvanim_blocks = []
        self.skelanim_blocks = []
        self.skelpose_blocks = []
        self.skeleton_blocks = []
        self.scene_blocks = []

    def flush(self, file=sys.stdout):
        '''Serialize the document to `file`, preferring the native
        (libawd) writer and falling back to the pure-python one.'''
        try:
            from pyawd import cpyawd as io
        except:
            print("Using pure python for writing. Build PyAWD with --use-libawd=true")
            print("to build using libawd for optimized writing performance.")
            from pyawd import pyio as io
        writer = io.AWDWriter()
        writer.write(self, file)

    def add_texture(self, block):
        self.texture_blocks.append(block)

    def add_material(self, block):
        self.material_blocks.append(block)

    def add_tri_geom(self, block):
        self.tri_geom_blocks.append(block)

    def add_scene_block(self, block):
        self.scene_blocks.append(block)

    def add_uv_anim(self, block):
        self.uvanim_blocks.append(block)

    def add_skeleton(self, block):
        self.skeleton_blocks.append(block)

    def add_skeleton_pose(self, block):
        self.skelpose_blocks.append(block)

    def add_skeleton_anim(self, block):
        self.skelanim_blocks.append(block)
| Python |
class AWDGeomUtil(object):
    '''Accumulates expanded per-corner vertex records and hands them to
    the native geometry builder.'''
    def __init__(self):
        self._vertices = []
        self.normal_threshold = 0.0

    def append_vert_data(self, index, pos, uv=None, norm=None, joint_wheights=None, joint_indices=None, force_hard = False):
        '''Queue one vertex record: original index, position, uv, normal,
        hard-edge flag and optional skinning data.'''
        if uv is None:
            uv = [0, 0]
        if norm is None:
            norm = [0, 0, 0]
        record = (index,
                  pos[0], pos[1], pos[2],
                  uv[0], uv[1],
                  norm[0], norm[1], norm[2],
                  force_hard, joint_wheights, joint_indices)
        self._vertices.append(record)

    def build_geom(self, mesh_data):
        '''Run the C-extension geometry builder over the queued vertices,
        filling `mesh_data` in place.'''
        from pyawd.cpyawd import util_build_geom
        return util_build_geom(self._vertices, mesh_data, self.normal_threshold)
| Python |
class AWDMatrix3x4(object):
    '''A 3x4 matrix (3x3 rotation/scale followed by translation) stored
    as a flat 12-element list.'''
    def __init__(self, raw_data=None):
        self.raw_data = raw_data
        if self.raw_data is None:
            # Identity. Bug fix: the previous default held 16 values (a
            # 4x4 identity), which does not match the 12 elements of a
            # 3x4 matrix (compare AWDMatrix2x3's 6-element default).
            self.raw_data = [1,0,0, 0,1,0, 0,0,1, 0,0,0]
class AWDMatrix2x3(object):
    '''A 2x3 matrix stored as a flat 6-element list, defaulting to
    identity.'''
    def __init__(self, raw_data=None):
        if raw_data is None:
            raw_data = [1, 0, 0, 1, 0, 0]
        self.raw_data = raw_data
class AWDVector2(object):
    '''A 2-component vector stored as a flat list, defaulting to zero.'''
    def __init__(self, raw_data=None):
        if raw_data is None:
            raw_data = [0, 0]
        self.raw_data = raw_data
class AWDVector3(object):
    '''A 3-component vector stored as a flat list, defaulting to zero.'''
    def __init__(self, raw_data=None):
        if raw_data is None:
            raw_data = [0, 0, 0]
        self.raw_data = raw_data
| Python |
__all__ = [ 'geom', 'math' ]
| Python |
from pyawd import utils
from pyawd import core
class AWDSceneBlockBase(core.AWDAttrElement, core.AWDBlockBase):
    '''Base class for scene-graph blocks: a name, a transform, and a
    consistent parent/children relationship.'''
    def __init__(self, name='', transform=None):
        super(AWDSceneBlockBase, self).__init__()
        self.__children = []
        self.__parent = None
        self.name = name
        self.transform = transform
        if self.transform is None:
            self.transform = utils.AWDMatrix4x4()

    def get_parent(self):
        '''Return this block's parent block, or None.'''
        return self.__parent

    def set_parent(self, parent):
        '''Attach this block to `parent` (or detach with None), keeping
        the parent's child list and this block's parent pointer in sync.

        Bug fix: the original notified the old parent via remove_child()
        while self.__parent was still set, and remove_child() called
        set_parent(None) back, so any detach recursed until
        RecursionError. The parent pointer is now cleared before the old
        parent is notified, and an early return stops the mutual
        add_child/set_parent calls once the relationship is established.
        '''
        if parent is self.__parent:
            return
        old_parent = self.__parent
        self.__parent = None
        if old_parent is not None:
            old_parent.remove_child(self)
        self.__parent = parent
        if parent is not None:
            parent.add_child(self)

    def add_child(self, child):
        '''Append `child` to this block's children and reparent it here.'''
        if child not in self.__children:
            self.__children.append(child)
            child.set_parent(self)

    def remove_child(self, child):
        '''Remove `child` from this block and clear its parent pointer.'''
        while child in self.__children:
            self.__children.remove(child)
        if child.get_parent() is self:
            child.set_parent(None)
class AWDScene(AWDSceneBlockBase):
    '''Top-level scene block; behaves exactly like its base.'''
    def __init__(self, name='', transform=None):
        super(AWDScene, self).__init__(name, transform)
class AWDMeshInst(AWDSceneBlockBase):
    '''Scene-graph instance of a geometry block, with a material list.'''
    def __init__(self, geom=None, name='', transform=None, data=None):
        '''Create a mesh instance.

        The geometry may be passed positionally as `geom` (original
        interface, unchanged) or via the `data` keyword, which is
        accepted for backward compatibility with callers that construct
        instances as AWDMeshInst(data=...).
        '''
        self.geometry = geom if geom is not None else data
        self.materials = []
        super(AWDMeshInst, self).__init__(name, transform)
class AWDContainer(AWDSceneBlockBase):
    '''Grouping container block; behaves exactly like its base.'''
    def __init__(self, name='', transform=None):
        super(AWDContainer, self).__init__(name, transform)
| Python |
class AWDWriter(object):
    '''Pure-python fallback writer: emits every block of an AWD document
    in dependency order, assigning sequential block addresses.'''
    def __init__(self):
        # Block addresses start at 1 and increase per written block.
        self.__block_addr = 1

    def write(self, awd, file):
        '''Write all block lists of `awd` to `file`.'''
        def write_blocks(blocks):
            for b in blocks:
                b.write_block(file, self.__block_addr)
                self.__block_addr += 1
        write_blocks(awd.texture_blocks)
        write_blocks(awd.material_blocks)
        # Bug fix: the AWD document stores geometry in `tri_geom_blocks`;
        # it has no `mesh_data_blocks` attribute, so the original raised
        # AttributeError on every flush through this writer.
        write_blocks(awd.tri_geom_blocks)
        write_blocks(awd.uvanim_blocks)
        write_blocks(awd.skelanim_blocks)
        write_blocks(awd.skelpose_blocks)
        write_blocks(awd.skeleton_blocks)
        write_blocks(awd.scene_blocks)
| Python |
from pyawd import utils
from pyawd import core
class GenericAnimFrame(object):
    '''A single animation frame: an arbitrary payload plus a duration.'''
    def __init__(self, data=None, duration=0):
        self.data = data
        self.duration = duration
class GenericAnim(object):
    '''Sequence-like wrapper around a list of GenericAnimFrame objects.

    Indices are coerced with int(); assignment only accepts
    GenericAnimFrame instances.
    '''
    def __init__(self, frames):
        super(GenericAnim, self).__init__()
        self.__frames = frames

    def __len__(self):
        return len(self.__frames)

    def __getitem__(self, key):
        return self.__frames[int(key)]

    def __setitem__(self, key, val):
        if not isinstance(val, GenericAnimFrame):
            raise ValueError('value must be GenericAnimFrame instance')
        self.__frames[int(key)] = val

    def __contains__(self, item):
        return item in self.__frames
class AWDSkeleton(core.AWDBlockBase, core.AWDAttrElement):
    '''A skeleton block: a named hierarchy of AWDSkeletonJoint objects
    rooted at `root_joint`.'''
    def __init__(self, name=''):
        super(AWDSkeleton, self).__init__()
        self.name = name
        self.root_joint = None

    def joint_index(self, name=None, joint=None):
        '''Return the 1-based, depth-first index of a joint (the root
        being 1), or None when the skeleton is empty or no joint with
        the given name exists.

        Raises:
            AttributeError: if neither `name` nor `joint` is given.
        '''
        if name is None:
            if joint is not None:
                name = joint.name
            else:
                raise AttributeError('either name or joint argument must be defined.')
        if self.root_joint is None:
            return None
        elif self.root_joint.name == name:
            # Bug fix: the root match previously returned the joint
            # object itself while every other match yields an integer;
            # callers use the result as a numeric joint index.
            return 1
        else:
            def find_name(joints, cur_idx):
                # Depth-first search; returns (found_index_or_None,
                # next_unused_index).
                for j in joints:
                    if j.name == name:
                        return (cur_idx, cur_idx)
                    else:
                        found_idx, cur_idx = find_name(j._AWDSkeletonJoint__children, cur_idx+1)
                        if found_idx is not None:
                            return (found_idx, cur_idx)
                return (None, cur_idx)
            # Find joint, starting at 2 (1 being the root, which has already
            # been checked outside of the recursion.)
            ret = find_name(self.root_joint._AWDSkeletonJoint__children, 2)
            if ret is not None:
                return ret[0]
class AWDSkeletonAnimation(GenericAnim, core.AWDAttrElement, core.AWDBlockBase):
    """A named skeleton animation: a sequence of pose frames."""

    def __init__(self, name=''):
        self.name = name
        # The same list object is shared with the GenericAnim base,
        # so appending here is visible through the sequence interface.
        self.__frames = []
        super(AWDSkeletonAnimation, self).__init__(self.__frames)

    def add_frame(self, pose, duration):
        """Append *pose* as a frame held for int(duration) time units."""
        self.__frames.append(GenericAnimFrame(data=pose, duration=int(duration)))
class AWDSkeletonJoint(core.AWDAttrElement):
    """A joint in a skeleton hierarchy, carrying an inverse bind matrix."""

    def __init__(self, name='', inv_bind_mtx=None):
        super(AWDSkeletonJoint, self).__init__()
        self.name = name
        self.__children = []
        self.__parent = None
        # Default to a fresh identity matrix when no bind matrix is supplied.
        self.inv_bind_mtx = inv_bind_mtx
        if self.inv_bind_mtx is None:
            self.inv_bind_mtx = utils.AWDMatrix4x4()

    def remove_child_joint(self, child):
        """Detach *child* from this joint's child list."""
        self.__children.remove(child)

    def add_child_joint(self, child):
        """Attach *child* to this joint, detaching it from any current parent."""
        if child.__parent is not None:
            child.__parent.remove_child_joint(child)
        child.__parent = self
        self.__children.append(child)
class AWDSkeletonPose(core.AWDBlockBase, core.AWDAttrElement):
    """A named skeleton pose: a list with one transform per joint."""

    def __init__(self, name=''):
        super(AWDSkeletonPose, self).__init__()
        self.transforms = []
        self.name = name

    def add_joint_transform(self, transform=None):
        """Append a joint transform; None marks a joint without a transform."""
        self.transforms.append(transform)
class AWDUVAnimation(GenericAnim, core.AWDAttrElement, core.AWDBlockBase):
    """A named UV animation: a sequence of UV-transform frames."""

    def __init__(self, name=''):
        self.name = name
        # Shared with the GenericAnim base so appends are visible there.
        self.__frames = []
        super(AWDUVAnimation, self).__init__(self.__frames)

    def add_frame(self, transform, duration):
        """Append *transform* as a frame held for int(duration) time units."""
        self.__frames.append(GenericAnimFrame(data=transform, duration=int(duration)))
| Python |
__all__ = [ 'core', 'anim', 'geom', 'material', 'scene' ]
version = ('PyAWD', 2, 0, 0, 'a')
try:
from pyawd import cpyawd
# Ignore release letter for final releases
release = chr(cpyawd.LIBAWD_VERSION_RELEASE)
if release == 'f':
release = ''
backend = ('libawd',
cpyawd.LIBAWD_VERSION_MAJOR,
cpyawd.LIBAWD_VERSION_MINOR,
cpyawd.LIBAWD_VERSION_BUILD,
release)
backend_str = '%s v%d.%d.%d%s' % backend
except:
backend = ('python', None, None)
backend_str = 'python'
version_str = '%s v%d.%d.%d%s (%s)' % (
version[0], version[1], version[2], version[3], version[4], backend_str)
| Python |
from pyawd import core
class AWDBitmapTexture(core.AWDAttrElement, core.AWDBlockBase):
    """A bitmap texture block, referenced by URL or embedded as raw bytes."""

    # Texture storage types.
    EXTERNAL = 0
    EMBED = 1

    def __init__(self, type=0, name='', url=None):
        """Create a texture block.

        Args:
            type: EXTERNAL (URL reference) or EMBED (inline bytes).
            name: optional block name.
            url: location of the image for EXTERNAL textures.
        """
        super(AWDBitmapTexture, self).__init__()
        self.type = type
        self.name = name
        self.url = url
        # Embedded image bytes, filled by embed_file(). Initialized here so
        # the attribute always exists (the original created it only lazily).
        self.__data = None

    def embed_file(self, path):
        """Read the image file at *path* and store its bytes for embedding."""
        with open(path, 'rb') as f:
            self.__data = f.read()
class AWDMaterial(core.AWDAttrElement, core.AWDBlockBase):
    """A material block: flat color or bitmap texture, with blend settings."""

    # Material types.
    COLOR = 1
    BITMAP = 2

    def __init__(self, type=0, name='', texture=None):
        super(AWDMaterial, self).__init__()
        # Identity and texture reference.
        self.name = name
        self.type = type
        self.texture = texture
        # Rendering defaults: black, fully opaque, no repeat, no blending.
        self.color = 0
        self.alpha = 1.0
        self.repeat = False
        self.alpha_blending = False
        self.alpha_threshold = 0.0
| Python |
import numpy as np
import cv2
import urllib2
import socket
import sys
import math
import os
class Processor:
    """Image-processing helpers for locating the blue goal markers."""

    def get_blue(self, img):
        """Return the blue channel of a BGR image.

        Bug fix: cv2.imread produces BGR-ordered images, so the blue channel
        is index 0; the original returned index 2, which is the red channel.
        """
        blue = cv2.split(img)[0]
        return blue

    def get_blue_hue(self, img):
        """Return a binary mask of pixels matching the blue LEDs in HSV space.

        Thresholds may need fine-tuning for lighting conditions.
        Note: Photoshop, GIMP and OpenCV all use different HSV scales; the
        LEDs measure roughly H=120, S=235, V=235 on OpenCV's scale, hence
        the band below.
        """
        hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        lower = [100, 200, 200]
        upper = [140, 255, 255]
        # Keep only pixels within the blue band.
        only_blue = cv2.inRange(hsv_img, np.array(lower), np.array(upper))
        return only_blue
def getDistFromCenter():
    """Grab a camera frame and locate the goal marker in it.

    Returns a tuple (result_string, goals_img): result_string is
    'distFromCenter,x1,y1,distToWall' on success (distToWall in milli-feet),
    or 'n' when no goal is visible. Relies on the module-level globals
    ``processor`` and ``picurl`` set up in __main__. Python 2 code.
    """
    #this is the main image processing function
    #it is only called when the socket connection recieves a 'G'
    #showPrint is used for logging. turn off for competition.
    showPrint = False
    """grab image"""
    opener = urllib2.build_opener()
    page = opener.open(picurl)
    pic =page.read()
    #here, we write the image to disk because it needs to be read into opencv, not simply passed.
    #this could probably be opmitted with the pillow library, but I don't feel like it.
    #that would probably make this faster, especially considering we're running a class 2 SD card.
    #note to self: use class 10 next time.
    fout = open('image.jpg', 'wb')
    fout.write(pic)
    fout.close()
    #read img into opencv format
    img = cv2.imread('image.jpg')
    # Camera frame dimensions in pixels.
    GlobalWidth = 640
    GlobalHeight = 480
    #optionally resize by commenting out this line. It didn't have much impact, so no biggie.
    resize = False
    if resize:
        img = cv2.resize(img, (GlobalWidth/2,GlobalHeight/2))
        GlobalWidth = GlobalWidth/2
        GlobalHeight = GlobalHeight/2
    #show images on gui with cv2.imshow()
    if showPrint: cv2.imshow('original_img', img)
    #if showPrint: print 'got image...'
    #grab only blue version and make a copy to work with
    blue_only = processor.get_blue_hue(img)
    goals_img = blue_only.copy()
    #grab contours in image with basic search
    contours, hierarchy = cv2.findContours(blue_only, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    #if showPrint: print 'foundContours...'
    #if showPrint:
    #blue_only = cv2.resize(blue_only, (GlobalWidth, GlobalHeight))
    #cv2.drawContours(blue_only, contours, -1, (156,156,156), 3)
    #cv2.imshow('all_contours', blue_only)
    #this block of code grabs all the contours above a certain area and then chooses the smallest of them
    os.system('clear')
    # idx is the index of the chosen goal contour; -1 means "none found".
    area = 0
    idx = -1
    for i,cnt in enumerate(contours):
        #for each contour
        #if showPrint: print cv2.contourArea(cnt)
        #if area is above 800 and below 8k, is probably goal
        if (800 < cv2.contourArea(cnt) < 20000):
            #temporarily grab convex hull and stuf for each contour
            rect = cv2.convexHull(cnt)
            minRect = cv2.minAreaRect(rect)
            x1 = int(minRect[0][0])
            y1 = int(minRect[0][1])
            width = minRect[1][0]
            height = minRect[1][1]
            degree = minRect[2]
            if showPrint:
                print 'x1:', x1
                print 'y1:', y1
                print 'width:', width
                print 'height:', height
                print 'degree:', degree
            if showPrint:
                pass
                #cv2.drawContours(blue_only, rect, -1, (156,156,156), 8)
                #cv2.polylines(img, np.array(minRectPoints), True, (200,200,200), 20)
                #retval = cv2.boundingRect(rect)
                #print 'retval:', retval
                #cv2.imshow('point_set', blue_only)
            # Aspect ratio of the bounding rect; guard against divide-by-zero.
            if minRect[1][0]:
                ratio = minRect[1][1] / minRect[1][0]
            else:
                ratio = 0
            if showPrint: print 'ratio', ratio
            # Goal rectangles are roughly 3:1 (or 1:3 when rotated 90 deg).
            if ((2.9 < ratio < 3.3) or (0.25 < ratio < 0.37)):
                #if the goal is part of the 3pt one, I only want the inside contour
                #therefore grab the smallest contour that fullfills the ratio
                if showPrint: print 'winning ratio:', ratio
                if (area < cv2.contourArea(cnt)):
                    idx = i
                """
                if (area < cnt.size):
                    idx = i
                """
    #if showPrint: print 'idx:', idx
    if (idx != -1):
        #if a goal was found
        if showPrint:
            cv2.drawContours(goals_img, contours, idx, (50, 255, 60), 3)
            cv2.imshow('rects', goals_img)
        #grab an approx rect
        rect = cv2.convexHull(contours[idx])
        # NOTE(review): the next line is missing a 'print' and is a no-op.
        if showPrint: 'rect:', rect
        #grab a minimum area rect
        minRect = cv2.minAreaRect(rect)
        area = cv2.contourArea(contours[idx])
        if showPrint:
            pass
            #cv2.rectangle(goals_img,
        #this is a good rect to use
        #find centre point
        #apparently the format is as follows: botLeftX, botLeftY, Width, Height
        x1 = int(minRect[0][0])
        y1 = int(minRect[0][1])
        width = minRect[1][0]
        height = minRect[1][1]
        degree = minRect[2]
        if showPrint: print 'DEGREE:', degree
        # if opencv accidentally inverts the rectangle, this fixes it
        if width < height:
            width, height = height, width
        # read the included whitepaper to fully understand this.
        #it's simply a ratio of the pixel width to the actual width
        if showPrint: print width
        dist_FOV = 4.25*GlobalWidth/width
        #this uses a little bit of trig to get the distance to the wall
        if showPrint: print dist_FOV
        dist_to_wall = (dist_FOV/2) / 0.41237445509
        #this number is then corrected for error based on a function from excel
        # and then finally rounded to milli-feet
        new_dist_to_wall = dist_to_wall * (0.178195*np.log(dist_to_wall) + 0.6357449)
        dist_to_wall = int(new_dist_to_wall*1000)
        if showPrint:
            print 'CALCULATED DIST', new_dist_to_wall
        #correct ratio for 3 pt goal is almost exactly 3 +- 0.1
        ratio = height/width
        #if showPrint: print "h:", height, "width:", width, "ratio:", ratio
        #for some reason, the point x1,y1 is the center, not a corner. It should be, but I won't question it.
        #draw centre point
        if showPrint:
            cv2.circle(goals_img, (x1,y1), 6, (244,255,255))
            cv2.line(goals_img, (GlobalWidth/2, GlobalHeight), (GlobalWidth/2, 0), (200,200,200))
            cv2.imshow('rects', goals_img)
        #now get distance from center of screen
        #320 happens to be half of the width of the screen
        #should probably use a variable to work with diff screens, but whatever
        dist = 0
        if (x1 < GlobalWidth/2):
            dist = -(GlobalWidth/2 - x1)
        elif (x1 > GlobalWidth/2):
            dist = (GlobalWidth/2 - (GlobalWidth - x1))
        elif (x1 == GlobalWidth/2):
            dist = 0
        #return all that info in format: distFromCenter, x1, y1, distToWall
        #this function also returns the finished image, even though it is never used in the main loop
        return str(dist) + ',' + str(x1) + ',' + str(y1) + ',' + str(dist_to_wall), goals_img
    else:
        #can't see anything: should report error
        return 'n', goals_img
if __name__ == '__main__':
    # Vision server entry point (Python 2). In competition mode it serves
    # distance readings over a TCP socket; in debugging mode it just loops
    # over frames locally.
    processor = Processor()
    picurl = 'http://10.31.28.11/jpg/1/image.jpg'
    #debugging is True if you want to bypass the socket connection and just work on image processing code
    debugging = True
    if (debugging == False):
        #create the socket server
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        serv_addr = ('10.31.28.12', 4242)
        print 'starting server on: ', serv_addr[0], ':', serv_addr[1]
        sock.bind(serv_addr)
        sock.listen(1)
        conn = None
        while True:
            try:
                #wait for connection- script will hang here until connection received
                conn, cli_addr = sock.accept()
                print 'connection from: ', cli_addr
                #on connection, wait for 'g'
                try:
                    while True:
                        try:
                            #hang here for data
                            recvd = conn.recv(4096)
                            print recvd
                            #this line used to be if (recvd == 'G'), but when testing with putty, I also
                            #received newlines, so this will work regardless
                            if ('G' in recvd):
                                #run command
                                result, img = getDistFromCenter()
                                print result
                                test = conn.send(str(result))
                                print 'sent:', test
                            else:
                                print 'not G'
                        # NOTE(review): bare except; swallows everything
                        # including programming errors -- revisit.
                        except:
                            print ' exception:', sys.exc_info()[0]
                            conn.close()
                            print 'conn closed'
                            break
                finally:
                    print 'connection closed'
                    conn.close()
            except KeyboardInterrupt:
                print 'LOSING'
                if conn: conn.close()
                break
    else:
        #are debugging
        while True:
            try:
                result, goals_img = getDistFromCenter()
                print 'SENDINGSENDINGSENDING: ', result
                #cv2.imshow('testing', goals_img)
                cv2.waitKey(1)
            except KeyboardInterrupt:
                break
# $Id: session.py,v 1.14 2010/03/20 18:29:22 jribbens Exp $
import time, hmac, re, random, os, errno
try:
import hashlib
sha = hashlib.sha1
shanew = hashlib.sha1
except ImportError:
import sha
shanew = sha.new
try:
import cPickle as pickle
except ImportError:
import pickle
class Error(Exception):
    """Base exception for session-handling failures."""
    pass
class Session(dict):
    """Abstract base session: a dict carrying an 'id' and a keyed 'hash'.

    Subclasses override _create/_load/save/tidy to persist the session
    somewhere. Python 2 code (hmac is fed str, not bytes).
    """

    def _make_hash(self, sid, secret):
        """Create a hash for 'sid'
        This function may be overridden by subclasses."""
        # Truncated HMAC over the session id, keyed with the site secret.
        return hmac.new(secret, sid, sha).hexdigest()[:8]

    def _create(self, secret):
        """Create a new session ID and, optionally hash
        This function must insert the new session ID (which must be 8 hexadecimal
        characters) into self["id"].
        It may optionally insert the hash into self["hash"]. If it doesn't, then
        _make_hash will automatically be called later.
        This function may be overridden by subclasses.
        """
        # Entropy from wall-clock, PRNG and the request's UNIQUE_ID.
        rnd = str(time.time()) + str(random.random()) + \
            str(self._req["UNIQUE_ID"])
        self["id"] = shanew(rnd).hexdigest()[:8]

    def _load(self):
        """Load the session dictionary from somewhere
        This function may be overridden by subclasses.
        It should return 1 if the load was successful, or 0 if the session could
        not be found. Any other type of error should raise an exception as usual."""
        return 1

    def save(self):
        """Save the session dictionary to somewhere
        This function may be overridden by subclasses."""
        pass

    def tidy():
        """Remove expired sessions from storage; overridden by subclasses."""
        pass
    tidy = staticmethod(tidy)

    def __init__(self, req, secret, root="", sid=None, shash=None, secure=0, domain=None, create_new=True):
        """Attach to an existing session (sid/shash) or create a new one.

        Args:
            req: request object; must support ["UNIQUE_ID"] lookup.
            secret: site secret used to key the id hash.
            sid: existing session id, if any.
            shash: hash accompanying sid; verified against secret.
            create_new: when true, create a session if none could be loaded.
        """
        dict.__init__(self)
        self["id"] = None
        self._req = req
        self.relocated = 0
        self.new = 0
        # try and determine existing session id
        if sid is not None:
            self["id"] = sid
            if shash is None:
                self["hash"] = self._make_hash(self["id"], secret)
            else:
                self["hash"] = shash
                # Reject ids whose supplied hash fails verification.
                if self["hash"] != self._make_hash(self["id"], secret):
                    self["id"] = None
        # try and load the session
        if self["id"] is not None:
            if not self._load():
                self["id"] = None
        # if no session was available and loaded, create a new one
        if self["id"] is None and create_new:
            if "hash" in self:
                del self["hash"]
            self.created = time.time()
            self.new = 1
            self._create(secret)
            if "hash" not in self:
                self["hash"] = self._make_hash(self["id"], secret)
class MemorySession(Session):
    """Session stored in a process-wide in-memory dict (not persistent)."""

    # Maps session id -> {"created": ts, "updated": ts, "data": dict}.
    _sessions = {}

    def _create(self, secret):
        """Generate an unused session id and register an empty record."""
        while 1:
            Session._create(self, secret)
            if self["id"] in self._sessions:
                continue  # id collision: try again
            self._sessions[self["id"]] = {"created": self.created,
                "updated": self.created, "data": {}}
            break

    def _load(self):
        """Load this session's data; return 1 on success, 0 if id unknown."""
        try:
            sess = self._sessions[self["id"]]
        except KeyError:
            return 0
        self.created = sess["created"]
        self.update(sess["data"])
        return 1

    def save(self):
        """Store the current dict contents back into the session record."""
        sess = self._sessions[self["id"]]
        sess["updated"] = time.time()
        sess["data"] = self.copy()

    def tidy(cls, max_idle=0, max_age=0):
        """Delete sessions older than max_age or idle longer than max_idle.

        Bug fix: the original iterated the session *keys* but indexed them
        like records (k["created"] -> TypeError) and deleted entries from
        the dict while iterating it (RuntimeError). Iterate a snapshot of
        the keys and look each record up instead.
        """
        now = time.time()
        for sid in list(cls._sessions.keys()):
            sess = cls._sessions[sid]
            if (max_age and sess["created"] < now - max_age) or \
                (max_idle and sess["updated"] < now - max_idle):
                del cls._sessions[sid]
    tidy = classmethod(tidy)
class GenericSQLSession(Session):
    """Session stored in a SQL table through a DB-API cursor (self.dbc).

    The session dict is pickled into the 'data' column.
    NOTE(review): session data is unpickled straight from the database --
    safe only while the table cannot be written by untrusted parties.
    """

    def _create(self, secret):
        """Insert a fresh session row, retrying on duplicate-id collisions."""
        while 1:
            Session._create(self, secret)
            self["hash"] = self._make_hash(self["id"], secret)
            try:
                # The string id is stored as-is (contrast MySQLSession).
                self.dbc.execute("INSERT INTO %s (ID,hash,created,updated,data)"
                    " VALUES (%%s,%%s,%%s,%%s,%%s)" % (self.table,),
                    (self["id"], self["hash"], int(self.created), int(self.created),
                    pickle.dumps({}, 1)))
                self.dbc.execute("COMMIT")
            except self.dbc.IntegrityError:
                pass  # duplicate id: loop and generate another
            else:
                break

    def _load(self):
        """Load created/data for this id; return 1 on success, 0 if absent."""
        self.dbc.execute("SELECT created,data FROM %s WHERE ID=%%s" % (self.table,),
            (self["id"],))
        if self.dbc.rowcount == 0:
            return 0
        row = self.dbc.fetchone()
        self.created = row[0]
        self.update(pickle.loads(row[1]))
        return 1

    def save(self):
        """Persist the pickled session dict and bump the updated stamp."""
        self.dbc.execute("UPDATE %s SET updated=%%s,data=%%s"
            " WHERE ID=%%s" % (self.table,), (int(time.time()),
            pickle.dumps(self.copy(), 1), self["id"]))
        self.dbc.execute("COMMIT")

    @staticmethod
    def tidy(dbc, table="sessions", max_idle=0, max_age=0):
        """Delete sessions idle longer than max_idle or older than max_age."""
        now = time.time()
        if max_idle:
            dbc.execute("DELETE FROM %s WHERE updated < %%s" % (table,),
                (now - max_idle,))
        if max_age:
            dbc.execute("DELETE FROM %s WHERE created < %%s" % (table,),
                (now - max_age,))
        if max_idle or max_age:
            dbc.execute("COMMIT")

    def remove(self):
        """Delete this session's row.

        NOTE(review): converts the id with long(..., 16), although _create
        and _load in this class store/query the plain string id (only
        MySQLSession uses the integer form) -- looks inconsistent; confirm
        before relying on it.
        """
        self.dbc.execute("DELETE FROM %s WHERE ID = %%s AND hash = %%s" % (self.table,), (long(self['id'], 16), self['hash'],))
        self.dbc.execute("COMMIT")

    def __init__(self, req, secret, dbc, table="sessions", **kwargs):
        """Bind the session to cursor *dbc* and table *table*, then init."""
        self.dbc = dbc
        self.table = table
        Session.__init__(self, req, secret, **kwargs)
class MySQLSession(GenericSQLSession):
    """SQL session specialized for MySQL.

    The 8-hex-digit session id is stored as an integer (long(id, 16)), and
    id-collision races are avoided with table locks rather than relying on
    IntegrityError as the generic class does.
    """

    def _create(self, secret):
        """Generate an unused id and insert its row under a table lock."""
        self.dbc.execute("LOCK TABLES %s WRITE" % (self.table,))
        while 1:
            Session._create(self, secret)
            self.dbc.execute("SELECT 1 FROM %s WHERE ID=%%s" % (self.table,),
                (long(self["id"], 16),))
            if self.dbc.rowcount == 0:
                break  # id unused; safe to insert while still locked
        self["hash"] = self._make_hash(self["id"], secret)
        self.dbc.execute("INSERT INTO %s (ID,hash,created,updated,data) VALUES " \
            "(%%s,%%s,%%s,%%s,%%s)" % (self.table,),
            (long(self["id"], 16), self["hash"], int(self.created),
            int(self.created), pickle.dumps({}, 1)))
        self.dbc.execute("UNLOCK TABLES")

    def _load(self):
        """Load created/data for this id; return 1 on success, 0 if absent."""
        self.dbc.execute("SELECT created,data FROM %s WHERE ID=%%s" % (self.table,),
            (long(self["id"], 16),))
        if self.dbc.rowcount == 0:
            return 0
        row = self.dbc.fetchone()
        self.created = row[0]
        # NOTE(review): unpickles data straight from the DB -- safe only if
        # the table cannot be written by untrusted parties.
        self.update(pickle.loads(row[1]))
        return 1

    def save(self):
        """Persist the pickled session dict and bump the updated stamp."""
        serialized = pickle.dumps(self.copy(), 1)
        self.dbc.execute("UPDATE %s SET updated=%%s,data=%%s WHERE ID=%%s" % (self.table,), (int(time.time()), serialized, long(self["id"], 16)))
        self.dbc.execute("COMMIT")
SQLSession = MySQLSession # backwards compatibility name
| Python |
class BasicHandler:
    """Base class for API action handlers; concrete handlers define run()."""

    def __init__(self):
        """No state is needed by the base handler."""
        pass
| Python |
import handler
from models import directories
class CreateDir(handler.BasicHandler):
    """Handler for the 'mkdir' API action."""

    def run(self, app, input):
        """Validate the request arguments for directory creation.

        Bug fix: the error dict used the bare name ``error`` (a NameError at
        runtime) instead of the string key 'error' that sibling handlers
        such as ListDir return.
        """
        self.input = input
        if not self.is_valid_input(): return { 'error': 'InvalidArguments' }
        # TODO: directory creation itself is not implemented; valid input
        # currently falls through and returns None.

    def is_valid_input(self):
        """A request must name either a path ('directory') or an id."""
        return 'directory' in self.input or 'directoryId' in self.input
| Python |
import handler
from models import directories
class ListDir(handler.BasicHandler):
    """Handler for the 'dir-get' API action: list a directory's contents."""

    def run(self, app, input):
        """Resolve the 'directory' path and return it with serialized contents.

        Returns an error dict when the argument is missing or the directory
        does not exist.
        """
        self.input = input
        if 'directory' not in self.input: return { 'error': 'InvalidArguments' }
        result = directories.get_by_hierarchical_path(self.input['directory'])
        if result is None: return { 'error' : 'DirectoryNotExists' }
        result['directory'] = result['directory'].serialize()
        result['content'] = [entry.serialize() for entry in result['content']]
        return result
| Python |
# Handler modules exposed by this package (one per API action group).
__all__ = [
    'topicwrite',
    'getfile',
    'mkdir',
    'listdir',
    'login',
]
# -*- coding: utf-8 -*-
import dbo, sys
from string import Template
from web.db import sqlquote
from objects.directory import Directory
def _parse_hierarchy(paths):
    """
    Parse the path hierarchy into LEFT JOIN clauses.
    Each segment joins directories as dirN onto its parent dir(N-1);
    numeric segments match by id, all others by name. A final extra join
    (dir(N+1)) selects the contents of the last directory.
    @param list paths
    @return tuple (left_joins, vars, dir_no)
    """
    vars = {}
    left_joins = []
    dir_no = 0
    for path in paths:
        dir_no += 1
        # Numeric segments address directories by id, others by name.
        if path.isdigit(): param = 'id'
        else: param = 'name'
        tpl = '\tLEFT JOIN directories AS dir${dir_no} ON dir${dir_no}.' + param + ' = ' + sqlquote(path) + ' AND dir${dir_no}.parent = dir${dir_parent}.id'
        left_joins.append(Template(str(tpl)).substitute(dir_no=dir_no, dir_parent=dir_no-1))
    # Select the result rows from the last directory in the hierarchy.
    left_joins.append(Template('\tLEFT JOIN directories AS dir${dir_no}'\
        ' ON dir${dir_no}.parent = dir${dir_parent}.id')\
        .substitute(dir_no=dir_no+1, dir_parent=dir_no))
    return (left_joins, vars, dir_no)
def _search_by_hierarchy(hierarchy):
    """Build and run the hierarchical JOIN query for *hierarchy*.

    The first path segment anchors at a root directory (parent = 0); the
    rest become LEFT JOINs via _parse_hierarchy. Returns a dict with
    'root_dir' (rows of the resolved directory) and 'dir_contents' (rows
    of its children). Fix: removed the leftover debug ``print`` of the
    SQL, which polluted stdout on every query.
    """
    (left_joins, vars, dir_no) = _parse_hierarchy(hierarchy[1:])
    vars['root_dir'] = hierarchy[0]
    # The root segment also addresses by id when numeric, by name otherwise.
    if hierarchy[0].isdigit(): root_dir_param = 'id'
    else: root_dir_param = 'name'
    # Assemble the query: the resolved dir's columns are aliased r_*,
    # its contents' columns keep their plain names.
    cols = []
    for key in ('id', 'type', 'parent', 'name', ):
        cols.append('dir' + str(dir_no) + '.' + key + ' AS r_' + key + ', '\
            'dir' + str(dir_no+1) + '.' + key)
    query = 'SELECT ' + ', '.join(cols) + \
        ' FROM directories AS dir0\n' + \
        '\n'.join(left_joins) + \
        '\nWHERE dir0.' + root_dir_param + ' = ' + sqlquote(vars['root_dir']) + ' AND dir0.parent = 0'
    # Run the query.
    db = dbo.get_cursor()
    db.execute(str(query))
    tables = ('root_dir', 'dir_contents')
    result = {}
    row_data = {}
    for row in db:
        if row is None:
            break
        num = 0
        for column in db.description:
            column = column[0]
            # Route each column to its logical table by the r_ alias prefix.
            if column.startswith('r_'):
                table = tables[0]
                column = column[2:]
            else: table = tables[1]
            if table not in result:
                result[table] = []
            if table not in row_data:
                row_data[table] = {}
            row_data[table][column] = row[num]
            num += 1
        for (key, value) in row_data.iteritems():
            result[key].append(value)
        row_data = {}
    return result
def _get_root_contents():
    """
    Return all top-level directories (those with parent = 0).
    @return list
    """
    conn = dbo.get_conn()
    return conn.query('SELECT root_dir.* FROM directories root_dir WHERE root_dir.parent = 0')
def get_by_hierarchical_path(hierarchy):
    """
    Return the contents of the directory at the given path.
    @param string hierarchy  slash-separated path, e.g. '/a/b' or '/12/b'
    @return dict {'directory': Directory, 'content': list of Directory}
        or None when the path cannot be resolved
    """
    hierarchy = filter(lambda item: len(item.strip()) != 0, hierarchy.split('/')) # drop empty path segments
    dir_items = []
    if not hierarchy:
        # Empty path: list everything under the (virtual) root directory.
        results = _get_root_contents()
        root_dir = Directory()
    else:
        results = _search_by_hierarchy(hierarchy)
        # The path is unresolved when no root row came back at all.
        if 'root_dir' not in results \
            or results['root_dir'] is None \
            or results['root_dir'][0]['id'] is None:
            return None
        root_dir = Directory(results['root_dir'][0])
        results = results['dir_contents']
    if not results:
        return None
    for result in results:
        # Wrap subdirectory rows; other row types are ignored here.
        if result['type'] == 'directory':
            dir_items.append(Directory(result.copy()))
    return { 'directory' : root_dir, 'content' : dir_items }
| Python |
#-*- encoding: utf-8 -*-
import web
# Module-wide database connection, established by connect().
__conn = None

def connect(*args, **kwargs):
    """Open the shared database connection; arguments go to web.database()."""
    global __conn
    __conn = web.database(*args, **kwargs)
def get_conn():
    """Return the shared database connection (None before connect())."""
    return __conn
def get_cursor():
    """
    Return the database connection as a standard Python DB-API cursor.
    """
    return get_conn()._db_cursor()
| Python |
__all__ = ['users', 'directories'] | Python |
#-*- encoding: utf-8 -*-
# Application entry point: wires API action names to handler classes and
# starts the web.py application.
import web
#web.config.debug = False
from apihandler import *
from handlers import *
from models import dbo

# Connect to the database.
# NOTE(review): hard-coded root credentials with an empty password --
# move these to configuration before deploying.
dbo.connect(dbn='mysql', user='root', pw='', db='console')

# Register the API command handlers.
handlers = {
    # User commands
    'login' : login.Login,
    'registration' : login.Registration,
    'logout' : login.Logout,
    # Directory commands
    'dir-get' : listdir.ListDir,
    'mkdir' : mkdir.CreateDir,
}
for k in handlers:
    HandleApi.add_handler(k, handlers[k])

HandleApi.app = web.application(('/', 'HandleApi'), globals())

if __name__ == "__main__": HandleApi.app.run()
#-*- encoding: utf-8 -*-
import web
import json
from objects.entity import Entity
def json_encode(func):
    """
    Decorator that JSON-encodes whatever *func* returns.
    Entity instances are serialized through their own serialize() method;
    everything else goes through json.dumps.
    @return func
    """
    def wrapper(*args, **kwargs):
        payload = func(*args, **kwargs)
        if isinstance(payload, Entity):
            return payload.serialize()
        return json.dumps(payload)
    return wrapper
# Handles API requests by dispatching to registered action handlers.
class HandleApi:
    # action name -> handler instance, filled via add_handler().
    __handlers = {}
    # web.application instance; assigned by the application module.
    app = None

    @json_encode
    def GET(self):
        """Look up the 'action' query parameter and delegate to its handler.

        Returns an error dict for a missing or unknown action. The local
        variable was renamed from ``input`` to avoid shadowing the builtin.
        """
        params = web.input(_unicode=False)
        if ('action' not in params) or (params.action not in HandleApi.__handlers):
            return { 'error': 'InvalidAction' }
        return HandleApi.__handlers[params.action].run(HandleApi.app, params)

    @staticmethod
    def add_handler(action, handler_class):
        """Instantiate *handler_class* once and register it under *action*."""
        HandleApi.__handlers[action] = handler_class()
| Python |
class Entity(object):
    """Base class for model objects that keep their fields in a dict.

    Fields are accessed with item syntax (entity['name']); missing fields
    raise AttributeError. Subclasses may define a _default_data dict of
    default field values and must implement serialize()/unserialize().
    """

    # Internal field storage; assigned in __init__.
    _data = None

    def __init__(self, data = None):
        """Initialize field storage, merging *data* over any class defaults.

        Fix: the default-merge loop used Python 2-only iterkeys(); the
        equivalent dict.update works on both Python 2 and 3.
        """
        if hasattr(self, '_default_data'):
            self._data = self._default_data.copy()
            if data is not None:
                self._data.update(data)
        elif data is not None:
            self._data = data.copy()
        else:
            self._data = dict()

    def serialize(self):
        """Return a serializable representation; implemented by subclasses."""
        raise NotImplementedError("Abstract method")

    def unserialize(self, json):
        """Populate the entity from serialized data; implemented by subclasses."""
        raise NotImplementedError("Abstract method")

    def __getitem__(self, key):
        """Return field *key*, raising AttributeError when it is missing."""
        try:
            return self._data[key]
        except KeyError:
            raise AttributeError(key)

    def __setitem__(self, key, value):
        """Set field *key*. (The original wrapped this assignment in a dead
        try/except KeyError -- dict assignment never raises KeyError.)"""
        self._data[key] = value
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.