code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
#!/usr/bin/python2.5
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Datastore models."""
from google.appengine.api import memcache
from google.appengine.ext import db
import modelutils
class Error(Exception):
  """Base class for errors raised by this module."""
  pass
class BadAccountType(Error):
  """Raised when an account type is not one of UserInfo.KNOWN_TYPES."""
  pass
# Models
class UserInfo(db.Model):
  """Basic user statistics/preferences data.

  The entity key name is 'accounttype:user_id'.
  """
  first_visit = db.DateTimeProperty(auto_now_add=True)
  last_edit = db.DateTimeProperty(auto_now=True)
  moderator = db.BooleanProperty(default=False)
  moderator_request_email = db.StringProperty()
  moderator_request_desc = db.TextProperty()
  moderator_request_admin_notes = db.StringProperty(multiline=True)

  def account_type(self):
    """Returns one of (FRIENDCONNECT, FACEBOOK, TEST), parsed from the key."""
    key_name = self.key().name()
    return key_name.split(':', 1)[0]

  def user_id(self):
    """Returns the user id portion of the key name."""
    key_name = self.key().name()
    return key_name.split(':', 1)[1]

  # Known types of accounts. Type must not start with a number.
  FRIENDCONNECT = 'friendconnect'
  FACEBOOK = 'facebook'
  TEST = 'test'
  KNOWN_TYPES = (FRIENDCONNECT, FACEBOOK, TEST)

  @classmethod
  def get_or_insert_user(cls, account_type, user_id):
    """Gets existing or creates a new user.

    Similar to get_or_insert; increments UserStats only when a new
    entity was actually created.

    Args:
      account_type: Type of account used (one of KNOWN_TYPES).
      user_id: address within that system.
    Returns:
      UserInfo for this user.
    Raises:
      BadAccountType: if the account_type is unknown.
      Various datastore exceptions.
    """
    if account_type not in cls.KNOWN_TYPES:
      raise BadAccountType()
    key_name = '%s:%s' % (account_type, user_id)

    def txn():
      """Transaction to get or insert user."""
      entity = cls.get_by_key_name(key_name)
      created_entity = False
      if entity is None:
        entity = cls(key_name=key_name)
        entity.put()
        created_entity = True
      return (entity, created_entity)

    # BUG FIX: the old code also did an unused get_by_key_name() lookup
    # outside the transaction; the transactional lookup is authoritative.
    (user_info, created_entity) = db.run_in_transaction(txn)
    if created_entity:
      UserStats.increment(account_type, user_id)
    return user_info
class UserStats(db.Model):
  """Sharded counter tracking how many users we have."""
  count = db.IntegerProperty(default=0)

  @classmethod
  def increment(cls, account_type, user_id):
    """Bump the shard for this account_type; user_id only selects a shard."""
    def txn():
      """Transaction to increment account_type's stats."""
      # Shard key is e.g. 'facebook:ab' -- well under 1000 shards, and the
      # account-type prefix doubles as a rough per-type breakdown.
      shard_name = '%s:%s' % (account_type, user_id[:2])
      counter = cls.get_by_key_name(shard_name)
      if not counter:
        counter = cls(key_name=shard_name)
      counter.count += 1
      counter.put()
    db.run_in_transaction(txn)

  @staticmethod
  def get_count():
    """Returns the total number of users, summed across all shards."""
    return sum(counter.count for counter in UserStats.all())
class UserInterest(db.Model):
  """Our record of a user's actions related to an opportunity."""
  # Key name is built by make_key_name() from the stable ID from base plus
  # the user's key name.  The stable ID is probably not the same ID
  # provided in the feed from providers.
  DATASTORE_PREFIX = 'id:'
  user = db.ReferenceProperty(UserInfo, collection_name='interests')
  opp_id = db.StringProperty()
  liked_last_modified = db.DateTimeProperty()
  # The interest types (liked, will_attend, etc) must exist with the
  # same property names in UserInterest and VolunteerOpportunityStats,
  # and be in sync with USER_INTEREST_ATTRIBUTES at the end of this file.
  liked = db.IntegerProperty(default=0)
  will_attend = db.IntegerProperty(default=0)
  flagged = db.IntegerProperty(default=0)

  @classmethod
  def make_key_name(cls, user_entity, opp_id):
    """Generate key name for a given user_entity/opp_id pair.

    NOTE(review): DATASTORE_PREFIX already ends in ':', so this produces
    'id::<opp_id>#<user key name>' (double colon).  Confirm whether the
    extra ':' is intentional before changing it -- existing stored keys
    would depend on the current format.
    """
    return '%s:%s#%s' % (cls.DATASTORE_PREFIX, opp_id, user_entity.key().name())
class VolunteerOpportunityStats(db.Model):
  """Basic statistics about opportunities."""
  # The __key__ is 'id:' + volunteer_opportunity_id.
  DATASTORE_PREFIX = 'id:'
  MEMCACHE_PREFIX = 'VolunteerOpportunityStats:'
  MEMCACHE_TIME = 60000  # seconds
  last_edit = db.DateTimeProperty(auto_now=True)
  # The interest types (liked, will_attend, etc) must exist with the
  # same property names in UserInterest and VolunteerOpportunityStats,
  # and be in sync with USER_INTEREST_ATTRIBUTES at the end of this file.
  liked = db.IntegerProperty(default=0)
  will_attend = db.IntegerProperty(default=0)
  flagged = db.IntegerProperty(default=0)
  # Blacklist is controlled by the moderators only; it is not a statistic.
  blacklisted = db.IntegerProperty(default=0)

  @classmethod
  def increment(cls, volunteer_opportunity_id, relative_attributes,
                absolute_attributes=None):
    """Helper to increment volunteer opportunity stats.

    Example:
      VolunteerOpportunityStats.increment(opp_id,
          {USER_INTEREST_LIKED: 1, USER_INTEREST_WILL_ATTEND: 1})

    Args:
      volunteer_opportunity_id: ID of opportunity.
      relative_attributes: dict of attr_name:value deltas, applied
          relative to the current values.
      absolute_attributes: dict of attr_name:value pairs set as
          absolute values.
    Returns:
      Success boolean.
    """
    entity = VolunteerOpportunityStats.get_or_insert(
        cls.DATASTORE_PREFIX + volunteer_opportunity_id)
    if not entity:
      return False
    (new_entity, unused_deltas) = modelutils.set_entity_attributes(
        entity, absolute_attributes, relative_attributes)
    # Keep memcache in sync so readers see the updated counts quickly.
    memcache.set(cls.MEMCACHE_PREFIX + volunteer_opportunity_id, new_entity,
                 time=cls.MEMCACHE_TIME)
    return True

  @classmethod
  def set_blacklisted(cls, volunteer_opportunity_id, value):
    """Helper to set the blacklisted value and update memcache."""
    # A wrapper for 'increment'--it's overkill, but manages memcache for us.
    return cls.increment(volunteer_opportunity_id, {}, {'blacklisted': value})

  @classmethod
  def add_default_entities_to_memcache(cls, ids):
    """Add blank entities to memcache so get_by_ids quickly returns them."""
    entities = dict((key, cls(key_name=cls.DATASTORE_PREFIX + key))
                    for key in ids)
    memcache.add_multi(entities, time=cls.MEMCACHE_TIME,
                       key_prefix=cls.MEMCACHE_PREFIX)
class VolunteerOpportunity(db.Model):
  """Basic information about opportunities.

  Kept separate from VolunteerOpportunityStats because these entries hold
  no counts, so they need not be operated on transactionally.
  """
  # The __key__ is 'id:' + volunteer_opportunity_id.
  DATASTORE_PREFIX = 'id:'
  MEMCACHE_PREFIX = 'VolunteerOpportunity:'
  MEMCACHE_TIME = 60000  # seconds
  # URL to the Google Base entry for this opportunity.
  base_url = db.StringProperty()
  # When we last updated the Base URL.
  last_base_url_update = db.DateTimeProperty()
  # Incremented (possibly incorrectly, to avoid transactions) when we try
  # to load the data from base but fail; paired with the last failure time.
  base_url_failure_count = db.IntegerProperty(default=0)
  last_base_url_update_failure = db.DateTimeProperty()
  # TODO(paul): added_to_calendar, added_to_facebook_profile, etc
# Interest attribute names shared by UserInterest and
# VolunteerOpportunityStats; these three names must stay in sync with the
# property names declared on both model classes above.
USER_INTEREST_LIKED = 'liked'
USER_INTEREST_WILL_ATTEND = 'will_attend'
USER_INTEREST_FLAGGED = 'flagged'
USER_INTEREST_ATTRIBUTES = (
USER_INTEREST_LIKED,
USER_INTEREST_WILL_ATTEND,
USER_INTEREST_FLAGGED,
)
| Python |
#!/usr/bin/python
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import logging
import re
import hashlib
import geocode
import utils
from xml.dom import minidom
from xml.sax.saxutils import escape
from google.appengine.ext import db
class Error(Exception):
  """Base exception for this module."""
  pass
# status codes
# - string names to make them human-readable, i.e. easier debugging
# - leading number provides SQL/GQL sorting without an extra field
# (sorting is important for the moderator UI, to make sure most-
# likely-to-be-safe is ranked higher). Note: edited comes before
# plain new
# - substrings (e.g. "NEW") provide groupings, e.g. is this a 'new'
# listing, so the moderator UI know what visual treatment to give it.
# Note: all four terminal (accepted/rejected) states share the "10."
# prefix so they sort together, below every pending "NEW*" state.
NEW_EDITED_VERIFIED = "90.NEW_EDITED_VERIFIED"
NEW_VERIFIED = "80.NEW_VERIFIED"
NEW_EDITED = "70.NEW_EDITED"
NEW = "50.NEW"
NEW_DEFERRED = "40.NEW_DEFERRED"
ACCEPTED_MANUAL = "10.ACCEPTED_MANUAL"
ACCEPTED_AUTOMATIC = "10.ACCEPTED_AUTOMATIC"
REJECTED_MANUAL = "10.REJECTED_MANUAL"
REJECTED_AUTOMATIC = "10.REJECTED_AUTOMATIC"
class Posting(db.Model):
  """Postings going through the approval process."""
  # Key is the assigned ID (not the stable ID).
  item_id = db.StringProperty(default="")
  status = db.StringProperty(default=NEW)
  # For queries, parse out these fields; exact datatypes are not critical.
  quality_score = db.FloatProperty(default=1.0)
  creation_time = db.DateTimeProperty(auto_now_add=True)
  start_date = db.DateProperty(auto_now_add=True)
  # listing_xml is the full contents for the listing, assuming it gets
  # approved.  Note: listing_xml is also used for fulltext queries.
  listing_xml = db.TextProperty(default="")
  # Parse out these fields to improve latency in the moderation UI.
  title = db.StringProperty(default="")
  description = db.TextProperty(default="")
  # As per http://code.google.com/p/googleappengine/issues/detail?id=105
  # there's no point in GeoPt, especially since this is display-only;
  # there are even bugs (http://aralbalkan.com/1355) in GeoPt.
  #todo latlong = db.StringProperty(default="")

  def statusChar(self):
    """One-letter moderation verdict: 'A', 'R', or '' while pending."""
    if "ACCEPTED" in self.status:
      return "A"
    if "REJECTED" in self.status:
      return "R"
    return ""

  def showInModerator(self):
    """True while the posting is in one of the NEW states."""
    return "NEW" in self.status

  def isLive(self):
    """True once the posting has been accepted."""
    return "ACCEPTED" in self.status

  def reset(self):
    """Drop the posting back to plain NEW and persist."""
    self.status = NEW
    self.put()

  def edit(self):
    """Mark the posting as edited and persist."""
    self.status = NEW_EDITED
    self.put()

  def verify(self):
    """Promote NEW -> NEW_VERIFIED, or NEW_EDITED -> NEW_EDITED_VERIFIED."""
    if self.status == NEW:
      self.status = NEW_VERIFIED
      self.put()
    elif self.status == NEW_EDITED:
      # TODO: how do we know the edits didn't change after the email was sent?
      self.status = NEW_EDITED_VERIFIED
      self.put()

  def accept(self, type="MANUAL"):
    """Accept the posting; type is 'MANUAL' or 'AUTOMATIC'."""
    self.status = ACCEPTED_AUTOMATIC if type == "AUTOMATIC" else ACCEPTED_MANUAL
    self.put()

  def reject(self, type="MANUAL"):
    """Reject the posting; type is 'MANUAL' or 'AUTOMATIC'."""
    self.status = REJECTED_AUTOMATIC if type == "AUTOMATIC" else REJECTED_MANUAL
    self.put()

  def computeQualityScore(self):
    """Recompute and persist quality_score (currently always 1.0)."""
    # TODO: walk the object to look for missing/bad fields.
    self.quality_score = 1.0
    self.put()
def process(args):
  """Apply moderator verdicts from a form-args dict.

  Each key of the form 'v<datastore key>' maps to a verdict code:
  'A' accept, 'R' reject, 'V' verify, 'X' delete, '' reset.
  Keys not starting with 'v' and already-deleted postings are skipped.

  Args:
    args: dict mapping form field name -> verdict string.
  """
  for arg, verdict in args.items():
    # BUG FIX: 'arg[0]' raised IndexError on an empty key; startswith is
    # safe and reads better.
    if not arg.startswith("v"):
      continue
    keystr = arg[1:]
    el = Posting.get(keystr)
    if el is None:  # BUG FIX: was 'el == None'
      # Already deleted!
      continue
    # TODO: remove quality score hack-- this is how to rank in moderator UI
    if verdict == "A":
      el.accept()
    elif verdict == "R":
      el.reject()
    elif verdict == "V":
      el.verify()
    elif verdict == "X":
      logging.debug("deleting: " + keystr + " title=" + el.title)
      el.delete()
    elif verdict == "":
      el.reset()
def query(num=25, start=1, quality_score=0.5, start_date="2009-01-01"):
  """Fetch postings for the moderator UI, formatted for display.

  When quality_score is exactly 0.0, results are filtered by start_date;
  otherwise they are ranked by quality_score.
  """
  # TODO: GQL doesn't support string-CONTAINS, limiting keyword search.
  # TODO: GQL doesn't let you do inequality comparison on multiple fields.
  limit_clause = "LIMIT %d OFFSET %d" % (int(num), int(start))
  if quality_score == 0.0:
    sd = datetime.strptime(start_date, "%Y-%m-%d")
    q = db.GqlQuery("SELECT * FROM Posting "
                    "WHERE start_date >= :1 "
                    "ORDER BY status ASC, start_date ASC " + limit_clause,
                    sd.date())
  else:
    q = db.GqlQuery("SELECT * FROM Posting "
                    "ORDER BY status ASC,quality_score DESC " + limit_clause)
  reslist = []
  for result in q.fetch(num):
    result.key = str(result.key())
    # Crude XML-to-HTML formatting: turn tag boundaries into <br/> line
    # breaks and closing brackets into ': ' separators.
    fmtd = re.sub(r'><', '-qbr--', result.listing_xml)
    fmtd = re.sub(r'(<?/[a-zA-Z]+-qbr--)+', '-qbr--', fmtd)
    fmtd = re.sub(r'>', ': ', fmtd)
    fmtd = re.sub(r'-qbr--', '<br/>', fmtd)
    result.listing_fmtd = re.sub(r'(<br/>)+', '<br/>', fmtd)
    result.status_char = result.statusChar()
    reslist.append(result)
  return reslist
def create_from_xml(xml):
  """Create a Posting from a VolunteerOpportunity XML string.

  Parses out title, description and startDate for the moderation UI,
  assigns a content-hash item_id, and stores the posting.

  Args:
    xml: the full listing XML string.
  Returns:
    The datastore key of the new Posting, or "" if the XML is unparsable.
  """
  try:
    dom = minidom.parseString(xml)
  except Exception:
    # BUG FIX: was a bare 'except:', which also swallowed SystemExit and
    # KeyboardInterrupt.  Unparsable XML still just yields "".
    return ""
  posting = Posting(listing_xml=xml)
  posting.title = utils.xml_elem_text(dom, "title", '')
  logging.debug("create_from_xml: title=" + posting.title)
  logging.debug("create_from_xml: xml=" + xml)
  posting.description = utils.xml_elem_text(dom, "description", '')
  try:
    start_date = datetime.strptime(
        utils.xml_elem_text(dom, "startDate", ''), "%Y-%m-%d")
    posting.start_date = start_date.date()
  except ValueError:
    # Ignore a bad (or missing) startDate; the model default stands.
    pass
  posting.item_id = hashlib.md5(xml + str(posting.creation_time)).hexdigest()
  posting.put()
  return posting.key()
# Whitelist of recognized form-field names for user-submitted postings;
# cleanup_args() escapes these and blanks everything else.  Values are
# dummy 1s -- only key membership matters.
argnames = {
"title":1, "description":1, "skills":1, "virtual":1, "addr1":1, "addrname1":1,
"sponsoringOrganizationName":1, "openEnded":1, "startDate":1,
"startTime":1, "endTime":1, "endDate":1, "contactNoneNeeded":1,
"contactEmail":1, "contactPhone":1, "contactName":1, "detailURL":1,
"weeklySun":1, "weeklyMon":1, "weeklyTue":1, "weeklyWed":1, "weeklyThu":1,
"weeklyFri":1, "weeklySat":1, "biweeklySun":1, "biweeklyMon":1,
"biweeklyTue":1, "biweeklyWed":1, "biweeklyThu":1, "biweeklyFri":1,
"biweeklySat":1, "recurrence":1, "audienceAll":1, "audienceAge":1,
"minAge":1, "audienceSexRestricted":1, "sexRestrictedTo":1,
"commitmentHoursPerWeek":1, "city":1, "region":1, "postalCode":1,
"country":1, "street1":1, "street2":1, "location_string":1
}
# TODO: replace with a better parser-- after wasting hours, I gave up
# on strptime().  Do not add to utils.py -- this is a bad hack.
def parseTimestamp(dateStr, timeStr):
  """Parse loosely-formatted date and time strings into a datetime.

  Accepts MM/DD/YYYY, YYYY/MM/DD, or MM/DD/YY (mapped to 19YY) dates --
  separators may be '/', '-', or absent -- plus an 'HH:MM [AM|PM]' time.

  Returns:
    A datetime, or None if either string is unparsable or out of range.
  """
  dateStr = dateStr.strip()
  grp = re.match(r'(\d?\d)[/-]?(\d?\d)[/-]?(\d\d\d\d)', dateStr)
  if grp:
    month = int(grp.group(1))
    day = int(grp.group(2))
    year = int(grp.group(3))
  else:
    # BUG FIX: try the YYYY[-]MM[-]DD form *before* the two-digit-year
    # form; previously 'YYYY-MM-DD' input was consumed by the two-digit
    # pattern (month = first two digits of the year), yielding None or a
    # wrong date.
    grp = re.match(r'(\d\d\d\d)[/-]?(\d\d)[/-]?(\d\d)', dateStr)
    if grp:
      year = int(grp.group(1))
      month = int(grp.group(2))
      day = int(grp.group(3))
    else:
      grp = re.match(r'(\d?\d)[/-]?(\d?\d)[/-]?(\d\d)', dateStr)
      if grp:
        month = int(grp.group(1))
        day = int(grp.group(2))
        # NOTE(review): two-digit years map to 19YY, so '09' means 1909.
        # Preserved as-is; confirm whether a 2000s window was intended.
        year = int(grp.group(3)) + 1900
      else:
        return None
  hour = minute = 0
  timeStr = timeStr.strip().upper()
  grp = re.match(r'(\d?\d):(\d\d) *(AM|PM)?', timeStr)
  if grp:
    hour = int(grp.group(1))
    minute = int(grp.group(2))
    ampm = grp.group(3)
    # BUG FIX: 12-hour clock edge cases.  The old code added 12 for any
    # PM time, turning '12:30 PM' into hour 24 (and so returning None),
    # and left '12:30 AM' at hour 12 instead of 0.
    if ampm == "PM" and hour != 12:
      hour += 12
    elif ampm == "AM" and hour == 12:
      hour = 0
  else:
    return None
  try:
    return datetime(year, month, day, hour, minute, 0)
  except ValueError:
    return None
def cleanup_args(vals):
  """Sanitize a user-submitted form dict in place.

  XML-escapes whitelisted fields (argnames), blanks unknown ones, fills
  in missing fields, and blanks out mutually-incompatible field groups.

  Args:
    vals: dict of form field name -> string value; modified in place.
  """
  # Keep only known argnames; escape those so they are XML-safe.
  for key in vals:
    if key in argnames:
      vals[key] = escape(vals[key])
      #vals[key] = re.sub(r'(<!\[CDATA\[\|\]\]>)', r'', vals[key])
    else:
      vals[key] = ""
  for key in argnames:
    if key not in vals:
      vals[key] = ""
  # Blank out incompatible fields.
  if vals["virtual"] != "No":
    vals["virtual"] = "Yes"
    vals["addr1"] = vals["addrname1"] = ""
  if vals["openEnded"] != "No":
    vals["openEnded"] = "Yes"
    vals["startDate"] = vals["startTime"] = ""
    vals["endDate"] = vals["endTime"] = ""
  # footprint isn't very interesting when it comes to gender
  if len(vals["sexRestrictedTo"]) < 1:
    vals["sexRestrictedTo"] = ""
  elif vals["sexRestrictedTo"][0].upper() == "M":
    vals["sexRestrictedTo"] = "M"
  elif vals["sexRestrictedTo"][0].upper() == "F":
    vals["sexRestrictedTo"] = "F"
  else:
    vals["sexRestrictedTo"] = ""
  # Once, one-time or weekly: blank out the biweekly day fields.
  if (vals["recurrence"] == "Weekly" or
      vals["recurrence"] == "No" or
      vals["recurrence"] == "Daily"):
    for arg in argnames:
      if arg.startswith("biweekly"):
        # BUG FIX: was 'vals[arg] == ""' -- a no-op comparison instead of
        # an assignment, so the fields were never actually blanked.
        vals[arg] = ""
  # Once, one-time or biweekly: blank out the weekly day fields.
  if (vals["recurrence"] == "BiWeekly" or
      vals["recurrence"] == "No" or
      vals["recurrence"] == "Daily"):
    for arg in argnames:
      if arg.startswith("weekly"):
        # BUG FIX: same no-op comparison as above.
        vals[arg] = ""
def add_new_fields(vals, newvals):
  """Derive computed fields (geocode, parsed dates, ages) into newvals."""
  if vals["country"] == "":
    vals["country"] = "US"
  # Assemble a single geocodable address string.
  addr = " ".join([vals["street1"], vals["street2"], vals["city"],
                   vals["region"], vals["country"]])
  newvals["complete_addr"] = addr
  logging.debug("post: geocoding " + addr)
  latlong = geocode.geocode(addr)
  logging.debug("post: latlong=" + latlong)
  if latlong == "":
    newvals["latitude"] = newvals["longitude"] = ""
  else:
    newvals["latitude"], newvals["longitude"] = latlong.split(",")[:2]
  newvals["parsedStartDate"] = newvals["parsedStartTime"] = ""
  newvals["parsedEndDate"] = newvals["parsedEndTime"] = ""
  if vals["openEnded"] == "No":
    start_ts = parseTimestamp(vals["startDate"], vals["startTime"])
    if start_ts:
      newvals["parsedStartDate"] = start_ts.strftime("%Y-%m-%d")
      newvals["parsedStartTime"] = start_ts.strftime("%H:%M:%S")
    end_ts = parseTimestamp(vals["endDate"], vals["endTime"])
    if end_ts:
      newvals["parsedEndDate"] = end_ts.strftime("%Y-%m-%d")
      newvals["parsedEndTime"] = end_ts.strftime("%H:%M:%S")
  # Minimum age: named audience buckets win over the raw minAge field.
  if vals["audienceAge"] == "seniors":
    newvals["computedMinAge"] = 60
  elif vals["audienceAge"] == "teens":
    newvals["computedMinAge"] = 13
  elif vals["audienceAge"] == "anyage":
    newvals["computedMinAge"] = 0
  else:
    try:
      newvals["computedMinAge"] = int(vals["minAge"])
    except Exception:
      newvals["computedMinAge"] = 0
  # Hours per week: non-numeric or negative input collapses to 0.
  try:
    hours = int(vals["commitmentHoursPerWeek"])
  except Exception:
    hours = 0
  newvals["computedCommitmentHoursPerWeek"] = max(hours, 0)
def create_from_args(vals, computed_vals):
  """Build listing XML from sanitized form args and store a Posting.

  Note: hacked forms are not a big worry because CAPTCHA gates
  submissions.

  Args:
    vals: raw form args; sanitized in place via cleanup_args().
    computed_vals: dict populated in place via add_new_fields().
  Returns:
    (http_status, posting_key_or_empty, body) tuple -- body is the XML on
    success, or an error message on failure.
  """
  cleanup_args(vals)
  add_new_fields(vals, computed_vals)
  if vals["virtual"] == 'No' and computed_vals["latitude"] == "":
    return 402, "", "cannot find address: '"+computed_vals["complete_addr"]+"'"
  xml = "<VolunteerOpportunity>"
  if vals["recaptcha_response_field"] == "test":
    # basic security measure
    xml += "<isTest>Yes</isTest>"
    vals["title"] = "T:" + vals["title"]
    vals["description"] = "TEST DELETEME: " + vals["description"]
  # TODO: organization
  #xml += "<volunteerOpportunityID>%d</volunteerOpportunityID>" % (item_id)
  #xml += "<sponsoringOrganizationIDs><sponsoringOrganizationID>%d</sponsoringOrganizationID></sponsoringOrganizationIDs>" % (item_id)
  #xml += "<volunteerHubOrganizationIDs><volunteerHubOrganizationID>%s</volunteerHubOrganizationID></volunteerHubOrganizationIDs>" % ("")
  xml += "<title>%s</title>" % (vals["title"])
  xml += "<description>%s</description>" % (vals["description"])
  xml += "<skills>%s</skills>" % (vals["skills"])
  xml += "<minimumAge>%s</minimumAge>" % (str(computed_vals["computedMinAge"]))
  xml += "<detailURL>%s</detailURL>" % (vals["detailURL"])
  xml += "<locations>"
  xml += "<location>"
  xml += "<name>%s</name>" % (vals["addrname1"])
  xml += "<city>%s</city>" % (vals["city"])
  xml += "<region>%s</region>" % (vals["region"])
  xml += "<postalCode>%s</postalCode>" % (vals["postalCode"])
  xml += "<country>%s</country>" % (vals["country"])
  xml += "<latitude>%s</latitude>" % (computed_vals["latitude"])
  xml += "<longitude>%s</longitude>" % (computed_vals["longitude"])
  xml += "</location>"
  xml += "</locations>"
  # TODO: category tags
  #xml += "<categoryTags>"
  #xml += "<categoryTag>Community</categoryTag>"
  #xml += "</categoryTags>"
  xml += "<dateTimeDurations>"
  xml += "<dateTimeDuration>"
  xml += "<openEnded>%s</openEnded>" % (vals["openEnded"])
  if vals["openEnded"] == "No":
    # BUG FIX: add_new_fields() stores these under 'parsedStartDate',
    # 'parsedStartTime', etc.; the old keys ('startDate', ...) raised
    # KeyError whenever openEnded was "No".
    xml += "<startDate>%s</startDate>" % (computed_vals["parsedStartDate"])
    xml += "<startTime>%s</startTime>" % (computed_vals["parsedStartTime"])
    xml += "<endDate>%s</endDate>" % (computed_vals["parsedEndDate"])
    xml += "<endTime>%s</endTime>" % (computed_vals["parsedEndTime"])
  xml += "<commitmentHoursPerWeek>%d</commitmentHoursPerWeek>" % \
      (computed_vals["computedCommitmentHoursPerWeek"])
  xml += "</dateTimeDuration>"
  xml += "</dateTimeDurations>"
  xml += "</VolunteerOpportunity>"
  #logging.info(re.sub(r'><', '>\n<', xml))
  item_id = create_from_xml(xml)
  return 200, item_id, xml
def createTestDatabase():
# Seed the datastore with three sample VolunteerOpportunity postings
# (a mentoring listing, a one-off event, and a recurring listing) and
# return their three datastore keys.
id1 = create_from_xml("<VolunteerOpportunity><volunteerOpportunityID>1001</volunteerOpportunityID><sponsoringOrganizationIDs><sponsoringOrganizationID>1</sponsoringOrganizationID></sponsoringOrganizationIDs><volunteerHubOrganizationIDs><volunteerHubOrganizationID>3011</volunteerHubOrganizationID></volunteerHubOrganizationIDs><title>Be a Business Mentor - Trenton, NJ & Beyond</title><dateTimeDurations><dateTimeDuration><openEnded>Yes</openEnded><duration>P6M</duration><commitmentHoursPerWeek>4</commitmentHoursPerWeek></dateTimeDuration></dateTimeDurations><locations><location><city>Trenton</city><region>NJ</region><postalCode>08608</postalCode></location><location><city>Berkeley</city><region>CA</region><postalCode>94703</postalCode></location><location><city>Santa Cruz</city><region>CA</region><postalCode>95062</postalCode></location></locations><categoryTags><categoryTag>Community</categoryTag><categoryTag>Computers & Technology</categoryTag><categoryTag>Employment</categoryTag></categoryTags><minimumAge>21</minimumAge><skills>In order to maintain the integrity of the MicroMentor program, we require that our Mentor volunteers have significant business experience and expertise, such as: 3 years of business ownership experience</skills><detailURL>http://www.volunteermatch.org/search/index.jsp?l=08540</detailURL><description>This is where you come in. Simply by sharing your business know-how, you can make a huge difference in the lives of entrepreneurs from low-income and marginalized communities, helping them navigate the opportunities and challenges of running a business and improving their economic well-being and creating new jobs where they are most needed.</description></VolunteerOpportunity>")
id2 = create_from_xml("<VolunteerOpportunity><volunteerOpportunityID>2001</volunteerOpportunityID><sponsoringOrganizationIDs><sponsoringOrganizationID>2</sponsoringOrganizationID></sponsoringOrganizationIDs><title>DODGEBALL TO HELP AREA HUNGRY</title><dateTimeDurations><dateTimeDuration><openEnded>No</openEnded><startDate>2009-02-22</startDate><endDate>2009-02-22</endDate><startTime>18:45:00</startTime><endTime>21:00:00</endTime></dateTimeDuration><dateTimeDuration><openEnded>No</openEnded><startDate>2009-02-27</startDate><endDate>2009-02-27</endDate><startTime>18:45:00</startTime><endTime>21:00:00</endTime></dateTimeDuration></dateTimeDurations><locations><location><city>West Windsor</city><region>NJ</region><postalCode>08550</postalCode></location></locations><audienceTags><audienceTag>Teens</audienceTag><audienceTag>High School Students</audienceTag></audienceTags><categoryTags><categoryTag>Community</categoryTag><categoryTag>Homeless & Hungry</categoryTag><categoryTag>Hunger</categoryTag></categoryTags><minimumAge>14</minimumAge><skills>Must be in High School</skills><detailURL>http://www.volunteermatch.org/search/opp451561.jsp</detailURL><description>The Mercer County Quixote Quest Teen Volunteer Club is hosting a FUN Dodgeball Tournament at Mercer County College on Sunday afternoon, February 22nd. The proceeds from the event will bebefit the Trenton Area Soup Kitchen. Teens are invited to enter a team of six...with at least three female players (3 guys and 3 girls or more girls). Each team playing will bring a $50 entry fee and a matching sponsor donation of $50. (Total of $100 from each team).</description><lastUpdated olsonTZ=\"America/Denver\">2009-02-02T19:02:01</lastUpdated></VolunteerOpportunity>")
id3 = create_from_xml("<VolunteerOpportunity><volunteerOpportunityID>2002</volunteerOpportunityID><sponsoringOrganizationIDs><sponsoringOrganizationID>2</sponsoringOrganizationID></sponsoringOrganizationIDs><title>YOUNG ADULT TO HELP GUIDE MERCER COUNTY TEEN VOLUNTEER CLUB</title><volunteersNeeded>3</volunteersNeeded><dateTimeDurations><dateTimeDuration><openEnded>No</openEnded><startDate>2009-01-01</startDate><endDate>2009-05-31</endDate><iCalRecurrence>FREQ=WEEKLY;INTERVAL=2</iCalRecurrence><commitmentHoursPerWeek>2</commitmentHoursPerWeek></dateTimeDuration></dateTimeDurations><locations><location><city>Mercer County</city><region>NJ</region><postalCode>08610</postalCode></location></locations><audienceTags><audienceTag>Teens</audienceTag></audienceTags><categoryTags><categoryTag>Community</categoryTag><categoryTag>Children & Youth</categoryTag></categoryTags><skills>Be interested in promoting youth volunteerism. Be available two Tuesday evenings per month.</skills><detailURL>http://www.volunteermatch.org/search/opp200517.jsp</detailURL><description>Quixote Quest is a volunteer club for teens who have a passion for community service. The teens each volunteer for their own specific cause. Twice monthly, the club meets. At the club meetings the teens from different high schools come together for two hours to talk about their volunteer experiences and spend some hang-out time together that helps them bond as fraternity...family. Quixote Quest is seeking young adults roughly between 20 and 30 years of age who would be interested in being a guide and advisor to the teens during these two evening meetings a month.</description><lastUpdated olsonTZ=\"America/Denver\">2008-12-02T19:02:01</lastUpdated></VolunteerOpportunity>")
return (id1,id2,id3)
| Python |
#!/usr/bin/python
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import logging
import re
import hashlib
import geocode
import utils
from xml.dom import minidom
from xml.sax.saxutils import escape
from google.appengine.ext import db
class Error(Exception):
  """Base exception for this module."""
  pass
# status codes
# - string names to make them human-readable, i.e. easier debugging
# - leading number provides SQL/GQL sorting without an extra field
# (sorting is important for the moderator UI, to make sure most-
# likely-to-be-safe is ranked higher). Note: edited comes before
# plain new
# - substrings (e.g. "NEW") provide groupings, e.g. is this a 'new'
# listing, so the moderator UI know what visual treatment to give it.
# Note: all four terminal (accepted/rejected) states share the "10."
# prefix so they sort together, below every pending "NEW*" state.
NEW_EDITED_VERIFIED = "90.NEW_EDITED_VERIFIED"
NEW_VERIFIED = "80.NEW_VERIFIED"
NEW_EDITED = "70.NEW_EDITED"
NEW = "50.NEW"
NEW_DEFERRED = "40.NEW_DEFERRED"
ACCEPTED_MANUAL = "10.ACCEPTED_MANUAL"
ACCEPTED_AUTOMATIC = "10.ACCEPTED_AUTOMATIC"
REJECTED_MANUAL = "10.REJECTED_MANUAL"
REJECTED_AUTOMATIC = "10.REJECTED_AUTOMATIC"
class Posting(db.Model):
  """Postings going through the approval process."""
  # Key is the assigned ID (not the stable ID).
  item_id = db.StringProperty(default="")
  status = db.StringProperty(default=NEW)
  # For queries, parse out these fields; exact datatypes are not critical.
  quality_score = db.FloatProperty(default=1.0)
  creation_time = db.DateTimeProperty(auto_now_add=True)
  start_date = db.DateProperty(auto_now_add=True)
  # listing_xml is the full contents for the listing, assuming it gets
  # approved.  Note: listing_xml is also used for fulltext queries.
  listing_xml = db.TextProperty(default="")
  # Parse out these fields to improve latency in the moderation UI.
  title = db.StringProperty(default="")
  description = db.TextProperty(default="")
  # As per http://code.google.com/p/googleappengine/issues/detail?id=105
  # there's no point in GeoPt, especially since this is display-only;
  # there are even bugs (http://aralbalkan.com/1355) in GeoPt.
  #todo latlong = db.StringProperty(default="")

  def statusChar(self):
    """One-letter moderation verdict: 'A', 'R', or '' while pending."""
    if "ACCEPTED" in self.status:
      return "A"
    if "REJECTED" in self.status:
      return "R"
    return ""

  def showInModerator(self):
    """True while the posting is in one of the NEW states."""
    return "NEW" in self.status

  def isLive(self):
    """True once the posting has been accepted."""
    return "ACCEPTED" in self.status

  def reset(self):
    """Drop the posting back to plain NEW and persist."""
    self.status = NEW
    self.put()

  def edit(self):
    """Mark the posting as edited and persist."""
    self.status = NEW_EDITED
    self.put()

  def verify(self):
    """Promote NEW -> NEW_VERIFIED, or NEW_EDITED -> NEW_EDITED_VERIFIED."""
    if self.status == NEW:
      self.status = NEW_VERIFIED
      self.put()
    elif self.status == NEW_EDITED:
      # TODO: how do we know the edits didn't change after the email was sent?
      self.status = NEW_EDITED_VERIFIED
      self.put()

  def accept(self, type="MANUAL"):
    """Accept the posting; type is 'MANUAL' or 'AUTOMATIC'."""
    self.status = ACCEPTED_AUTOMATIC if type == "AUTOMATIC" else ACCEPTED_MANUAL
    self.put()

  def reject(self, type="MANUAL"):
    """Reject the posting; type is 'MANUAL' or 'AUTOMATIC'."""
    self.status = REJECTED_AUTOMATIC if type == "AUTOMATIC" else REJECTED_MANUAL
    self.put()

  def computeQualityScore(self):
    """Recompute and persist quality_score (currently always 1.0)."""
    # TODO: walk the object to look for missing/bad fields.
    self.quality_score = 1.0
    self.put()
def process(args):
  """Apply moderator verdicts from a form-args dict.

  Each key of the form 'v<datastore key>' maps to a verdict code:
  'A' accept, 'R' reject, 'V' verify, 'X' delete, '' reset.
  Keys not starting with 'v' and already-deleted postings are skipped.

  Args:
    args: dict mapping form field name -> verdict string.
  """
  for arg, verdict in args.items():
    # BUG FIX: 'arg[0]' raised IndexError on an empty key; startswith is
    # safe and reads better.
    if not arg.startswith("v"):
      continue
    keystr = arg[1:]
    el = Posting.get(keystr)
    if el is None:  # BUG FIX: was 'el == None'
      # Already deleted!
      continue
    # TODO: remove quality score hack-- this is how to rank in moderator UI
    if verdict == "A":
      el.accept()
    elif verdict == "R":
      el.reject()
    elif verdict == "V":
      el.verify()
    elif verdict == "X":
      logging.debug("deleting: " + keystr + " title=" + el.title)
      el.delete()
    elif verdict == "":
      el.reset()
def query(num=25, start=1, quality_score=0.5, start_date="2009-01-01"):
  """Fetch postings for the moderator UI, formatted for display.

  When quality_score is exactly 0.0, results are filtered by start_date;
  otherwise they are ranked by quality_score.
  """
  # TODO: GQL doesn't support string-CONTAINS, limiting keyword search.
  # TODO: GQL doesn't let you do inequality comparison on multiple fields.
  limit_clause = "LIMIT %d OFFSET %d" % (int(num), int(start))
  if quality_score == 0.0:
    sd = datetime.strptime(start_date, "%Y-%m-%d")
    q = db.GqlQuery("SELECT * FROM Posting "
                    "WHERE start_date >= :1 "
                    "ORDER BY status ASC, start_date ASC " + limit_clause,
                    sd.date())
  else:
    q = db.GqlQuery("SELECT * FROM Posting "
                    "ORDER BY status ASC,quality_score DESC " + limit_clause)
  reslist = []
  for result in q.fetch(num):
    result.key = str(result.key())
    # Crude XML-to-HTML formatting: turn tag boundaries into <br/> line
    # breaks and closing brackets into ': ' separators.
    fmtd = re.sub(r'><', '-qbr--', result.listing_xml)
    fmtd = re.sub(r'(<?/[a-zA-Z]+-qbr--)+', '-qbr--', fmtd)
    fmtd = re.sub(r'>', ': ', fmtd)
    fmtd = re.sub(r'-qbr--', '<br/>', fmtd)
    result.listing_fmtd = re.sub(r'(<br/>)+', '<br/>', fmtd)
    result.status_char = result.statusChar()
    reslist.append(result)
  return reslist
def create_from_xml(xml):
  """Create a Posting from a VolunteerOpportunity XML string.

  Parses out title, description and startDate for the moderation UI,
  assigns a content-hash item_id, and stores the posting.

  Args:
    xml: the full listing XML string.
  Returns:
    The datastore key of the new Posting, or "" if the XML is unparsable.
  """
  try:
    dom = minidom.parseString(xml)
  except Exception:
    # BUG FIX: was a bare 'except:', which also swallowed SystemExit and
    # KeyboardInterrupt.  Unparsable XML still just yields "".
    return ""
  posting = Posting(listing_xml=xml)
  posting.title = utils.xml_elem_text(dom, "title", '')
  logging.debug("create_from_xml: title=" + posting.title)
  logging.debug("create_from_xml: xml=" + xml)
  posting.description = utils.xml_elem_text(dom, "description", '')
  try:
    start_date = datetime.strptime(
        utils.xml_elem_text(dom, "startDate", ''), "%Y-%m-%d")
    posting.start_date = start_date.date()
  except ValueError:
    # Ignore a bad (or missing) startDate; the model default stands.
    pass
  posting.item_id = hashlib.md5(xml + str(posting.creation_time)).hexdigest()
  posting.put()
  return posting.key()
# Whitelist of recognized form-field names for user-submitted postings;
# cleanup_args() escapes these and blanks everything else.  Values are
# dummy 1s -- only key membership matters.
argnames = {
"title":1, "description":1, "skills":1, "virtual":1, "addr1":1, "addrname1":1,
"sponsoringOrganizationName":1, "openEnded":1, "startDate":1,
"startTime":1, "endTime":1, "endDate":1, "contactNoneNeeded":1,
"contactEmail":1, "contactPhone":1, "contactName":1, "detailURL":1,
"weeklySun":1, "weeklyMon":1, "weeklyTue":1, "weeklyWed":1, "weeklyThu":1,
"weeklyFri":1, "weeklySat":1, "biweeklySun":1, "biweeklyMon":1,
"biweeklyTue":1, "biweeklyWed":1, "biweeklyThu":1, "biweeklyFri":1,
"biweeklySat":1, "recurrence":1, "audienceAll":1, "audienceAge":1,
"minAge":1, "audienceSexRestricted":1, "sexRestrictedTo":1,
"commitmentHoursPerWeek":1, "city":1, "region":1, "postalCode":1,
"country":1, "street1":1, "street2":1, "location_string":1
}
# TODO: replace with a better parser-- after wasting hours, I gave up
# on strptime(). Do not add to utils.py -- this is a bad hack
def parseTimestamp(dateStr, timeStr):
  """Parse loosely-formatted date and time strings into a datetime.

  Dates may be MM/DD/YYYY, MM/DD/YY or YYYYMMDD (separator '/', '-' or
  none); times are H:MM or HH:MM with an optional AM/PM suffix.

  Returns:
    datetime, or None when either part is unparseable or the parsed
    values do not form a valid date.
  """
  dateStr = dateStr.strip()
  grp = re.match(r'(\d?\d)[/-]?(\d?\d)[/-]?(\d\d\d\d)', dateStr)
  if grp:
    month = int(grp.group(1))
    day = int(grp.group(2))
    year = int(grp.group(3))
  else:
    grp = re.match(r'(\d?\d)[/-]?(\d?\d)[/-]?(\d\d)', dateStr)
    if grp:
      month = int(grp.group(1))
      day = int(grp.group(2))
      # NOTE(review): two-digit years map to 19xx; for an upcoming-events
      # app 20xx seems more likely intended -- confirm before changing.
      year = int(grp.group(3)) + 1900
    else:
      grp = re.match(r'(\d\d\d\d)[/-]?(\d\d)[/-]?(\d\d)', dateStr)
      if grp:
        year = int(grp.group(1))
        month = int(grp.group(2))
        day = int(grp.group(3))
      else:
        return None
  hour = minute = 0
  timeStr = timeStr.strip().upper()
  grp = re.match(r'(\d?\d):(\d\d) *(AM|PM)?', timeStr)
  if grp:
    hour = int(grp.group(1))
    minute = int(grp.group(2))
    ampm = grp.group(3)
    # bugfix: 12-hour clock normalization.  The old code did hour += 12
    # for any PM value, turning "12:xx PM" into hour 24 (datetime raises,
    # so the function returned None) and leaving "12:xx AM" as noon.
    if ampm == "PM" and hour != 12:
      hour += 12
    elif ampm == "AM" and hour == 12:
      hour = 0
  else:
    return None
  try:
    return datetime(year, month, day, hour, minute, 0)
  except ValueError:
    # out-of-range component, e.g. month 13 or Feb 30
    return None
def cleanup_args(vals):
  """Sanitize the posting-form arguments in vals, in place.

  Escapes every known field, blanks unknown fields, fills in missing
  fields with "", and blanks out groups of fields that are mutually
  incompatible (e.g. a street address on a virtual opportunity).

  Args:
    vals: dict of form-field name -> submitted string value; mutated.
  """
  # keep only known argnames
  for key in vals:
    if key in argnames:
      vals[key] = escape(vals[key])
    else:
      # unknown field: blank its value rather than dropping the key
      vals[key] = ""
  # ensure every known field is present
  for key in argnames:
    if key not in vals:
      vals[key] = ""
  # blank-out incompatible fields
  if vals["virtual"] != "No":
    vals["virtual"] = "Yes"
    vals["addr1"] = vals["addrname1"] = ""
  if vals["openEnded"] != "No":
    vals["openEnded"] = "Yes"
    vals["startDate"] = vals["startTime"] = ""
    vals["endDate"] = vals["endTime"] = ""
  # footprint isn't very interesting when it comes to gender
  if len(vals["sexRestrictedTo"]) < 1:
    vals["sexRestrictedTo"] = ""
  elif vals["sexRestrictedTo"][0].upper() == "M":
    vals["sexRestrictedTo"] = "M"
  elif vals["sexRestrictedTo"][0].upper() == "F":
    vals["sexRestrictedTo"] = "F"
  else:
    vals["sexRestrictedTo"] = ""
  # bugfix (both loops below): the old code used "vals[arg] == ''", a
  # no-op comparison, so the incompatible day-of-week flags were never
  # actually cleared.
  # once, one-time or weekly, then blank-out biweekly
  if (vals["recurrence"] == "Weekly" or
      vals["recurrence"] == "No" or
      vals["recurrence"] == "Daily"):
    for arg in argnames:
      if arg.startswith("biweekly"):
        vals[arg] = ""
  # once, one-time or biweekly, then blank-out weekly
  if (vals["recurrence"] == "BiWeekly" or
      vals["recurrence"] == "No" or
      vals["recurrence"] == "Daily"):
    for arg in argnames:
      if arg.startswith("weekly"):
        vals[arg] = ""
def add_new_fields(vals, newvals):
  """Derive computed fields from the cleaned form values.

  Geocodes the assembled address, parses the start/end timestamps, and
  normalizes the numeric age/hours fields.  Reads (and may default the
  country in) vals; writes all derived values into newvals.
  """
  if vals["country"] == "":
    vals["country"] = "US"
  addr = " ".join([vals["street1"], vals["street2"], vals["city"],
                   vals["region"], vals["country"]])
  newvals["complete_addr"] = addr
  logging.debug("post: geocoding "+addr)
  latlong = geocode.geocode(addr)
  logging.debug("post: latlong="+latlong)
  if latlong == "":
    newvals["latitude"] = newvals["longitude"] = ""
  else:
    newvals["latitude"], newvals["longitude"] = latlong.split(",")[:2]
  # parsed dates default to "" and are only filled for dated events
  newvals["parsedStartDate"] = newvals["parsedStartTime"] = ""
  newvals["parsedEndDate"] = newvals["parsedEndTime"] = ""
  if vals["openEnded"] == "No":
    for prefix in ("Start", "End"):
      stamp = parseTimestamp(vals[prefix.lower() + "Date"],
                             vals[prefix.lower() + "Time"])
      if stamp:
        newvals["parsed%sDate" % prefix] = stamp.strftime("%Y-%m-%d")
        newvals["parsed%sTime" % prefix] = stamp.strftime("%H:%M:%S")
  # audience keywords imply a minimum age; otherwise fall back to minAge
  age_by_audience = {"seniors": 60, "teens": 13, "anyage": 0}
  if vals["audienceAge"] in age_by_audience:
    newvals["computedMinAge"] = age_by_audience[vals["audienceAge"]]
  else:
    try:
      newvals["computedMinAge"] = int(vals["minAge"])
    except:
      newvals["computedMinAge"] = 0
  try:
    hours = int(vals["commitmentHoursPerWeek"])
    newvals["computedCommitmentHoursPerWeek"] = max(hours, 0)
  except:
    newvals["computedCommitmentHoursPerWeek"] = 0
def create_from_args(vals, computed_vals):
  """Build VolunteerOpportunity XML from posting-form args and store it.

  Args:
    vals: raw form fields; sanitized in place by cleanup_args().
    computed_vals: dict that receives derived fields (geocode result,
      parsed dates, numeric conversions) from add_new_fields().

  Returns:
    (http_status, item_key, payload) tuple: (200, datastore key, xml)
    on success, or (402, "", error message) when the address cannot be
    geocoded.
  """
  # note: don't need to worry (much) about hacked-forms because we're
  # using CAPTCHA to avoid bot submissions.
  cleanup_args(vals)
  add_new_fields(vals, computed_vals)
  if vals["virtual"] == 'No' and computed_vals["latitude"] == "":
    return 402, "", "cannot find address: '"+computed_vals["complete_addr"]+"'"
  xml = "<VolunteerOpportunity>"
  # NOTE(review): cleanup_args() blanks every field not in argnames,
  # including recaptcha_response_field, so this branch looks unreachable
  # after cleanup -- confirm against the /post handler.
  if vals["recaptcha_response_field"] == "test":
    # basic security measure
    xml += "<isTest>Yes</isTest>"
    vals["title"] = "T:" + vals["title"]
    vals["description"] = "TEST DELETEME: " + vals["description"]
  # TODO: organization
  #xml += "<volunteerOpportunityID>%d</volunteerOpportunityID>" % (item_id)
  #xml += "<sponsoringOrganizationIDs><sponsoringOrganizationID>%d</sponsoringOrganizationID></sponsoringOrganizationIDs>" % (item_id)
  #xml += "<volunteerHubOrganizationIDs><volunteerHubOrganizationID>%s</volunteerHubOrganizationID></volunteerHubOrganizationIDs>" % ("")
  xml += "<title>%s</title>" % (vals["title"])
  xml += "<description>%s</description>" % (vals["description"])
  xml += "<skills>%s</skills>" % (vals["skills"])
  xml += "<minimumAge>%s</minimumAge>" % (str(computed_vals["computedMinAge"]))
  xml += "<detailURL>%s</detailURL>" % (vals["detailURL"])
  xml += "<locations>"
  xml += "<location>"
  xml += "<name>%s</name>" % (vals["addrname1"])
  xml += "<city>%s</city>" % (vals["city"])
  xml += "<region>%s</region>" % (vals["region"])
  xml += "<postalCode>%s</postalCode>" % (vals["postalCode"])
  xml += "<country>%s</country>" % (vals["country"])
  xml += "<latitude>%s</latitude>" % (computed_vals["latitude"])
  xml += "<longitude>%s</longitude>" % (computed_vals["longitude"])
  xml += "</location>"
  xml += "</locations>"
  # TODO: category tags
  #xml += "<categoryTags>"
  #xml += "<categoryTag>Community</categoryTag>"
  #xml += "</categoryTags>"
  xml += "<dateTimeDurations>"
  xml += "<dateTimeDuration>"
  xml += "<openEnded>%s</openEnded>" % (vals["openEnded"])
  if vals["openEnded"] == "No":
    # bugfix: add_new_fields() stores the parsed timestamps under the
    # parsed* keys; the old lookups ("startDate" etc.) raised KeyError
    # for every dated (non-open-ended) posting.
    xml += "<startDate>%s</startDate>" % (computed_vals["parsedStartDate"])
    xml += "<startTime>%s</startTime>" % (computed_vals["parsedStartTime"])
    xml += "<endDate>%s</endDate>" % (computed_vals["parsedEndDate"])
    xml += "<endTime>%s</endTime>" % (computed_vals["parsedEndTime"])
  xml += "<commitmentHoursPerWeek>%d</commitmentHoursPerWeek>" % \
      (computed_vals["computedCommitmentHoursPerWeek"])
  xml += "</dateTimeDuration>"
  xml += "</dateTimeDurations>"
  xml += "</VolunteerOpportunity>"
  #logging.info(re.sub(r'><', '>\n<', xml))
  item_id = create_from_xml(xml)
  return 200, item_id, xml
def createTestDatabase():
  """Seed the datastore with three canned VolunteerOpportunity postings.

  Returns:
    (key1, key2, key3): the values returned by create_from_xml() for
    each canned record -- a datastore key on success, "" on parse failure.
  """
  # canned records: an open-ended mentoring listing, a one-day event, and
  # a biweekly recurring listing
  id1 = create_from_xml("<VolunteerOpportunity><volunteerOpportunityID>1001</volunteerOpportunityID><sponsoringOrganizationIDs><sponsoringOrganizationID>1</sponsoringOrganizationID></sponsoringOrganizationIDs><volunteerHubOrganizationIDs><volunteerHubOrganizationID>3011</volunteerHubOrganizationID></volunteerHubOrganizationIDs><title>Be a Business Mentor - Trenton, NJ & Beyond</title><dateTimeDurations><dateTimeDuration><openEnded>Yes</openEnded><duration>P6M</duration><commitmentHoursPerWeek>4</commitmentHoursPerWeek></dateTimeDuration></dateTimeDurations><locations><location><city>Trenton</city><region>NJ</region><postalCode>08608</postalCode></location><location><city>Berkeley</city><region>CA</region><postalCode>94703</postalCode></location><location><city>Santa Cruz</city><region>CA</region><postalCode>95062</postalCode></location></locations><categoryTags><categoryTag>Community</categoryTag><categoryTag>Computers & Technology</categoryTag><categoryTag>Employment</categoryTag></categoryTags><minimumAge>21</minimumAge><skills>In order to maintain the integrity of the MicroMentor program, we require that our Mentor volunteers have significant business experience and expertise, such as: 3 years of business ownership experience</skills><detailURL>http://www.volunteermatch.org/search/index.jsp?l=08540</detailURL><description>This is where you come in. Simply by sharing your business know-how, you can make a huge difference in the lives of entrepreneurs from low-income and marginalized communities, helping them navigate the opportunities and challenges of running a business and improving their economic well-being and creating new jobs where they are most needed.</description></VolunteerOpportunity>")
  id2 = create_from_xml("<VolunteerOpportunity><volunteerOpportunityID>2001</volunteerOpportunityID><sponsoringOrganizationIDs><sponsoringOrganizationID>2</sponsoringOrganizationID></sponsoringOrganizationIDs><title>DODGEBALL TO HELP AREA HUNGRY</title><dateTimeDurations><dateTimeDuration><openEnded>No</openEnded><startDate>2009-02-22</startDate><endDate>2009-02-22</endDate><startTime>18:45:00</startTime><endTime>21:00:00</endTime></dateTimeDuration><dateTimeDuration><openEnded>No</openEnded><startDate>2009-02-27</startDate><endDate>2009-02-27</endDate><startTime>18:45:00</startTime><endTime>21:00:00</endTime></dateTimeDuration></dateTimeDurations><locations><location><city>West Windsor</city><region>NJ</region><postalCode>08550</postalCode></location></locations><audienceTags><audienceTag>Teens</audienceTag><audienceTag>High School Students</audienceTag></audienceTags><categoryTags><categoryTag>Community</categoryTag><categoryTag>Homeless & Hungry</categoryTag><categoryTag>Hunger</categoryTag></categoryTags><minimumAge>14</minimumAge><skills>Must be in High School</skills><detailURL>http://www.volunteermatch.org/search/opp451561.jsp</detailURL><description>The Mercer County Quixote Quest Teen Volunteer Club is hosting a FUN Dodgeball Tournament at Mercer County College on Sunday afternoon, February 22nd. The proceeds from the event will bebefit the Trenton Area Soup Kitchen. Teens are invited to enter a team of six...with at least three female players (3 guys and 3 girls or more girls). Each team playing will bring a $50 entry fee and a matching sponsor donation of $50. (Total of $100 from each team).</description><lastUpdated olsonTZ=\"America/Denver\">2009-02-02T19:02:01</lastUpdated></VolunteerOpportunity>")
  id3 = create_from_xml("<VolunteerOpportunity><volunteerOpportunityID>2002</volunteerOpportunityID><sponsoringOrganizationIDs><sponsoringOrganizationID>2</sponsoringOrganizationID></sponsoringOrganizationIDs><title>YOUNG ADULT TO HELP GUIDE MERCER COUNTY TEEN VOLUNTEER CLUB</title><volunteersNeeded>3</volunteersNeeded><dateTimeDurations><dateTimeDuration><openEnded>No</openEnded><startDate>2009-01-01</startDate><endDate>2009-05-31</endDate><iCalRecurrence>FREQ=WEEKLY;INTERVAL=2</iCalRecurrence><commitmentHoursPerWeek>2</commitmentHoursPerWeek></dateTimeDuration></dateTimeDurations><locations><location><city>Mercer County</city><region>NJ</region><postalCode>08610</postalCode></location></locations><audienceTags><audienceTag>Teens</audienceTag></audienceTags><categoryTags><categoryTag>Community</categoryTag><categoryTag>Children & Youth</categoryTag></categoryTags><skills>Be interested in promoting youth volunteerism. Be available two Tuesday evenings per month.</skills><detailURL>http://www.volunteermatch.org/search/opp200517.jsp</detailURL><description>Quixote Quest is a volunteer club for teens who have a passion for community service. The teens each volunteer for their own specific cause. Twice monthly, the club meets. At the club meetings the teens from different high schools come together for two hours to talk about their volunteer experiences and spend some hang-out time together that helps them bond as fraternity...family. Quixote Quest is seeking young adults roughly between 20 and 30 years of age who would be interested in being a guide and advisor to the teens during these two evening meetings a month.</description><lastUpdated olsonTZ=\"America/Denver\">2008-12-02T19:02:01</lastUpdated></VolunteerOpportunity>")
  return (id1,id2,id3)
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
low-level routines for querying Google Base and processing the results.
Please don't call this directly-- instead call search.py
"""
import datetime
import time
import re
import urllib
import logging
import traceback
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from xml.dom import minidom
import api
import geocode
import models
import modelutils
import posting
import searchresult
import utils
RESULT_CACHE_TIME = 900 # seconds
# memcache key prefix for individual cached search results
RESULT_CACHE_KEY = 'searchresult:'
# google base has a bug where negative numbers aren't indexed correctly,
# so we load the data with only positive numbers for lat/long.
# this should be a big number and of course must be sync'd with the
# value in datahub/*
GBASE_LOC_FIXUP = 1000
# Date format pattern used in date ranges.
DATE_FORMAT_PATTERN = re.compile(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}')
# max number of results to ask from Base (for latency-- and correctness?)
BASE_MAX_RESULTS = 1000
# what base customer/author ID did we load the data under?
BASE_CUSTOMER_ID = 5663714
def base_argname(name):
  """Prefix an urlparam name with "base_".

  Base-specific urlparams all start with "base_" to avoid conflicts with
  non-base-specific args, and also to signal to appwriters that they're
  base specific and to be cautious about their usage.
  """
  return "base_%s" % name
def base_orderby_arg(args):
  """Map the footprint ranking/sorting order onto a Base orderby value."""
  # TODO: implement other scenarios for orderby
  # only "m" (newest first) is special-cased; anything else falls back
  # to Base's default relevancy ranking.
  sort_arg = args[api.PARAM_SORT]
  return "modification_time" if sort_arg == "m" else "relevancy"
def base_restrict_str(key, val=None):
  """Render a key (and optional value) in Base restrict syntax: +[key:val].

  Underscores in the key become spaces before URL-encoding, matching how
  Base names its attributes.
  """
  body = urllib.quote_plus(key.replace('_', ' '))
  if val is not None:
    body += ':' + urllib.quote_plus(str(val))
  return '+[%s]' % body
def form_base_query(args):
  """Ensure args[] has all correct and well-formed members and
  return a base query string.

  Mutates args in place: fills in default start/end dates, sort order,
  search distance, backend URL, Base customer id and start index.

  Args:
    args: dict of api.PARAM_* values (plus "lat"/"long"/"backend").

  Returns:
    the Base "bq" restrict string.
  """
  logging.debug("form_base_query: "+str(args))
  base_query = ""
  if api.PARAM_Q in args and args[api.PARAM_Q] != "":
    base_query += urllib.quote_plus(args[api.PARAM_Q])
  if api.PARAM_VOL_STARTDATE in args or api.PARAM_VOL_ENDDATE in args:
    startdate = None
    if api.PARAM_VOL_STARTDATE in args and args[api.PARAM_VOL_STARTDATE] != "":
      try:
        startdate = datetime.datetime.strptime(
            args[api.PARAM_VOL_STARTDATE].strip(), "%Y-%m-%d")
      except ValueError:
        # was a bare except: -- strptime only raises ValueError here
        logging.error("malformed start date: %s" %
                      args[api.PARAM_VOL_STARTDATE])
    if not startdate:
      # note: default vol_startdate is "tomorrow"
      # in base, event_date_range YYYY-MM-DDThh:mm:ss/YYYY-MM-DDThh:mm:ss
      # appending "Z" to the datetime string would mean UTC
      startdate = datetime.date.today() + datetime.timedelta(days=1)
      args[api.PARAM_VOL_STARTDATE] = startdate.strftime("%Y-%m-%d")
    enddate = None
    if api.PARAM_VOL_ENDDATE in args and args[api.PARAM_VOL_ENDDATE] != "":
      try:
        enddate = datetime.datetime.strptime(
            args[api.PARAM_VOL_ENDDATE].strip(), "%Y-%m-%d")
      except ValueError:
        logging.error("malformed end date: %s" % args[api.PARAM_VOL_ENDDATE])
    if not enddate:
      # default end date: ~3 years after the start date
      enddate = datetime.date(startdate.year, startdate.month, startdate.day)
      enddate = enddate + datetime.timedelta(days=1000)
      args[api.PARAM_VOL_ENDDATE] = enddate.strftime("%Y-%m-%d")
    daterangestr = '%s..%s' % (args[api.PARAM_VOL_STARTDATE],
                               args[api.PARAM_VOL_ENDDATE])
    base_query += base_restrict_str("event_date_range", daterangestr)
  if api.PARAM_VOL_PROVIDER in args and args[api.PARAM_VOL_PROVIDER] != "":
    if re.match(r'[a-zA-Z0-9:/_. -]+', args[api.PARAM_VOL_PROVIDER]):
      base_query += base_restrict_str("feed_providername",
                                      args[api.PARAM_VOL_PROVIDER])
    else:
      # illegal providername
      # TODO: throw 500
      logging.error("illegal providername: " + args[api.PARAM_VOL_PROVIDER])
  # TODO: injection attack on sort
  if api.PARAM_SORT not in args:
    args[api.PARAM_SORT] = "r"
  # TODO: injection attacks in vol_loc
  # robustness fix: use .get() -- the old direct indexing raised KeyError
  # when vol_loc (or lat/long/dist below) was absent from args.
  if args.get(api.PARAM_VOL_LOC, "") != "":
    if api.PARAM_VOL_DIST not in args:
      args[api.PARAM_VOL_DIST] = "25"
    args[api.PARAM_VOL_DIST] = int(str(args[api.PARAM_VOL_DIST]))
    if args[api.PARAM_VOL_DIST] < 10:
      args[api.PARAM_VOL_DIST] = 10
  if (args.get("lat", "") != "" and args.get("long", "") != ""
      and args.get(api.PARAM_VOL_DIST, "") != ""):
    lat, lng = float(args["lat"]), float(args["long"])
    dist = float(args[api.PARAM_VOL_DIST])
    # lat/long were loaded with GBASE_LOC_FIXUP added (Base mis-indexes
    # negatives); bounding box uses ~69.1 mi/deg lat and ~50 mi/deg long.
    # %%3E%%3D / %%3C%%3D are the url-encoded >= and <= operators.
    base_query += "[latitude%%3E%%3D%.2f]" % (lat+GBASE_LOC_FIXUP - dist/69.1)
    base_query += "[latitude%%3C%%3D%.2f]" % (lat+GBASE_LOC_FIXUP + dist/69.1)
    base_query += "[longitude%%3E%%3D%.2f]" % (lng+GBASE_LOC_FIXUP - dist/50)
    base_query += "[longitude%%3C%%3D%.2f]" % (lng+GBASE_LOC_FIXUP + dist/50)
  # Base URL for snippets search on Base.
  # Docs: http://code.google.com/apis/base/docs/2.0/attrs-queries.html
  # TODO: injection attack on backend
  if "backend" not in args:
    args["backend"] = "http://www.google.com/base/feeds/snippets"
  cust_arg = base_argname("customer")
  if cust_arg not in args:
    args[cust_arg] = BASE_CUSTOMER_ID
  base_query += base_restrict_str("customer_id", int(args[cust_arg]))
  if api.PARAM_START not in args:
    args[api.PARAM_START] = 1
  return base_query
# note: many of the XSS and injection-attack defenses are unnecessary
# given that the callers are also protecting us, but I figure better
# safe than sorry, and defense-in-depth.
def search(args):
  """run a Google Base search.

  Args:
    args: dict of api.PARAM_* values; mutated by form_base_query().

  Returns:
    searchresult.SearchResultSet -- empty if no usable query was given.
  """
  def have_valid_query(args):
    """ make sure we were given a value for at least one of these arguments """
    valid_query = False
    api_list = [api.PARAM_Q,
                api.PARAM_TIMEPERIOD,
                api.PARAM_VOL_LOC,
                api.PARAM_VOL_STARTDATE,
                api.PARAM_VOL_ENDDATE,
                api.PARAM_VOL_DURATION,
                api.PARAM_VOL_PROVIDER,
                api.PARAM_VOL_STARTDAYOFWEEK]
    for param in api_list:
      if param in args and args[param]:
        if param == api.PARAM_VOL_LOC:
          # vol_loc must render a lat, long pair
          if not args["lat"] or not args["long"]:
            continue
        valid_query = True
        break
    return valid_query
  base_query = form_base_query(args)
  query_url = args["backend"]
  num_to_fetch = int(args[api.PARAM_START])
  num_to_fetch += int(args[api.PARAM_NUM] * args[api.PARAM_OVERFETCH_RATIO])
  if num_to_fetch > BASE_MAX_RESULTS:
    num_to_fetch = BASE_MAX_RESULTS
  query_url += "?max-results=" + str(num_to_fetch)
  # We don't set "&start-index=" because that will interfere with
  # deduping + pagination.  Since we merge the results here in the
  # app, we must perform de-duping starting at index zero every time
  # in order to get reliable pagination.
  query_url += "&orderby=" + base_orderby_arg(args)
  query_url += "&content=" + "all"
  query_url += "&bq=" + base_query
  if not have_valid_query(args):
    # no query + no location = no results
    result_set = searchresult.SearchResultSet(urllib.unquote(query_url),
                                              query_url,
                                              [])
    logging.debug("Base not called: no query given")
    result_set.query_url = query_url
    result_set.args = args
    result_set.num_results = 0
    result_set.estimated_results = 0
    result_set.fetch_time = 0
    result_set.parse_time = 0
    return result_set
  logging.debug("calling Base: "+query_url)
  results = query(query_url, args, False)
  logging.debug("Base call done.")
  # Base doesn't implement day-of-week filtering, so post-filter here
  if (api.PARAM_VOL_STARTDAYOFWEEK in args and
      args[api.PARAM_VOL_STARTDAYOFWEEK] != ""):
    startday = args[api.PARAM_VOL_STARTDAYOFWEEK]
    # bugfix: the old code did "del results[i]" inside the live
    # enumerate() loop, which skips the element following every
    # deletion.  Collect the indices first, then delete from the end.
    doomed = []
    for i, res in enumerate(results):
      dow = str(res.startdate.strftime("%w"))
      if startday.find(dow) < 0:
        doomed.append(i)
    for i in reversed(doomed):
      del results[i]
  return results
def query(query_url, args, cache):
  """run the actual Base query (no filtering or sorting).

  Args:
    query_url: fully-formed Base snippets-feed URL.
    args: search args dict (consulted for the geocode workaround flag).
    cache: if True, store each parsed result in memcache under
      RESULT_CACHE_KEY + item_id for RESULT_CACHE_TIME seconds.

  Returns:
    searchresult.SearchResultSet with fetch_time/parse_time populated;
    empty results on a non-200 fetch.
  """
  result_set = searchresult.SearchResultSet(urllib.unquote(query_url),
                                            query_url,
                                            [])
  result_set.query_url = query_url
  result_set.args = args
  fetch_start = time.time()
  fetch_result = urlfetch.fetch(query_url,
                                deadline = api.CONST_MAX_FETCH_DEADLINE)
  fetch_end = time.time()
  result_set.fetch_time = fetch_end - fetch_start
  if fetch_result.status_code != 200:
    return result_set
  result_content = fetch_result.content
  parse_start = time.time()
  # undo comma encoding -- see datahub/footprint_lib.py
  result_content = re.sub(r';;', ',', result_content)
  dom = minidom.parseString(result_content)
  elems = dom.getElementsByTagName('entry')
  for i, entry in enumerate(elems):
    # Note: using entry.getElementsByTagName('link')[0] isn't very stable;
    # consider iterating through them for the one where rel='alternate' or
    # whatever the right thing is.
    url = utils.xml_elem_text(entry, 'g:detailurl', '')
    if not url:
      # URL is required
      continue
    # ID is the 'stable id' of the item generated by base.
    # Note that this is not the base url expressed as the Atom id element.
    item_id = utils.xml_elem_text(entry, 'g:id', '')
    # Base URL is the url of the item in base, expressed with the Atom id tag.
    base_url = utils.xml_elem_text(entry, 'id', '')
    snippet = utils.xml_elem_text(entry, 'g:abstract', '')
    title = utils.xml_elem_text(entry, 'title', '')
    location = utils.xml_elem_text(entry, 'g:location_string', '')
    res = searchresult.SearchResult(url, title, snippet, location, item_id,
                                    base_url)
    # TODO: escape?
    res.provider = utils.xml_elem_text(entry, 'g:feed_providername', '')
    res.orig_idx = i+1
    res.latlong = ""
    latstr = utils.xml_elem_text(entry, 'g:latitude', '')
    longstr = utils.xml_elem_text(entry, 'g:longitude', '')
    if latstr and longstr and latstr != "" and longstr != "":
      latval = float(latstr)
      longval = float(longstr)
      # values were stored with GBASE_LOC_FIXUP added (Base mis-indexes
      # negatives); anything above half the fixup must have been fixed-up
      if latval > GBASE_LOC_FIXUP/2:
        latval -= GBASE_LOC_FIXUP
      if longval > GBASE_LOC_FIXUP/2:
        longval -= GBASE_LOC_FIXUP
      res.latlong = str(latval) + "," + str(longval)
    # TODO: remove-- working around a DB bug where all latlongs are the same
    if "geocode_responses" in args:
      res.latlong = geocode.geocode(location,
                                    args["geocode_responses"] != "nocache")
    # res.event_date_range follows one of these two formats:
    #     <start_date>T<start_time> <end_date>T<end_time>
    #     <date>T<time>
    # bugfix: a missing comma here ('g:event_date_range' '') concatenated
    # the two literals and silently dropped the default-value argument.
    res.event_date_range = utils.xml_elem_text(entry, 'g:event_date_range', '')
    res.startdate = datetime.datetime.strptime("2000-01-01", "%Y-%m-%d")
    res.enddate = datetime.datetime.strptime("2038-01-01", "%Y-%m-%d")
    if res.event_date_range:
      match = DATE_FORMAT_PATTERN.findall(res.event_date_range)
      if not match:
        # TODO(oansaldi): should we accept an event with an invalid date range?
        logging.warning('bad date range: %s for %s' %
                        (res.event_date_range, url))
      else:
        # first match is start date/time
        startdate = datetime.datetime.strptime(match[0], '%Y-%m-%dT%H:%M:%S')
        # last match is either end date/time or start/date time
        enddate = datetime.datetime.strptime(match[-1], '%Y-%m-%dT%H:%M:%S')
        # protect against absurd dates
        if startdate > res.startdate:
          res.startdate = startdate
        if enddate < res.enddate:
          res.enddate = enddate
    # posting.py currently has an authoritative list of fields in "argnames"
    # that are available to submitted events which may later appear in GBase
    # so with a few exceptions we want those same fields to become
    # attributes of our result object
    except_names = ["title", "description"]
    for name in posting.argnames:
      if name not in except_names:
        # these attributes are likely to become part of the "g" namespace
        # http://base.google.com/support/bin/answer.py?answer=58085&hl=en
        setattr(res, name, utils.xml_elem_text(entry, "g:" + name, ''))
    result_set.results.append(res)
    if cache and res.item_id:
      key = RESULT_CACHE_KEY + res.item_id
      memcache.set(key, res, time=RESULT_CACHE_TIME)
  result_set.num_results = len(result_set.results)
  result_set.estimated_results = int(
      utils.xml_elem_text(dom, "openSearch:totalResults", "0"))
  parse_end = time.time()
  result_set.parse_time = parse_end - parse_start
  return result_set
def get_from_ids(ids):
  """Return a result set containing multiple results for multiple ids.

  Args:
    ids: List of stable IDs of volunteer opportunities.

  Returns:
    searchresult.SearchResultSet with just the entries in ids, in the
    same order as ids; entries that cannot be found are skipped.
  """
  result_set = searchresult.SearchResultSet('', '', [])
  # First get all that we can from memcache.
  # bugfix: initialize hits before the try -- when get_multi() raised,
  # the old code fell through to "for key in hits" with hits unbound.
  hits = {}
  try:
    # get_multi returns a dictionary of the keys and values that were present
    # in memcache. Even with the key_prefix specified, that key_prefix won't
    # be on the keys in the returned dictionary.
    hits = memcache.get_multi(ids, RESULT_CACHE_KEY)
  except:
    # TODO(mblain): Scope to only 'memcache down' exception.
    logging.exception('get_from_ids: ignoring busted memcache. stack: %s',
                      ''.join(traceback.format_stack()))
  temp_results_dict = {}
  for key in hits:
    result = hits[key]
    temp_results_dict[result.item_id] = result
  # OK, we've collected what we can from memcache. Now look up the rest.
  # Find the Google Base url from the datastore, then look that up in base.
  missing_ids = []
  for item_id in ids:
    if not item_id in hits:
      missing_ids.append(item_id)
  datastore_results = modelutils.get_by_ids(models.VolunteerOpportunity,
                                            missing_ids)
  datastore_missing_ids = []
  for item_id in ids:
    if not item_id in datastore_results:
      datastore_missing_ids.append(item_id)
  if datastore_missing_ids:
    logging.warning('Could not find entry in datastore for ids: %s' %
                    datastore_missing_ids)
  # Bogus args for search. TODO: Remove these, why are they needed above?
  args = {}
  args[api.PARAM_VOL_STARTDATE] = (datetime.date.today() +
                                   datetime.timedelta(days=1)).strftime("%Y-%m-%d")
  datetm = time.strptime(args[api.PARAM_VOL_STARTDATE], "%Y-%m-%d")
  args[api.PARAM_VOL_ENDDATE] = (datetime.date(datetm.tm_year, datetm.tm_mon,
                                 datetm.tm_mday) + datetime.timedelta(days=60))
  # TODO(mblain): Figure out how to pull in multiple base entries in one call.
  for (item_id, volunteer_opportunity_entity) in datastore_results.iteritems():
    if not volunteer_opportunity_entity.base_url:
      logging.warning('no base_url in datastore for id: %s' % item_id)
      continue
    temp_results = query(volunteer_opportunity_entity.base_url, args, True)
    if not temp_results.results:
      # The base URL may have changed from under us. Oh well.
      # Record the failure on the entity for later inspection.
      volunteer_opportunity_entity.base_url_failure_count += 1
      volunteer_opportunity_entity.last_base_url_update_failure = \
          datetime.datetime.now()
      volunteer_opportunity_entity.put()
      continue
    if temp_results.results[0].item_id != item_id:
      # bugfix: the old message logged len() of an empty, never-populated
      # dict; log the length of the actual Base response instead.
      logging.error('First result is not expected result. '
                    'Expected: %s Found: %s. len(results): %s' %
                    (item_id, temp_results.results[0].item_id,
                     len(temp_results.results)))
      # Not sure if we should touch the VolunteerOpportunity or not.
      continue
    temp_result = temp_results.results[0]
    temp_results_dict[temp_result.item_id] = temp_result
  # Our temp result set now contains both memcache hits and entries
  # fetched directly from Base.  Walk ids in reverse and prepend each
  # found result, so the output matches the input order.
  # bugfix: iterate reversed(ids) instead of calling ids.reverse(), which
  # mutated the caller's list as a side effect.
  for item_id in reversed(ids):
    result = temp_results_dict.get(item_id, None)
    if result:
      result_set.results.insert(0, result)
  return result_set
| Python |
# Google Maps API keys, keyed by serving host:port (Maps API keys are
# issued per origin, so each deployment needs its own entry).
MAPS_API_KEYS = {}
MAPS_API_KEYS['www.adamsah.net:8080'] = 'ABQIAAAAxq97AW0x5_CNgn6-nLxSrxQPiECf40c9sk8_oYaM1tejJgt_DBQGX9FrJhDYEm_Q_8aqbVKUzollqg'
MAPS_API_KEYS['www.adamsah.net:8081'] = 'ABQIAAAAxq97AW0x5_CNgn6-nLxSrxSatViGQnF70MoboVDRRVzoLj4T8hTGWmAjUKagGrnWr-xTwSWv4XFuiw'
MAPS_API_KEYS['www.adamsah.net:8082'] = 'ABQIAAAAxq97AW0x5_CNgn6-nLxSrxTGEVewtGazShFSG9KX3KfJ-OzRuxRYmck9mME2a1DVHyoL1GbqprKLeA'
MAPS_API_KEYS['www.adamsah.net:8083'] = 'ABQIAAAAxq97AW0x5_CNgn6-nLxSrxTaJ1NpVtTebRGVkFgIOxwdBr6gvhSK1BMuNuwwydj3shBNvtPyShE9CA'
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
geocode client, which uses Google Maps API
"""
import re
import urllib
import logging
import time
from datetime import datetime
from google.appengine.api import urlfetch
from google.appengine.api import memcache
import api
def is_latlong(instr):
  """True if instr looks like a "lat,long" pair (numeric, one comma)."""
  return re.match(r'^\s*[0-9.+-]+\s*,\s*[0-9.+-]+\s*$', instr) is not None
def is_latlongzoom(instr):
  """True if instr looks like a "lat,long,zoom" triple (numeric)."""
  pattern = r'^\s*[0-9.+-]+\s*,\s*[0-9.+-]+\s*,\s*[0-9.+-]+\s*$'
  return re.match(pattern, instr) is not None
def geocode(addr, usecache=True, retries=4):
  """convert a human-readable address into a "lat,long,zoom" value (string).

  Results (including some geocoder failures) are memcached.

  Args:
    addr: address string, or an already-geocoded "lat,long[,zoom]".
    usecache: if False, skip the memcache lookup (the result is still
      written to memcache).
    retries: how many times to retry after a quota-exceeded response.

  Returns:
    "lat,long,zoom" string, or "" when geocoding fails.
  """
  loc = addr.lower().strip()
  # already geocoded-- just return
  if is_latlongzoom(loc):
    return loc
  if is_latlong(loc):
    # regexp allow missing comma
    # TODO: pick a smart default zoom, depending on population density.
    return loc+",4"
  # canonicalize: strip leading/trailing punctuation, collapse whitespace
  loc = re.sub(r'^[^0-9a-z]+', r'', loc)
  loc = re.sub(r'[^0-9a-z]+$', r'', loc)
  loc = re.sub(r'\s\s+', r' ', loc)
  logging.debug("geocode: loc="+loc)
  memcache_key = "geocode:"+loc
  val = memcache.get(memcache_key)
  if usecache and val:
    logging.debug("geocode: cache hit loc="+loc+" val="+val)
    return val
  params = urllib.urlencode(
    {'q':loc.lower(), 'output':'csv',
     'oe':'utf8', 'sensor':'false', 'gl':'us',
     'key':'ABQIAAAAxq97AW0x5_CNgn6-nLxSrxQuOQhskTx7t90ovP5xOuY'+\
     '_YrlyqBQajVan2ia99rD9JgAcFrdQnTD4JQ'})
  fetchurl = "http://maps.google.com/maps/geo?%s" % params
  logging.debug("geocode: cache miss, trying "+fetchurl)
  fetch_result = urlfetch.fetch(fetchurl,
                                deadline = api.CONST_MAX_FETCH_DEADLINE)
  if fetch_result.status_code != 200:
    # fail and also don't cache
    return ""
  res = fetch_result.content
  if "," not in res:
    # fail and also don't cache
    return ""
  try:
    # CSV response format: respcode,zoom,lat,long
    # pylint: disable-msg=W0612
    respcode, zoom, lat, lng = res.split(",")
  except ValueError:
    # was a bare except: -- split() only raises ValueError on a bad count
    logging.error(str(datetime.now())+
                  ": unparseable geocoder response: "+res[0:80])
    respcode, zoom, lat, lng = 999, 0, 0, 0
  respcode = int(respcode)
  if respcode == 500 or respcode == 620:
    # bugfix: the old code decremented retries but never checked it, so a
    # persistent quota error (500/620) recursed without bound.
    if retries > 0:
      logging.warn(str(datetime.now())+"geocoder quota exceeded-- sleeping...")
      time.sleep(1)
      return geocode(addr, usecache, retries-1)
    logging.error(str(datetime.now())+": geocoder retries exhausted: "+loc)
    return ""
  # these results get cached, including failures (val="") so we don't
  # hammer the geocoder with known-bad addresses
  val = ""
  if respcode == 200:
    val = lat+","+lng+","+zoom
  memcache.set(memcache_key, val)
  return val
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
paths used in the app
"""
# Application URL paths, referenced by the request handlers.
URL_HOME = '/'
URL_OLD_HOME = '/home'
URL_CONSUMER_UI_SEARCH = '/search'
URL_CONSUMER_UI_SEARCH_REDIR = '/search_form'
URL_API_SEARCH = '/api/volopps'
URL_LEGACY_API_SEARCH = '/api/search'
URL_MY_EVENTS = '/myevents'
URL_FRIENDS = '/friends'
URL_POST = '/post'
URL_ADMIN = '/admin'
URL_MODERATE = '/moderate'
URL_MODERATE_BLACKLIST = '/moderateblacklist'
URL_UI_SNIPPETS = '/ui_snippets'
URL_UI_MY_SNIPPETS = '/ui_my_snippets'
URL_REDIRECT = '/url'
URL_DATAHUB_DASHBOARD = '/dashboard/datahub'
URL_ACTION = '/action' # User actions like starring
URL_PSA = '/psa' # Redirect to home page for tracking adsense psas
# Base URL from which the static HTML pages below are fetched.
STATIC_CONTENT_LOCATION = 'http://footprint2009dev.googlecode.com/svn/trunk/frontend/html/'
# Mappings between application URLs (key) and static content
# files to fetch (STATIC_CONTENT_LOCATION + value).
# So, for example, the application URL '/about' maps to
# the remote URL 'http://code.google.com/.../trunk/frontend/html/about_us.html'
STATIC_CONTENT_FILES = {
  '/about' : 'about_us.html',
  '/privacypolicy' : 'privacy_policy.html',
  '/contentpolicy' : 'content_policy.html',
  '/spreadsheet' : 'spreadsheet.html',
  '/publishers' : 'publishers.html',
  '/help' : 'help.html',
  '/faq' : 'faq.html',
  '/tos' : 'tos.html',
  '/api_tos' : 'api_tos.html',
  '/apps' : 'apps.html',
  '/dmca' : 'dmca.html',
  '/docs/api.html' : 'api.html',
  '/partner_terms' : 'partner_terms.html',
  '/apps/gmail' : 'apps-gmail.html',
  '/apps/typepad' : 'apps-typepad.html',
  '/apps/blogger' : 'apps-blogger.html',
  '/apps/googlesites' : 'apps-googlesites.html',
  '/apps/wordpress' : 'apps-wordpress.html',
  '/code' : 'code.html',
  '/posting' : 'spreadsheet.html',
  '/guide' : 'tour.html',
}
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
main() for sheetchecker
"""
# view classes are inherently not pylint-compatible
# pylint: disable-msg=C0103
# pylint: disable-msg=W0232
# pylint: disable-msg=E1101
# pylint: disable-msg=R0903
import logging
import re
import os
from google.appengine.api import urlfetch
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
import sheetchecker.parse_gspreadsheet as parse_gspreadsheet
CHECK_SHEET_TEMPLATE = "checksheet.html"
def render_template(template_filename, template_values):
  """Render template_filename, resolving it relative to this module's dir."""
  template_dir = os.path.dirname(__file__)
  return template.render(os.path.join(template_dir, template_filename),
                         template_values)
class Check(webapp.RequestHandler):
  """Handler that validates a Google Spreadsheet as a Footprint feed.

  Expects a 'url' query arg containing a spreadsheet URL with a key=
  parameter; fetches the public cells feed and runs the parser on it.
  """
  def get(self):
    """HTTP get method."""
    sheeturl = self.request.get('url')
    template_values = {
      "sheeturl" : sheeturl,
      "sheetfeedurl" : "",
      "msgs" : None,
      "data" : None
      }
    match = re.search(r'key=([^& ]+)', sheeturl)
    if match:
      # build the public GData cells-feed URL for worksheet 1
      url = "http://spreadsheets.google.com/feeds/cells/"
      url += match.group(1).strip() + "/1/public/basic"
      fetch_result = urlfetch.fetch(url)
      if fetch_result.status_code != 200:
        # url is derived from user input -- escape before echoing into HTML
        safe_url = url.replace('&', '&amp;').replace('<', '&lt;')
        safe_url = safe_url.replace('>', '&gt;')
        self.response.out.write("<html><body>error fetching URL " +
                                safe_url + "</body></html>")
        return
      contents = fetch_result.content
      logging.info("fetched %d bytes: %s..." % (len(contents), contents[:80]))
      data, msgs = parse_gspreadsheet.parse(contents)
      logging.info("%d msgs in %s" % (len(msgs), sheeturl))
      template_values["sheetfeedurl"] = url
      template_values["msgs"] = msgs
      template_values["data"] = data
    elif sheeturl != "":
      self.response.out.write("<html><body>malformed sheet URL " +
                              " - missing &key=</body></html>")
      return
    # bugfix: render via render_template() so the template is resolved
    # relative to this module (bare template.render() relied on the CWD).
    self.response.out.write(render_template(CHECK_SHEET_TEMPLATE,
                                            template_values))
# WSGI application: routes the single checker endpoint to the Check handler.
APP = webapp.WSGIApplication(
  [('/sheetchecker/check', Check)],
  debug=True)  # debug=True: render tracebacks in the browser
def main():
  """main for standalone execution."""
  run_wsgi_app(APP)
if __name__ == "__main__":
  main()
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
parser for feed stored in a google spreadsheet
(note that this is different from other parsers inasmuch as it
expects the caller to pass in the providerID and providerName)
"""
# TODO: share this code between frontend and datahub
# see http://code.google.com/p/footprint2009dev/issues/detail?id=150
# typical cell
#<entry>
#<id>http://spreadsheets.google.com/feeds/cells/pMY64RHUNSVfKYZKPoVXPBg
#/1/public/basic/R14C13</id>
#<updated>2009-04-28T03:29:56.957Z</updated>
#<category scheme='http://schemas.google.com/spreadsheets/2006'
#term='http://schemas.google.com/spreadsheets/2006#cell'/>
#<title type='text'>M14</title>
#<content type='text'>ginny@arthur.edu</content>
#<link rel='self' type='application/atom+xml' href='http://spreadsheets.
#google.com/feeds/cells/pMY64RHUNSVfKYZKPoVXPBg/1/public/basic/R14C13'/>
#</entry>
import re
import logging
from google.appengine.api import urlfetch
import geocode
MAX_BLANKROWS = 2  # stop scanning after this many consecutive blank rows
# TODO: right thing is to create a class for spreadsheets...
# Module-level parse state, reset at the top of parse():
CURRENT_ROW = None  # row being validated -- used to prefix error messages
MESSAGES = []  # accumulated "ERROR: ..." strings for the current parse
DATA = None  # dict mapping 'R<row>C<col>' cell keys to cell text
HEADER_STARTCOL = None  # set by find_header_row()
HEADER_ROW = None  # set by find_header_row()
def parser_error(msg):
  """Record an ERROR message in MESSAGES, with row context when available.

  When CURRENT_ROW is set, the message is prefixed with the row number and
  the first few cell values so the user can locate the offending row.
  """
  global MESSAGES
  # idiom: compare to None with 'is', not '!='/'=='
  if CURRENT_ROW is not None:
    msg = "row "+str(CURRENT_ROW)+": "+msg
    msg += "<br/>\n starting with: "
    for col in range(5):
      val = cellval(CURRENT_ROW, col)
      if val is None:
        val = ""
      msg += val+" | "
  MESSAGES.append("ERROR: "+msg)
def raw_recordval(record, key):
  """Return the stripped string value for key, or "" when absent."""
  try:
    return str(record[key]).strip()
  except KeyError:
    return ""
def recordval(record, key):
  """Return the cell value with every whitespace run collapsed to one space."""
  # split()/join collapses runs of whitespace, same as re.sub(r'\s+', ' ', ...)
  # on an already-stripped string.
  return ' '.join(raw_recordval(record, key).split())
KNOWN_ORGS = {}
def get_dtval(record, field_name):
  """Validate a field as an MM/DD/YYYY date; report an error if malformed."""
  val = recordval(record, field_name)
  if val and not re.match(r'\d\d?/\d\d?/\d\d\d\d', val):
    parser_error("bad value in "+field_name+": '"+val+"'-- try MM/DD/YYYY")
  return val
def get_tmval(record, field_name):
  """Validate a field as an HH:MM[:SS] time-of-day; report an error if malformed."""
  val = recordval(record, field_name)
  if val and not re.match(r'\d?\d:\d\d(:\d\d)?', val):
    parser_error("bad value in "+field_name+": '"+val+"'-- try HH:MM:SS")
  return val
def get_boolval(record, field_name):
  """Validate a field as yes/no -- note that blank is allowed."""
  val = recordval(record, field_name)
  # TODO: support these alternates in the datahub!
  accepted = ("y", "yes", "n", "no", "")
  if val.lower() not in accepted:
    parser_error("bad value in "+field_name+": '"+val+"'-- try 'Yes' or 'No'")
  return val
def get_intval(record, field_name):
  """parse a field as a non-negative integer."""
  val = recordval(record, field_name)
  # note: re.match only anchors at the start, so trailing junk is tolerated
  if val != "" and not re.match('[0-9]+', val):
    parser_error("bad value in "+field_name+": '"+val+"'-- try a number")
  return val
def get_minlen(record, field_name, minlen):
  """Validate that a required field is present and at least minlen chars."""
  val = recordval(record, field_name)
  if not val:
    parser_error("missing value in "+field_name+": '"+val+"'-- field required.")
    return val
  if len(val) < minlen:
    parser_error("value not long enough in "+field_name+": '"+val+"'-- "+
                 "requires %d characters" % minlen)
  return val
def get_blank(record, field_name, reason=" in this case."):
  """Validate that a field is blank; report an error when it is not."""
  val = recordval(record, field_name)
  if val:
    parser_error("field "+field_name+" must be blank"+reason)
  return val
def cellval(row, col):
  """Return the text of cell (row, col) from DATA, or None when absent."""
  return DATA.get('R' + str(row) + 'C' + str(col))
def parse_gspreadsheet(instr, updated):
  """Load a GData cells feed into the module-level DATA dict.

  Args:
    instr: raw XML of the spreadsheet's public cells feed.
    updated: dict, filled in with cellkey ('RxCy') -> last-updated timestamp.
  Returns:
    (maxrow, maxcol) as ints: largest row/column indices seen.
  Side effect: populates DATA with cellkey -> stripped cell text.
  """
  # Deliberately regex-based instead of a real XML parser, for speed.
  regexp = re.compile(r'<entry>.+?(R(\d+)C(\d+))</id>'+
                      r'<updated.*?>(.+?)</updated>.*?'+
                      r'<content.*?>(.+?)</content>.+?</entry>', re.DOTALL)
  maxrow = maxcol = 0
  for match in re.finditer(regexp, instr):
    # strip fractional seconds and trailing 'Z' from the timestamp
    lastupd = re.sub(r'([.][0-9]+)?Z?$', '', match.group(4)).strip()
    updated[match.group(1)] = lastupd.strip("\r\n\t ")
    val = match.group(5).strip("\r\n\t ")
    DATA[match.group(1)] = val
    # bugfix: match.group() returns strings -- without int() the max
    # tracking compared strings ("9" > "10") and produced wrong maxima.
    row = int(match.group(2))
    if row > maxrow:
      maxrow = row
    col = int(match.group(3))
    if col > maxcol:
      maxcol = col
  return maxrow, maxcol
def find_header_row(regexp_str):
  """Locate the header row in a footprint spreadsheet.

  Scans the first 20 rows x 5 columns for a cell matching regexp_str and
  stores the hit in HEADER_ROW/HEADER_STARTCOL; reports an error when no
  header is found.
  """
  regexp = re.compile(regexp_str, re.IGNORECASE|re.DOTALL)
  global HEADER_ROW, HEADER_STARTCOL
  HEADER_ROW = HEADER_STARTCOL = None
  for row_idx in range(20):
    if HEADER_ROW:
      break
    for col_idx in range(5):
      cell = cellval(row_idx, col_idx)
      if cell and regexp.search(cell):
        HEADER_ROW, HEADER_STARTCOL = row_idx, col_idx
        break
  if HEADER_ROW is None or HEADER_STARTCOL is None:
    parser_error("failed to parse this as a footprint spreadsheet. "+
                 "No header row found: looked for "+regexp_str)
def parse(instr):
  """main function for parsing footprint spreadsheets.

  Parses the cells-feed XML in instr, validates every record, and returns
  (DATA, MESSAGES): DATA maps 'R<row>C<col>' keys to cell text, MESSAGES
  lists validation errors (or a single success message when clean).
  """
  # TODO: a spreadsheet should really be an object and cellval a method
  global DATA, MESSAGES, CURRENT_ROW
  DATA = {}
  MESSAGES = []
  CURRENT_ROW = None
  updated = {}
  parse_gspreadsheet(instr, updated)
  # find header row: look for "opportunity title" (case insensitive)
  find_header_row(r'opportunity\s*title')
  if not HEADER_ROW or not HEADER_STARTCOL:
    return DATA, MESSAGES
  # map fuzzy header strings to canonical field names
  header_colidx = {}
  header_names = {}
  header_col = HEADER_STARTCOL
  while True:
    header_str = cellval(HEADER_ROW, header_col)
    if not header_str:
      break
    field_name = None
    header_str = header_str.lower()
    if header_str.find("title") >= 0:
      field_name = "OpportunityTitle"
    elif header_str.find("organization") >= 0 and \
        header_str.find("sponsor") >= 0:
      field_name = "SponsoringOrganization"
    elif header_str.find("description") >= 0:
      field_name = "Description"
    elif header_str.find("skills") >= 0:
      field_name = "Skills"
    elif header_str.find("location") >= 0 and header_str.find("name") >= 0:
      field_name = "LocationName"
    elif header_str.find("street") >= 0:
      field_name = "LocationStreet"
    elif header_str.find("city") >= 0:
      field_name = "LocationCity"
    elif header_str.find("state") >= 0 or header_str.find("province") >= 0:
      field_name = "LocationProvince"
    elif header_str.find("zip") >= 0 or header_str.find("postal") >= 0:
      field_name = "LocationPostalCode"
    elif header_str.find("country") >= 0:
      field_name = "LocationCountry"
    elif header_str.find("start") >= 0 and header_str.find("date") >= 0:
      field_name = "StartDate"
    elif header_str.find("start") >= 0 and header_str.find("time") >= 0:
      field_name = "StartTime"
    elif header_str.find("end") >= 0 and header_str.find("date") >= 0:
      field_name = "EndDate"
    elif header_str.find("end") >= 0 and header_str.find("time") >= 0:
      field_name = "EndTime"
    elif header_str.find("contact") >= 0 and header_str.find("name") >= 0:
      field_name = "ContactName"
    elif header_str.find("email") >= 0 or header_str.find("e-mail") >= 0:
      field_name = "ContactEmail"
    elif header_str.find("phone") >= 0:
      field_name = "ContactPhone"
    elif header_str.find("website") >= 0 or header_str.find("url") >= 0:
      field_name = "URL"
    elif header_str.find("often") >= 0:
      field_name = "Frequency"
    elif header_str.find("days") >= 0 and header_str.find("week") >= 0:
      field_name = "DaysOfWeek"
    elif header_str.find("paid") >= 0:
      field_name = "Paid"
    elif header_str.find("commitment") >= 0 or header_str.find("hours") >= 0:
      field_name = "CommitmentHours"
    elif header_str.find("age") >= 0 and header_str.find("min") >= 0:
      field_name = "MinimumAge"
    elif header_str.find("kid") >= 0:
      field_name = "KidFriendly"
    elif header_str.find("senior") >= 0 and header_str.find("only") >= 0:
      field_name = "SeniorsOnly"
    elif header_str.find("sex") >= 0 or header_str.find("gender") >= 0:
      field_name = "SexRestrictedTo"
    elif header_str.find("volunteer appeal") >= 0:
      field_name = None  # recognized but intentionally unmapped
    else:
      parser_error("couldn't map header '"+header_str+"' to a field name.")
    if field_name is not None:
      header_colidx[field_name] = header_col
      header_names[header_col] = field_name
    header_col += 1
  if len(header_names) < 10:
    parser_error("too few fields found: "+str(len(header_names)))
  # check to see if there's a header-description row
  header_desc = cellval(HEADER_ROW+1, HEADER_STARTCOL)
  if not header_desc:
    parser_error("blank row not allowed below header row")
    # bugfix: cellval returns None for blank cells -- without this fallback
    # the .lower() call below crashed with AttributeError.
    header_desc = ""
  header_desc = header_desc.lower()
  data_startrow = HEADER_ROW + 1
  if header_desc.find("up to") >= 0:
    data_startrow += 1
  # find the data
  CURRENT_ROW = data_startrow
  blankrows = 0
  numopps = 0
  while True:
    blankrow = True
    record = {}
    record['LastUpdated'] = '0000-00-00'
    for field_name in header_colidx:
      col = header_colidx[field_name]
      val = cellval(CURRENT_ROW, col)
      if val:
        blankrow = False
      else:
        val = ""
      record[field_name] = val
      key = 'R'+str(CURRENT_ROW)+'C'+str(col)
      if (key in updated and
          updated[key] > record['LastUpdated']):
        record['LastUpdated'] = updated[key]
    if blankrow:
      blankrows += 1
      if blankrows > MAX_BLANKROWS:
        break
    else:
      numopps += 1
      blankrows = 0
      record['oppid'] = str(numopps)
      get_minlen(record, 'OpportunityTitle', 4)
      get_minlen(record, 'Description', 15)
      location_name = get_minlen(record, 'LocationName', 4)
      # bugfix: "Virtual"/"VIRTUAL" used to be flagged as a *misspelling*;
      # accept any capitalization of "virtual" and only warn on "virtaul".
      lc_location = location_name.lower()
      if lc_location == "virtual":
        is_virtual = True
      elif lc_location == "virtaul":
        parser_error("misspelled location name: "+location_name+
                     " -- perhaps you meant 'virtual'? (note spelling)")
        is_virtual = True
      else:
        is_virtual = False
      if is_virtual:
        reason = " for virtual opportunities-- if you want both a location and"
        reason += " a virtual opportunity, then provide two separate records."
        get_blank(record, "LocationStreet", reason)
        get_blank(record, "LocationCity", reason)
        get_blank(record, "LocationProvince", reason)
        get_blank(record, "LocationPostalCode", reason)
        get_blank(record, "LocationCountry", reason)
      else:
        # TODO: appengine 30sec timeouts render this ambiguous/confusing
        # for users
        check_locations = False
        if check_locations:
          addr = recordval(record, "LocationStreet")
          addr += " "+recordval(record, "LocationCity")
          addr += " "+recordval(record, "LocationProvince")
          addr += " "+recordval(record, "LocationPostalCode")
          addr += " "+recordval(record, "LocationCountry")
          latlong = geocode.geocode(addr)
          if latlong == "":
            parser_error("could not convert '"+addr+"' to a location "+
                         "on the map: changing the address will help your "+
                         "listing be found by users.")
      start_date = recordval(record, "StartDate")
      # bugfix: "Ongoing" used to be flagged as a *misspelling*; accept any
      # capitalization of "ongoing" and only warn on other "ong..." typos.
      lc_start = start_date.lower()
      if lc_start == "ongoing":
        ongoing = True
      elif lc_start.find("ong") == 0:
        parser_error("misspelled Start Date: "+start_date+
                     " -- perhaps you meant 'ongoing'? (note spelling)")
        ongoing = True
      elif start_date == "":
        parser_error("Start Date may not be blank.")
        ongoing = True
      else:
        ongoing = False
      if ongoing:
        start_time = recordval(record, "StartTime")
        if start_time != "" and start_time != "ongoing":
          parser_error("ongoing event should have blank Start Time.")
        end_date = recordval(record, "EndDate")
        if end_date != "" and end_date != "ongoing":
          parser_error("ongoing event should have blank End Date.")
        end_time = recordval(record, "EndTime")
        if end_time != "" and end_time != "ongoing":
          parser_error("ongoing event should have blank End Time.")
      else:
        get_dtval(record, "StartDate")
        get_tmval(record, "StartTime")
        get_dtval(record, "EndDate")
        get_tmval(record, "EndTime")
      email = recordval(record, "ContactEmail")
      if email != "" and email.find("@") == -1:
        parser_error("malformed email address: "+email)
      url = recordval(record, "URL")
      # TODO: appengine 30sec timeouts render this ambiguous/confusing
      # for users
      check_urls = False
      if check_urls:
        try:
          fetch_result = urlfetch.fetch(url)
          if fetch_result.status_code >= 400:
            # bugfix: status_code is an int -- str() it before concatenation
            parser_error("problem fetching url '"+url+"': HTTP status code "+
                         str(fetch_result.status_code))
        except urlfetch.InvalidURLError:
          parser_error("invalid url '"+url+"'")
        except urlfetch.ResponseTooLargeError:
          parser_error("problem fetching url '"+url+"': response too large")
        except:
          parser_error("problem fetching url '"+url+"'")
      daysofweek = recordval(record, "DaysOfWeek").split(",")
      for dow in daysofweek:
        lcdow = dow.strip().lower()
        if lcdow not in ["sat", "saturday",
                         "sun", "sunday",
                         "mon", "monday",
                         "tue", "tues", "tuesday",
                         "wed", "weds", "wednesday",
                         "thu", "thur", "thurs", "thursday",
                         "fri", "friday", ""]:
          # TODO: support these alternates in the datahub!
          parser_error("malformed day of week: '%s'" % dow)
      get_boolval(record, "Paid")
      get_intval(record, "CommitmentHours")
      get_intval(record, "MinimumAge")
      get_boolval(record, "KidFriendly")
      get_boolval(record, "SeniorsOnly")
      sexrestrict = recordval(record, "SexRestrictedTo")
      if sexrestrict.lower() not in ["women", "men", "either", ""]:
        parser_error("bad SexRestrictedTo-- try Men, Women, Either or (blank).")
      org = recordval(record, 'SponsoringOrganization')
      if org == "":
        parser_error("missing Sponsoring Organization-- this field is required."+
                     " (it can be an informal name, or even a person's name).")
      else:
        get_minlen(record, 'SponsoringOrganization', 4)
      freq = recordval(record, 'Frequency').lower()
      if not (freq == "" or freq == "once" or freq == "daily" or
              freq == "weekly" or freq == "every other week" or
              freq == "monthly"):
        parser_error("unsupported frequency: '"+
                     recordval(record, 'Frequency')+"'")
    CURRENT_ROW += 1
  if len(MESSAGES) == 0:
    MESSAGES.append("spreadsheet parsed correctly! Feel free to submit.")
  return DATA, MESSAGES
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
toss all the scoring code into one place (rather than a class file)
because scoring tends to get complex quickly.
"""
from datetime import datetime
import logging
import math
import api
import view_helper
def compare_scores(val1, val2):
  """cmp-style comparator: higher .score sorts earlier (descending order)."""
  if val1.score < val2.score:
    return 1
  if val1.score > val2.score:
    return -1
  return 0
def score_results_set(result_set, args):
  """sort results by score, and for each, set .score, .scorestr, .score_notes

  Scoring combines backend rank (keyword queries only), start/end-date
  proximity, event duration, geographic distance and user interest.
  """
  logging.debug(str(datetime.now())+": score_results_set(): start")
  idlist = map(lambda x: x.item_id, result_set.results)
  # handle rescoring on interest weights
  others_interests = view_helper.get_interest_for_opportunities(idlist)
  total_results = float(len(result_set.results))
  # loop-invariant constants and timestamp, hoisted out of the results loop
  ONEDAY = 24.0 * 3600.0
  MAXTIME = 500.0 * ONEDAY
  now = datetime.now()
  for i, res in enumerate(result_set.results):
    score = 1.0
    score_notes = ""
    # keywordless queries should rank by location and time, not relevance.
    if api.PARAM_Q in args and args[api.PARAM_Q] != "":
      # lower ranking items in the backend = lower ranking here (roughly 1/rank)
      rank_mult = (total_results - i)/total_results
      score *= rank_mult
      # bugfix: multiplier and rank were swapped in the format arguments
      score_notes += " backend multiplier=%.3f (rank=%d)\n" % (rank_mult, i)
    # TODO: match on start time, etc.
    start_delta = res.startdate - now
    start_delta_secs = start_delta.days*ONEDAY + start_delta.seconds
    start_delta_secs = min(max(start_delta_secs, 0), MAXTIME)
    end_delta = res.enddate - now
    end_delta_secs = end_delta.days*ONEDAY + end_delta.seconds
    end_delta_secs = min(max(end_delta_secs, start_delta_secs), MAXTIME)
    date_dist_multiplier = 1
    if end_delta_secs <= 0:
      # event already over: bury it
      date_dist_multiplier = .0001
    if start_delta_secs > 0:
      # further out start date = lower rank (roughly 1/numdays)
      date_dist_multiplier = 1.0/(start_delta_secs/ONEDAY)
    score *= date_dist_multiplier
    score_notes += " date_mult=" + str(date_dist_multiplier)
    score_notes += " start=%s (%+g days)" % (
      res.startdate, start_delta_secs / ONEDAY)
    score_notes += " end=%s (%+g days)" % (
      res.enddate, end_delta_secs / ONEDAY)
    score_notes += "\n"
    # boost short events
    delta_secs = end_delta_secs - start_delta_secs
    if delta_secs > 0:
      # up to 14 days gets a boost
      ddays = 10*max(14 - delta_secs/ONEDAY, 1.0)
      date_delta_multiplier = math.log10(ddays)
    else:
      date_delta_multiplier = 1
    score *= date_delta_multiplier
    score_notes += " date_delta_mult=%.3f (%g days)\n" % (
      date_delta_multiplier, delta_secs / float(ONEDAY))
    if (("lat" not in args) or args["lat"] == "" or
        ("long" not in args) or args["long"] == "" or
        res.latlong == ""):
      # no usable location on either side: neutral-ish multiplier
      geo_dist_multiplier = 0.5
    else:
      # TODO: error in the DB, we're getting same geocodes for everything
      lat, lng = res.latlong.split(",")
      latdist = float(lat) - float(args["lat"])
      lngdist = float(lng) - float(args["long"])
      # keep one value to right of decimal
      delta_dist = latdist*latdist + lngdist*lngdist
      logging.debug("qloc=%s,%s - listing=%g,%g - dist=%g,%g - delta = %g" %
                    (args["lat"], args["long"], float(lat), float(lng),
                     latdist, lngdist, delta_dist))
      # reasonably local
      if delta_dist > 0.025:
        delta_dist = 0.9 + delta_dist
      else:
        delta_dist = delta_dist / (0.025 / 0.9)
      if delta_dist > 0.999:
        delta_dist = 0.999
      geo_dist_multiplier = 1.0 - delta_dist
    interest = -1
    if res.item_id in others_interests:
      interest = others_interests[res.item_id]
    elif "test_stars" in args:
      # deterministic fake star ratings for testing
      interest = i % 6
    score *= geo_dist_multiplier
    score_notes += " geo multiplier=" + str(geo_dist_multiplier)
    if interest >= 0:
      # TODO: remove hocus-pocus math
      interest_weight = (math.log(interest+1.0)/math.log(6.0))**3
      score *= interest_weight
      score_notes += " "+str(interest)+"-stars="+str(interest_weight)
    res.set_score(score, score_notes)
  result_set.results.sort(cmp=compare_scores)
  logging.debug(str(datetime.now())+": score_results_set(): done")
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
high-level routines for querying backend datastores and processing the results.
"""
import calendar
import datetime
import hashlib
import logging
from google.appengine.api import memcache
import api
import base_search
import geocode
import scoring
from fastpageviews import pagecount
CACHE_TIME = 24*60*60 # seconds (one day) -- lifetime of cached result sets
# args is expected to be a list of args
# and any path info is supposed to be homogenized into this,
# e.g. /listing/56_foo should be resolved into [('id',56)]
# by convention, repeated args are ignored, LAST ONE wins.
def search(args):
  """run a search against the backend specified by the 'backend' arg.
  Returns a result set that's been (a) de-dup'd ("merged") and (b) truncated
  to the appropriate number of results ("clipped").  Impression tracking
  happens here as well.

  Results are memcached for CACHE_TIME keyed on an md5 of the normalized
  query string; the cache can be defeated with the cache=0 query param.
  """
  # TODO(paul): Create a QueryParams object to handle validation.
  #     Validation should be lazy, so that (for example) here
  #     only 'num' and 'start' are validated, since we don't
  #     yet need the rest.  QueryParams can have a function to
  #     create a normalized string, for the memcache key.
  # pylint: disable-msg=C0321
  normalize_query_values(args)
  # TODO: query param (& add to spec) for defeating the cache (incl FastNet)
  # I (mblain) suggest using "zx", which is used at Google for most services.
  # TODO: Should construct our own normalized query string instead of
  # using the browser's querystring.
  # sorted key=value pairs make the cache key order-independent
  args_array = [str(key)+'='+str(value) for (key, value) in args.items()]
  args_array.sort()
  normalized_query_string = str('&'.join(args_array))
  use_cache = True
  if api.PARAM_CACHE in args and args[api.PARAM_CACHE] == '0':
    use_cache = False
    logging.debug('Not using search cache')
  # note: key cannot exceed 250 bytes
  memcache_key = hashlib.md5('search:' + normalized_query_string).hexdigest()
  start = int(args[api.PARAM_START])
  num = int(args[api.PARAM_NUM])
  result_set = None
  if use_cache:
    result_set = memcache.get(memcache_key)
    if result_set:
      logging.debug('in cache: "' + normalized_query_string + '"')
      # a cached set may have been clipped shorter than this request needs
      if len(result_set.merged_results) < start + num:
        logging.debug('but too small-- rerunning query...')
        result_set = None
    else:
      logging.debug('not in cache: "' + normalized_query_string + '"')
  if not result_set:
    result_set = fetch_result_set(args)
    memcache.set(memcache_key, result_set, time=CACHE_TIME)
  result_set.clip_merged_results(start, num)
  # TODO: for better results, we should segment CTR computation by
  # homepage vs. search views, etc. -- but IMHO it's better to give
  # up and outsource stats to a web-hosted service.
  if 'key' in args and args['key'] == pagecount.TEST_API_KEY:
    logging.debug("search(): not tracking testapi key views")
    # needed to populate stats
    result_set.track_views(num_to_incr=0)
  else:
    result_set.track_views(num_to_incr=1)
  return result_set
def normalize_query_values(args):
  """Pre-processes several values related to the search API that might be
  present in the query string: clamps num/start/overfetch into their legal
  ranges and expands a symbolic time period into concrete date args."""
  num = 10
  if api.PARAM_NUM in args:
    # clamp requested result count to [1, 999]
    num = min(max(int(args[api.PARAM_NUM]), 1), 999)
  args[api.PARAM_NUM] = num
  start_index = 1
  if api.PARAM_START in args:
    # clamp start index so start+num never exceeds 1000
    start_index = min(max(int(args[api.PARAM_START]), 1), 1000 - num)
  args[api.PARAM_START] = start_index
  overfetch_ratio = 2.0
  if api.PARAM_OVERFETCH_RATIO in args:
    overfetch_ratio = min(max(float(args[api.PARAM_OVERFETCH_RATIO]), 1.0),
                          10.0)
  args[api.PARAM_OVERFETCH_RATIO] = overfetch_ratio
  if api.PARAM_TIMEPERIOD in args:
    # No need to pass thru, just convert period to discrete date args.
    period = args.pop(api.PARAM_TIMEPERIOD)
    date_range = None
    today = datetime.date.today()
    if period == 'today':
      date_range = (today, today)
    elif period == 'this_weekend':
      saturday = today + datetime.timedelta(days=5 - today.weekday())
      date_range = (saturday, saturday + datetime.timedelta(days=1))
    elif period == 'this_week':
      monday = today - datetime.timedelta(days=today.weekday())
      date_range = (monday, monday + datetime.timedelta(days=6))
    elif period == 'this_month':
      first_of_month = today - datetime.timedelta(days=today.day - 1)
      month_days = calendar.monthrange(today.year, today.month)[1]
      date_range = (first_of_month,
                    first_of_month + datetime.timedelta(days=month_days - 1))
    if date_range:
      start_date = date_range[0].strftime("%Y-%m-%d")
      end_date = date_range[1].strftime("%Y-%m-%d")
      args[api.PARAM_VOL_STARTDATE] = start_date
      args[api.PARAM_VOL_ENDDATE] = end_date
      logging.debug("date range: "+ start_date + '...' + end_date)
def fetch_result_set(args):
  """Validate the search parameters, and perform the search.

  Resolves the vol_loc arg to lat/long (+ a zoom-derived default radius),
  runs the backend search, scores and dedups the results, and re-queries
  once with the maximum overfetch ratio when too few merged results remain.
  """
  if api.PARAM_Q not in args:
    args[api.PARAM_Q] = ""
  # api.PARAM_OUTPUT is only used by callers (the view)
  #   (though I can imagine some output formats dictating which fields are
  #   retrieved from the backend...)
  #
  #if args[api.PARAM_OUTPUT] not in ['html', 'tsv', 'csv', 'json', 'rss',
  #  'rssdesc', 'xml', 'snippets_list']
  #
  # TODO: csv list of fields
  #if args[api.PARAM_FIELDS] not in ['all', 'rss']:
  # TODO: process dbg -- currently, anything goes...
  # RESERVED: v
  # RESERVED: sort
  # RESERVED: type
  args["lat"] = args["long"] = ""
  if api.PARAM_VOL_LOC in args:
    zoom = 5  # default zoom when geocoding does not supply one
    if geocode.is_latlong(args[api.PARAM_VOL_LOC]):
      args["lat"], args["long"] = args[api.PARAM_VOL_LOC].split(",")
    elif geocode.is_latlongzoom(args[api.PARAM_VOL_LOC]):
      args["lat"], args["long"], zoom = args[api.PARAM_VOL_LOC].split(",")
    else:
      # free-text location: geocode it to "lat,long,zoom"
      res = geocode.geocode(args[api.PARAM_VOL_LOC])
      if res != "":
        args["lat"], args["long"], zoom = res.split(",")
    args["lat"] = args["lat"].strip()
    args["long"] = args["long"].strip()
    if api.PARAM_VOL_DIST not in args:
      # derive a default search radius (miles) from the geocoder zoom level
      zoom = int(zoom)
      if zoom == 1: # country
        args[api.PARAM_VOL_DIST] = 500
      elif zoom == 2: # region
        args[api.PARAM_VOL_DIST] = 300
      elif zoom == 3: # county
        args[api.PARAM_VOL_DIST] = 100
      elif zoom == 4 or zoom == 0: # city/town
        args[api.PARAM_VOL_DIST] = 50
      elif zoom == 5: # postal code
        args[api.PARAM_VOL_DIST] = 25
      elif zoom > 5: # street or level
        args[api.PARAM_VOL_DIST] = 10
  else:
    args[api.PARAM_VOL_LOC] = args[api.PARAM_VOL_DIST] = ""
  result_set = base_search.search(args)
  scoring.score_results_set(result_set, args)
  result_set.dedup()
  if (not result_set.has_more_results
      and result_set.num_merged_results < int(args[api.PARAM_NUM])
      and result_set.estimated_merged_results >= int(args[api.PARAM_NUM])
      and float(args[api.PARAM_OVERFETCH_RATIO]) < api.CONST_MAX_OVERFETCH_RATIO):
    # Note: recursion terminated by value of overfetch >= api.CONST_MAX_OVERFETCH_RATIO
    args[api.PARAM_OVERFETCH_RATIO] = api.CONST_MAX_OVERFETCH_RATIO
    logging.info("requery with overfetch=%d" % args[api.PARAM_OVERFETCH_RATIO])
    # requery now w/ max overfetch_ratio
    result_set = fetch_result_set(args)
  return result_set
| Python |
#!/usr/bin/python2.5
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User Info module (userinfo).
This file contains the base class for the userinfo classes.
It also contains (at least for now) subclasses for different login types."""
__author__ = 'matthew.blain@google.com'
import logging
import os
from django.utils import simplejson
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from StringIO import StringIO
from facebook import Facebook
import deploy
import models
import utils
# Exception hierarchy for this module.
class Error(Exception): pass  # base class for userinfo errors
class NotLoggedInError(Error): pass  # no valid login cookie/session present
class ThirdPartyError(Error): pass  # upstream identity service failed
USERINFO_CACHE_TIME = 120  # seconds -- memcache TTL for user/friend lookups
# Keys specific to Footprint
FRIENDCONNECT_KEY = '02962301966004179520'  # site id; part of the fcauth cookie name
def get_cookie(cookie_name):
  """Return the value of the named request cookie, or None when absent.

  Parses the raw HTTP_COOKIE environment string directly.
  """
  if 'HTTP_COOKIE' in os.environ:
    for chunk in os.environ['HTTP_COOKIE'].split('; '):
      # bugfix: split on the FIRST '=' only -- cookie values (e.g. auth
      # tokens, base64) may themselves contain '=' and were truncated;
      # a valueless cookie also no longer raises IndexError.
      name, _, value = chunk.partition('=')
      if name == cookie_name:
        return value
  return None
def get_user(request):
  """Return a User for the current session, or None when not logged in.

  Tries each supported login type in order, caching the constructed User
  in memcache (keyed on the login cookie) for USERINFO_CACHE_TIME seconds.
  """
  for cls in (TestUser, FriendConnectUser, FacebookUser):
    cookie = cls.get_cookie()
    if cookie:
      key = 'cookie:' + cookie
      user = memcache.get(key)
      if not user:
        try:
          user = cls(request)
          memcache.set(key, user, time = USERINFO_CACHE_TIME)
        except:
          # This hides all errors from the Facebook client library
          # TODO(doll): Hand back an error message to the user
          # NOTE(review): returning None here aborts the loop entirely and
          # never tries the remaining login types -- confirm that's intended.
          logging.exception("Facebook or Friend Connect client exception.")
          return None
      return user
def get_usig(user):
  """Get a signature for the current user suitable for an XSRF token."""
  if not user:
    return None
  cookie = user.get_cookie()
  if cookie:
    return utils.signature(cookie)
  return None
class User(object):
  """The User info for a user related to a currently logged in session.

  Subclasses implement a specific login provider and are expected to
  supply get_cookie() and get_friends_by_url().
  """
  def __init__(self, account_type, user_id, display_name, thumbnail_url):
    self.account_type = account_type  # e.g. 'friendconnect', 'facebook', 'test'
    self.user_id = user_id  # provider-specific user id (string)
    self.display_name = display_name
    self.thumbnail_url = thumbnail_url
    self.user_info = None  # lazily loaded by get_user_info()
    self.friends = None  # lazily loaded by load_friends()
    self.total_friends = None

  @staticmethod
  def get_current_user(self):
    # NOTE(review): a @staticmethod taking 'self' is unusual -- signature
    # kept unchanged for compatibility; subclasses should override.
    raise NotImplementedError

  def get_user_info(self):
    """Fetch (and cache on this object) the datastore UserInfo record."""
    if not self.user_info:
      self.user_info = models.UserInfo.get_or_insert_user(self.account_type,
                                                          self.user_id)
    return self.user_info

  def load_friends(self):
    """Load this user's friends list, memcached for USERINFO_CACHE_TIME."""
    key_suffix = self.account_type + ":" + self.user_id
    key = 'friends:' + key_suffix
    total_key = 'total_friends:' + key_suffix
    self.friends = memcache.get(key)
    self.total_friends = memcache.get(total_key)
    if not self.friends:
      self.friends = self.get_friends_by_url()  # dropped stray ';'
      memcache.set(key, self.friends, time = USERINFO_CACHE_TIME)
      memcache.set(total_key, self.total_friends, time = USERINFO_CACHE_TIME)
    return self.friends

  def get_friends_by_url(self):
    """Subclass hook: fetch the friends list from the identity provider."""
    raise NotImplementedError

  @classmethod
  def is_logged_in(cls):
    """True if the provider's login cookie is present."""
    return bool(cls.get_cookie())  # idiom: bool() instead of 'not not'
class FriendConnectUser(User):
  """A friendconnect user."""
  BASE_URL = 'http://www.google.com/friendconnect/api/people/'
  USER_INFO_URL = BASE_URL + '@viewer/@self?fcauth=%s'
  FRIEND_URL = BASE_URL + '@viewer/@friends?fcauth=%s'

  def __init__(self, request):
    """Creates a friendconnect user from the current env, or raises error."""
    self.fc_user_info = self.get_fc_user_info()
    super(FriendConnectUser, self).__init__(
        models.UserInfo.FRIENDCONNECT,
        self.fc_user_info['entry']['id'],
        self.fc_user_info['entry']['displayName'],
        self.fc_user_info['entry']['thumbnailUrl'])

  def get_friends_by_url(self):
    """Fetch the viewer's friends from the Friend Connect REST API."""
    friend_cookie = self.get_cookie()
    if not friend_cookie:
      raise NotLoggedInError()
    self.friends = []
    url = self.FRIEND_URL % friend_cookie
    result = urlfetch.fetch(url)
    if result.status_code == 200:
      friend_info = simplejson.load(StringIO(result.content))
      self.total_friends = friend_info['totalResults']
      for friend_object in friend_info['entry']:
        friend = User(
            models.UserInfo.FRIENDCONNECT,
            friend_object['id'],
            friend_object['displayName'],
            friend_object['thumbnailUrl'])
        self.friends.append(friend)
    # non-200 leaves self.friends empty -- best effort
    return self.friends

  @classmethod
  def get_cookie(cls):
    """The fcauth<site-id> cookie carries the Friend Connect auth token."""
    return get_cookie('fcauth' + FRIENDCONNECT_KEY)

  @classmethod
  def get_fc_user_info(cls):
    """Fetch the viewer's profile; raises when not logged in or on failure."""
    friend_cookie = cls.get_cookie()
    if not friend_cookie:
      raise NotLoggedInError()
    # bugfix: removed a stray 'return' here that made the rest of this
    # method unreachable -- it always returned None, which then crashed
    # __init__ when subscripting the result.
    url = cls.USER_INFO_URL % friend_cookie
    result = urlfetch.fetch(url)
    if result.status_code == 200:
      user_info = simplejson.load(StringIO(result.content))
      return user_info
    else:
      raise ThirdPartyError()
class FacebookUser(User):
  """A Facebook Connect user."""

  def __init__(self, request):
    """Creates a user from the Facebook Connect session, or raises error.

    Raises:
      NotLoggedInError: if there is no valid Connect session on the request.
    """
    self.facebook = Facebook(deploy.get_facebook_key(),
                             deploy.get_facebook_secret())
    if not self.facebook.check_connect_session(request):
      raise NotLoggedInError()
    info = self.facebook.users.getInfo([self.facebook.uid],
                                       ['name', 'pic_square_with_logo'])[0]
    super(FacebookUser, self).__init__(
        models.UserInfo.FACEBOOK,
        self.facebook.uid,
        info['name'],
        info['pic_square_with_logo'])

  def get_friends_by_url(self):
    """Fetches the app-using Facebook friends of the current user.

    Also sets self.total_friends as a side effect.

    Returns:
      A list of User objects (at most 20 -- see TODO).
    """
    if not self.facebook:
      raise NotLoggedInError()
    self.friends = []
    friend_ids = self.facebook.friends.getAppUsers()
    if not friend_ids:
      friend_ids = []  # Force return type to be a list, not a dict or None.
    self.total_friends = len(friend_ids)
    if friend_ids:
      # TODO: handle >20 friends.
      # Fix: pass the id list itself; the old code wrapped it in another
      # list ([friend_ids[0:20]]), sending a nested list to the API.
      friend_objects = self.facebook.users.getInfo(
          friend_ids[0:20], ['name', 'pic_square_with_logo'])
      for friend_object in friend_objects:
        # Fix: the uid was wrapped in py2 backticks (repr); use str() so a
        # long uid doesn't pick up a trailing 'L'.
        friend = User(
            models.UserInfo.FACEBOOK,
            str(friend_object['uid']),
            friend_object['name'],
            friend_object['pic_square_with_logo'])
        self.friends.append(friend)
    return self.friends

  @classmethod
  def get_cookie(cls):
    """Returns the Facebook Connect session cookie value, or None."""
    return get_cookie(deploy.get_facebook_key())
class TestUser(User):
  """A really simple user example."""

  def __init__(self, request):
    """Builds the test user from the test cookie, or raises error."""
    cookie = self.get_cookie()
    if not cookie:
      raise NotLoggedInError()
    # The cookie value doubles as both the user id and the display name.
    super(TestUser, self).__init__(models.UserInfo.TEST, cookie, cookie,
                                   'images/Event-Selected-Star.png')

  @classmethod
  def get_cookie(cls):
    """Returns the test login cookie value, or None."""
    return get_cookie('footprinttest')

  def get_friends_by_url(self):
    # TODO: Something clever for testing--like all TestUser?
    return []
| Python |
#!/usr/bin/env python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Creates backup of tables.
"""
import sys
import logging
import getopt
import urllib2
import datetime
from datetime import date
def print_usage_exit(code):
""" print usage and exit """
print sys.modules['__main__'].__doc__ % sys.argv[0]
sys.stdout.flush()
sys.stderr.flush()
sys.exit(code)
def handle_response(url):
""" read the last key and the number of records copied """
try:
connection = urllib2.urlopen(url)
content = connection.read()
connection.close()
except urllib2.URLError, eobj:
logging.error('%s returned error %i, %s' % (url, eobj.code, eobj.msg))
sys.exit(2)
last_key = ""
rows = 0
lines = content.split("\n")
for line in lines:
field = line.split("\t")
if field[0] == "rows":
rows = int(field[1])
elif field[0] == "last_key":
last_key = field[1]
return last_key, rows
def parse_arguments(argv):
  """Parse command line arguments.

  Returns:
    (url, table, backup_version, restore_version, batch_size, digsig) tuple.
  """
  opts, unused_args = getopt.getopt(
    argv[1:],
    'dh',
    ['debug', 'help', 'url=', 'table=',
     'backup_version=', 'restore_version=', 'digsig=', 'batch_size='
    ])
  url = "http://footprint2009dev.appspot.com/export"
  table = ''
  # Fix: backup_version used to default to today's date BEFORE the option
  # loop, so the "mutually exclusive" check below fired for every
  # --restore_version invocation.  The date default is now applied after
  # parsing, only when neither version flag was given.
  backup_version = ''
  restore_version = ''
  digsig = ''
  batch_size = 1000
  for option, value in opts:
    # Fix: '-d' is declared in the short options but was never handled.
    if option in ('-d', '--debug'):
      logging.getLogger().setLevel(logging.DEBUG)
    if option in ('-h', '--help'):
      print_usage_exit(0)
    if option == '--url':
      url = value
    if option == '--backup_version':
      if restore_version:
        print >> sys.stderr, 'backup and restore are mutually exclusive'
        print_usage_exit(1)
      backup_version = value
    if option == '--restore_version':
      if backup_version:
        print >> sys.stderr, 'backup and restore are mutually exclusive'
        print_usage_exit(1)
      restore_version = value
    if option == '--table':
      table = value
    if option == '--digsig':
      digsig = value
    if option == '--batch_size':
      batch_size = int(value)
      if batch_size <= 0:
        print >> sys.stderr, 'batch_size must be 1 or larger'
        print_usage_exit(1)
  if not backup_version and not restore_version:
    # Default backup label: today's date as zero-padded YYYYMMDD.
    tod = date.today()
    backup_version = '%d%02d%02d' % (tod.year, tod.month, tod.day)
  return (url, table, backup_version, restore_version, batch_size, digsig)
def main(argv):
  """Entry point: parse flags and page through the export/restore URL."""
  logging.basicConfig(
      level=logging.INFO,
      format='%(levelname)-8s %(asctime)s %(message)s')
  parsed = parse_arguments(argv)
  if any(arg is None for arg in parsed):
    print >> sys.stderr, 'Invalid arguments'
    print_usage_exit(1)
  base_url, table, backup_version, restore_version, batch_size, digsig = parsed
  if not base_url:
    print >> sys.stderr, 'specify url'
    print_usage_exit(1)
  if backup_version:
    url = "%s/%s/%s_%s" % (base_url, table, table, backup_version)
  elif restore_version:
    url = "%s/%s_%s/%s" % (base_url, table, table, restore_version)
  else:
    print >> sys.stderr, 'specify either backup_version or restore_version'
    print_usage_exit(1)
  # Page through the table; a short page (fewer rows than batch_size)
  # signals the end of the data.
  min_key = ''
  fetched = batch_size
  while fetched == batch_size:
    step_url = ("%s?digsig=%s&min_key=%s&limit=%s" %
                (url, str(digsig), str(min_key), str(batch_size)))
    log_key = min_key if min_key != "" else "[start]"
    started = datetime.datetime.now()
    min_key, fetched = handle_response(step_url)
    elapsed = datetime.datetime.now() - started
    secs = "%d.%d" % (elapsed.seconds, elapsed.microseconds/1000)
    logging.info('fetched %d in %s secs from %s', fetched, secs, log_key)
  return 0
# Script entry point: exit with main()'s return code.
if __name__ == '__main__':
  sys.exit(main(sys.argv))
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import cookielib
import getpass
import logging
import md5
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
import tempfile
try:
import readline
except ImportError:
logging.debug("readline not found.")
pass
# The logging verbosity:
#  0: Errors only.
#  1: Status messages.
#  2: Info logs.
#  3: Debug logs.
# Module-global; StatusUpdate() prints only when verbosity > 0.
# Presumably set from the -q/-v/--noisy flags by the entry point (not
# visible in this chunk) -- confirm.
verbosity = 1

# Max size of patch or base file.
# Files over this limit are uploaded empty with the "file_too_large" form
# field set (see VersionControlSystem.UploadBaseFiles).
MAX_UPLOAD_SIZE = 900 * 1024
def AreYouSureOrExit(exit_if_no=True):
  """Ask the user for confirmation; by default the program exits on "no".

  Args:
    exit_if_no: when True, any answer other than 'y' aborts via ErrorExit.

  Returns:
    True iff the user answered 'y' (case-insensitive).
  """
  reply = raw_input("Are you sure you want to continue?(y/N) ").strip()
  confirmed = reply.lower() == "y"
  if exit_if_no and not confirmed:
    ErrorExit("User aborted")
  return confirmed
def GetEmail(prompt):
  """Prompts the user for their email address and returns it.

  The most recently used address is cached in
  ~/.last_codereview_email_address and offered as the default; pressing
  enter accepts it, while typing a new address updates the cache.
  """
  cache_path = os.path.expanduser("~/.last_codereview_email_address")
  cached = ""
  if os.path.exists(cache_path):
    try:
      cache_file = open(cache_path, "r")
      cached = cache_file.readline().strip("\n")
      cache_file.close()
      prompt += " [%s]" % cached
    except IOError:
      # Unreadable cache: simply prompt without a default.
      pass
  entered = raw_input(prompt + ": ").strip()
  if not entered:
    return cached
  try:
    cache_file = open(cache_path, "w")
    cache_file.write(entered)
    cache_file.close()
  except IOError:
    # Best effort only; failing to cache the address is not fatal.
    pass
  return entered
def StatusUpdate(msg):
"""Print a status message to stdout.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print msg
def ErrorExit(msg):
  """Print an error message to stderr and exit with status 1."""
  sys.stderr.write("%s\n" % (msg,))
  sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
  """Raised to indicate there was an error authenticating with ClientLogin."""

  def __init__(self, url, code, msg, headers, args):
    # fp=None: an auth failure carries no response body worth exposing.
    urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
    # Stores the parsed ClientLogin response dict (note: this shadows the
    # standard Exception.args tuple).
    self.args = args
    # ClientLogin reports the failure kind under "Error", e.g.
    # "BadAuthentication" or "CaptchaRequired"; _Authenticate dispatches
    # on this value.  Raises KeyError if the key is absent.
    self.reason = args["Error"]
class AbstractRpcServer(object):
  """Provides a common interface for a simple RPC server."""

  def __init__(self, host, auth_function, host_override=None, extra_headers={},
               save_cookies=False):
    """Creates a new HttpRpcServer.

    Args:
      host: The host to send requests to.
      auth_function: A function that takes no arguments and returns an
        (email, password) tuple when called. Will be called if authentication
        is required.
      host_override: The host header to send to the server (defaults to host).
      extra_headers: A dict of extra headers to append to every request.
      save_cookies: If True, save the authentication cookies to local disk.
        If False, use an in-memory cookiejar instead. Subclasses must
        implement this functionality. Defaults to False.
    """
    # NOTE(review): extra_headers={} is a shared mutable default; it is only
    # read here (never mutated), so it is benign, but a None sentinel would
    # be safer.
    self.host = host
    self.host_override = host_override
    self.auth_function = auth_function
    self.authenticated = False
    self.extra_headers = extra_headers
    self.save_cookies = save_cookies
    # The opener is supplied by the subclass (see _GetOpener).
    self.opener = self._GetOpener()
    if self.host_override:
      logging.info("Server: %s; Host: %s", self.host, self.host_override)
    else:
      logging.info("Server: %s", self.host)

  def _GetOpener(self):
    """Returns an OpenerDirector for making HTTP requests.

    Returns:
      A urllib2.OpenerDirector object.
    """
    raise NotImplementedError()

  def _CreateRequest(self, url, data=None):
    """Creates a new urllib request, applying host_override/extra_headers."""
    logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
    req = urllib2.Request(url, data=data)
    if self.host_override:
      req.add_header("Host", self.host_override)
    for key, value in self.extra_headers.iteritems():
      req.add_header(key, value)
    return req

  def _GetAuthToken(self, email, password):
    """Uses ClientLogin to authenticate the user, returning an auth token.

    Args:
      email:    The user's email address
      password: The user's password

    Raises:
      ClientLoginError: If there was an error authenticating with ClientLogin.
      HTTPError: If there was some other form of HTTP error.

    Returns:
      The authentication token returned by ClientLogin.
    """
    account_type = "GOOGLE"
    if self.host.endswith(".google.com"):
      # Needed for use inside Google.
      account_type = "HOSTED"
    req = self._CreateRequest(
        url="https://www.google.com/accounts/ClientLogin",
        data=urllib.urlencode({
            "Email": email,
            "Passwd": password,
            "service": "ah",
            "source": "rietveld-codereview-upload",
            "accountType": account_type,
        }),
    )
    try:
      response = self.opener.open(req)
      response_body = response.read()
      # The success body is newline-separated "key=value" lines; the token
      # is under the "Auth" key.
      response_dict = dict(x.split("=")
                           for x in response_body.split("\n") if x)
      return response_dict["Auth"]
    except urllib2.HTTPError, e:
      if e.code == 403:
        # A 403 from ClientLogin carries a parseable "key=value" error body;
        # surface it as a ClientLoginError so _Authenticate can dispatch on
        # the "Error" value.
        body = e.read()
        response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
        raise ClientLoginError(req.get_full_url(), e.code, e.msg,
                               e.headers, response_dict)
      else:
        raise

  def _GetAuthCookie(self, auth_token):
    """Fetches authentication cookies for an authentication token.

    Args:
      auth_token: The authentication token returned by ClientLogin.

    Raises:
      HTTPError: If there was an error fetching the authentication cookies.
    """
    # This is a dummy value to allow us to identify when we're successful.
    continue_location = "http://localhost/"
    args = {"continue": continue_location, "auth": auth_token}
    req = self._CreateRequest("http://%s/_ah/login?%s" %
                              (self.host, urllib.urlencode(args)))
    try:
      response = self.opener.open(req)
    except urllib2.HTTPError, e:
      # The opener installs no redirect handler (see HttpRpcServer), so the
      # expected 302 surfaces as an HTTPError; treat it as the response.
      response = e
    # A successful login redirects (302) to the dummy continue_location.
    if (response.code != 302 or
        response.info()["location"] != continue_location):
      raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
                              response.headers, response.fp)
    self.authenticated = True

  def _Authenticate(self):
    """Authenticates the user.

    The authentication process works as follows:
     1) We get a username and password from the user
     2) We use ClientLogin to obtain an AUTH token for the user
        (see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
     3) We pass the auth token to /_ah/login on the server to obtain an
        authentication cookie. If login was successful, it tries to redirect
        us to the URL we provided.

    If we attempt to access the upload API without first obtaining an
    authentication cookie, it returns a 401 response and directs us to
    authenticate ourselves with ClientLogin.
    """
    # Up to three attempts; only BadAuthentication re-prompts -- every other
    # recognized ClientLogin failure prints a message and gives up, and an
    # unrecognized one is re-raised.
    for i in range(3):
      credentials = self.auth_function()
      try:
        auth_token = self._GetAuthToken(credentials[0], credentials[1])
      except ClientLoginError, e:
        if e.reason == "BadAuthentication":
          print >>sys.stderr, "Invalid username or password."
          continue
        if e.reason == "CaptchaRequired":
          print >>sys.stderr, (
              "Please go to\n"
              "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
              "and verify you are a human. Then try again.")
          break
        if e.reason == "NotVerified":
          print >>sys.stderr, "Account not verified."
          break
        if e.reason == "TermsNotAgreed":
          print >>sys.stderr, "User has not agreed to TOS."
          break
        if e.reason == "AccountDeleted":
          print >>sys.stderr, "The user account has been deleted."
          break
        if e.reason == "AccountDisabled":
          print >>sys.stderr, "The user account has been disabled."
          break
        if e.reason == "ServiceDisabled":
          print >>sys.stderr, ("The user's access to the service has been "
                               "disabled.")
          break
        if e.reason == "ServiceUnavailable":
          print >>sys.stderr, "The service is not available; try again later."
          break
        raise
      self._GetAuthCookie(auth_token)
      return

  def Send(self, request_path, payload=None,
           content_type="application/octet-stream",
           timeout=None,
           **kwargs):
    """Sends an RPC and returns the response.

    Args:
      request_path: The path to send the request to, eg /api/appversion/create.
      payload: The body of the request, or None to send an empty request.
      content_type: The Content-Type header to use.
      timeout: timeout in seconds; default None i.e. no timeout.
        (Note: for large requests on OS X, the timeout doesn't work right.)
      kwargs: Any keyword arguments are converted into query string parameters.

    Returns:
      The response body, as a string.
    """
    # TODO: Don't require authentication.  Let the server say
    # whether it is necessary.
    if not self.authenticated:
      self._Authenticate()

    # The timeout is applied process-wide and restored afterwards, since
    # urllib2 (py2) has no per-request timeout.
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
      tries = 0
      while True:
        tries += 1
        args = dict(kwargs)
        url = "http://%s%s" % (self.host, request_path)
        if args:
          url += "?" + urllib.urlencode(args)
        req = self._CreateRequest(url=url, data=payload)
        req.add_header("Content-Type", content_type)
        try:
          f = self.opener.open(req)
          response = f.read()
          f.close()
          return response
        except urllib2.HTTPError, e:
          if tries > 3:
            raise
          elif e.code == 401:
            # Cookie expired or missing: re-authenticate and retry.
            self._Authenticate()
##          elif e.code >= 500 and e.code < 600:
##            # Server Error - try again.
##            continue
          else:
            raise
    finally:
      socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
  """Provides a simplified RPC-style interface for HTTP requests."""

  def _Authenticate(self):
    """Save the cookie jar after authentication."""
    super(HttpRpcServer, self)._Authenticate()
    if self.save_cookies:
      StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
      self.cookie_jar.save()

  def _GetOpener(self):
    """Returns an OpenerDirector that supports cookies and ignores redirects.

    Returns:
      A urllib2.OpenerDirector object.
    """
    # Built by hand (rather than urllib2.build_opener) so that NO
    # HTTPRedirectHandler is installed: _GetAuthCookie must see the login
    # 302 itself to detect success.
    opener = urllib2.OpenerDirector()
    opener.add_handler(urllib2.ProxyHandler())
    opener.add_handler(urllib2.UnknownHandler())
    opener.add_handler(urllib2.HTTPHandler())
    opener.add_handler(urllib2.HTTPDefaultErrorHandler())
    opener.add_handler(urllib2.HTTPSHandler())
    opener.add_handler(urllib2.HTTPErrorProcessor())
    if self.save_cookies:
      self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
      self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
      if os.path.exists(self.cookie_file):
        try:
          self.cookie_jar.load()
          # Valid saved cookies let us skip the ClientLogin round trip.
          self.authenticated = True
          StatusUpdate("Loaded authentication cookies from %s" %
                       self.cookie_file)
        except (cookielib.LoadError, IOError):
          # Failed to load cookies - just ignore them.
          pass
      else:
        # Create an empty cookie file with mode 600
        fd = os.open(self.cookie_file, os.O_CREAT, 0600)
        os.close(fd)
      # Always chmod the cookie file
      os.chmod(self.cookie_file, 0600)
    else:
      # Don't save cookies across runs of update.py.
      self.cookie_jar = cookielib.CookieJar()
    opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
    return opener
# Command-line interface definition.  The module-level 'parser' is consumed
# by the script's entry point (not visible in this chunk).
parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
parser.add_option("-y", "--assume_yes", action="store_true",
                  dest="assume_yes", default=False,
                  help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
                 dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
                 dest="verbose", default=1,
                 help="Print info level logs (default).")
group.add_option("--noisy", action="store_const", const=3,
                 dest="verbose", help="Print all logs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
                 default="codereview.appspot.com",
                 metavar="SERVER",
                 help=("The server to upload to. The format is host[:port]. "
                       "Defaults to 'codereview.appspot.com'."))
group.add_option("-e", "--email", action="store", dest="email",
                 metavar="EMAIL", default=None,
                 help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
                 metavar="HOST", default=None,
                 help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
                 dest="save_cookies", default=True,
                 help="Do not save authentication cookies to local disk.")
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
                 metavar="DESCRIPTION", default=None,
                 help="Optional description when creating an issue.")
group.add_option("--min_pylint_score", action="store", dest="min_pylint_score",
                 metavar="MIN_PYLINT_SCORE", default=None,
                 help="run pylint over changed files and require a min score.")
group.add_option("-f", "--description_file", action="store",
                 dest="description_file", metavar="DESCRIPTION_FILE",
                 default=None,
                 help="Optional path of a file that contains "
                      "the description when creating an issue.")
group.add_option("--description_editor", action="store_true",
                 dest="description_editor", metavar="DESCRIPTION_EDITOR",
                 default=False,
                 help="use an editor (EDITOR env variable) to get the "
                      "description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
                 metavar="REVIEWERS", default=None,
                 help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
                 metavar="CC", default=None,
                 help="Add CC (comma separated email addresses).")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
                 metavar="MESSAGE", default=None,
                 help="A message to identify the patch. "
                      "Will prompt if omitted.")
group.add_option("-i", "--issue", type="int", action="store",
                 metavar="ISSUE", default=None,
                 help="Issue number to which to add. Defaults to new issue.")
group.add_option("--download_base", action="store_true",
                 dest="download_base", default=False,
                 help="Base files will be downloaded by the server "
                      "(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
                 metavar="REV", default=None,
                 help="Branch/tree/revision to diff against (used by DVCS).")
group.add_option("--send_mail", action="store_true",
                 dest="send_mail", default=False,
                 help="Send notification email to reviewers.")
def GetRpcServer(options):
  """Returns an instance of an AbstractRpcServer.

  Returns:
    A new AbstractRpcServer, on which RPC calls can be made.
  """
  rpc_server_class = HttpRpcServer

  def GetUserCredentials():
    """Prompts the user for a username and password."""
    email = options.email
    if email is None:
      email = GetEmail("Email (login for uploading to %s)" % options.server)
    password = getpass.getpass("Password for %s: " % email)
    return (email, password)

  host = (options.host or options.server).lower()
  if host != "localhost" and not host.startswith("localhost:"):
    # Production server: real ClientLogin credentials on demand.
    return rpc_server_class(options.server, GetUserCredentials,
                            host_override=options.host,
                            save_cookies=options.save_cookies)

  # dev_appserver: fake authentication via the debug login cookie instead
  # of talking to ClientLogin.
  email = options.email
  if email is None:
    email = "test@example.com"
  logging.info("Using debug user %s. Override with --email" % email)
  server = rpc_server_class(
      options.server,
      lambda: (email, "password"),
      host_override=options.host,
      extra_headers={"Cookie":
                     'dev_appserver_login="%s:False"' % email},
      save_cookies=options.save_cookies)
  server.authenticated = True
  return server
def EncodeMultipartFormData(fields, files):
  """Encode form fields for multipart/form-data.

  Args:
    fields: A sequence of (name, value) elements for regular form fields.
    files: A sequence of (name, filename, value) elements for data to be
           uploaded as files.

  Returns:
    (content_type, body) ready for httplib.HTTP instance.

  Source:
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
  """
  BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
  CRLF = '\r\n'
  parts = []
  for (name, value) in fields:
    parts.extend([
        '--' + BOUNDARY,
        'Content-Disposition: form-data; name="%s"' % name,
        '',
        value,
    ])
  for (name, filename, value) in files:
    parts.extend([
        '--' + BOUNDARY,
        'Content-Disposition: form-data; name="%s"; filename="%s"' %
        (name, filename),
        'Content-Type: %s' % GetContentType(filename),
        '',
        value,
    ])
  # Closing boundary, then a trailing CRLF.
  parts.append('--' + BOUNDARY + '--')
  parts.append('')
  content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
  return content_type, CRLF.join(parts)
def GetContentType(filename):
  """Helper to guess the content-type from the filename."""
  guessed = mimetypes.guess_type(filename)[0]
  if guessed:
    return guessed
  return 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
# (Passed as the shell= argument of subprocess.Popen in
# RunShellWithReturnCode.)
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
                           universal_newlines=True):
  """Executes a command and returns the output from stdout and the return code.

  Args:
    command: Command to execute.
    print_output: If True, the output is printed to stdout.
                  If False, both stdout and stderr are ignored.
    universal_newlines: Use universal_newlines flag (default: True).

  Returns:
    Tuple (output, return code)
  """
  logging.info("Running %s", command)
  p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                       shell=use_shell, universal_newlines=universal_newlines)
  if print_output:
    # Stream stdout line by line while also collecting it.
    output_array = []
    while True:
      line = p.stdout.readline()
      if not line:
        break
      print line.strip("\n")
      output_array.append(line)
    output = "".join(output_array)
  else:
    output = p.stdout.read()
  # NOTE(review): stdout is drained fully before stderr is touched; a child
  # that fills the stderr pipe buffer first could deadlock here.
  # subprocess.communicate() would avoid that, but would lose the streaming
  # behavior of the print_output branch -- confirm before changing.
  p.wait()
  errout = p.stderr.read()
  if print_output and errout:
    print >>sys.stderr, errout
  p.stdout.close()
  p.stderr.close()
  return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
             print_output=False, ignore_retcode=False):
  """Run a command and return its stdout, exiting on failure.

  Args:
    command: Command to execute.
    silent_ok: When False, empty output is treated as an error.
    universal_newlines: Forwarded to RunShellWithReturnCode.
    print_output: Forwarded to RunShellWithReturnCode.
    ignore_retcode: When True, a non-zero exit status is not fatal.

  Returns:
    The command's stdout as a string.
  """
  output, exit_code = RunShellWithReturnCode(command, print_output,
                                             universal_newlines)
  if exit_code and not ignore_retcode:
    ErrorExit("Got error status from %s:\n%s" % (command, output))
  if not output and not silent_ok:
    ErrorExit("No output from %s" % command)
  return output
class VersionControlSystem(object):
  """Abstract base class providing an interface to the VCS."""

  def __init__(self, options):
    """Constructor.

    Args:
      options: Command line options.
    """
    self.options = options

  def GenerateDiff(self, args):
    """Return the current diff as a string.

    Args:
      args: Extra arguments to pass to the diff command.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def CheckForUnknownFiles(self):
    """Show an "are you sure?" prompt if there are unknown files."""
    unknown_files = self.GetUnknownFiles()
    if unknown_files:
      print "The following files are not added to version control:"
      for line in unknown_files:
        print line
      # Exits the program unless the user confirms.
      AreYouSureOrExit()

  def GetBaseFile(self, filename):
    """Get the content of the upstream version of a file.

    Returns:
      A tuple (base_content, new_content, is_binary, status)
        base_content: The contents of the base file.
        new_content: For text files, this is empty.  For binary files, this is
          the contents of the new file, since the diff output won't contain
          information to reconstruct the current file.
        is_binary: True iff the file is binary.
        status: The status of the file.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def GetBaseFiles(self, diff):
    """Helper that calls GetBase file for each file in the patch.

    Returns:
      A dictionary that maps from filename to GetBaseFile's tuple.  Filenames
      are retrieved based on lines that start with "Index:" or
      "Property changes on:".
    """
    files = {}
    for line in diff.splitlines(True):
      if line.startswith('Index:') or line.startswith('Property changes on:'):
        unused, filename = line.split(':', 1)
        # On Windows if a file has property changes its filename uses '\'
        # instead of '/'.
        filename = filename.strip().replace('\\', '/')
        files[filename] = self.GetBaseFile(filename)
    return files

  def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
                      files):
    """Uploads the base files (and if necessary, the current ones as well)."""

    def UploadFile(filename, file_id, content, is_binary, status, is_base):
      """Uploads a file to the server.  Exits the process on a non-OK reply."""
      file_too_large = False
      # NOTE(review): 'type' shadows the builtin; kept as-is for this
      # comment-only pass.
      if is_base:
        type = "base"
      else:
        type = "current"
      if len(content) > MAX_UPLOAD_SIZE:
        # Oversized files are still registered, but with empty content and
        # the file_too_large flag set.
        print ("Not uploading the %s file for %s because it's too large." %
               (type, filename))
        file_too_large = True
        content = ""
      checksum = md5.new(content).hexdigest()
      if options.verbose > 0 and not file_too_large:
        print "Uploading %s file for %s" % (type, filename)
      url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
      form_fields = [("filename", filename),
                     ("status", status),
                     ("checksum", checksum),
                     ("is_binary", str(is_binary)),
                     ("is_current", str(not is_base)),
                    ]
      if file_too_large:
        form_fields.append(("file_too_large", "1"))
      if options.email:
        form_fields.append(("user", options.email))
      ctype, body = EncodeMultipartFormData(form_fields,
                                            [("data", filename, content)])
      response_body = rpc_server.Send(url, body,
                                      content_type=ctype)
      if not response_body.startswith("OK"):
        StatusUpdate(" --> %s" % response_body)
        sys.exit(1)

    # patch_list holds (file-id string, filename) pairs; invert it into a
    # filename -> id-string map.  (The list comprehension is used purely
    # for its side effect.)
    patches = dict()
    [patches.setdefault(v, k) for k, v in patch_list]
    for filename in patches.keys():
      base_content, new_content, is_binary, status = files[filename]
      file_id_str = patches.get(filename)
      # A "nobase_<id>" marker means the server does not want the base file;
      # strip the prefix to recover the numeric id.
      if file_id_str.find("nobase") != -1:
        base_content = None
        file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
      file_id = int(file_id_str)
      if base_content != None:
        UploadFile(filename, file_id, base_content, is_binary, status, True)
      if new_content != None:
        UploadFile(filename, file_id, new_content, is_binary, status, False)

  def IsImage(self, filename):
    """Returns true if the filename has an image extension."""
    mimetype = mimetypes.guess_type(filename)[0]
    if not mimetype:
      return False
    return mimetype.startswith("image/")
class SubversionVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Subversion."""
def __init__(self, options):
super(SubversionVCS, self).__init__(options)
if self.options.revision:
match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
if not match:
ErrorExit("Invalid Subversion revision %s." % self.options.revision)
self.rev_start = match.group(1)
self.rev_end = match.group(3)
else:
self.rev_start = self.rev_end = None
# Cache output from "svn list -r REVNO dirname".
# Keys: dirname, Values: 2-tuple (ouput for start rev and end rev).
self.svnls_cache = {}
# SVN base URL is required to fetch files deleted in an older revision.
# Result is cached to not guess it over and over again in GetBaseFile().
required = self.options.download_base or self.options.revision is not None
self.svn_base = self._GuessBase(required)
def GuessBase(self, required):
"""Wrapper for _GuessBase."""
return self.svn_base
def _GuessBase(self, required):
"""Returns the SVN base URL.
Args:
required: If true, exits if the url can't be guessed, otherwise None is
returned.
"""
info = RunShell(["svn", "info"])
for line in info.splitlines():
words = line.split()
if len(words) == 2 and words[0] == "URL:":
url = words[1]
scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
username, netloc = urllib.splituser(netloc)
if username:
logging.info("Removed username from base URL")
if netloc.endswith("svn.python.org"):
if netloc == "svn.python.org":
if path.startswith("/projects/"):
path = path[9:]
elif netloc != "pythondev@svn.python.org":
ErrorExit("Unrecognized Python URL: %s" % url)
base = "http://svn.python.org/view/*checkout*%s/" % path
logging.info("Guessed Python base = %s", base)
elif netloc.endswith("svn.collab.net"):
if path.startswith("/repos/"):
path = path[6:]
base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
logging.info("Guessed CollabNet base = %s", base)
elif netloc.endswith(".googlecode.com"):
path = path + "/"
base = urlparse.urlunparse(("http", netloc, path, params,
query, fragment))
logging.info("Guessed Google Code base = %s", base)
else:
path = path + "/"
base = urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
logging.info("Guessed base = %s", base)
return base
if required:
ErrorExit("Can't find URL in output from svn info")
return None
def GenerateDiff(self, args):
cmd = ["svn", "diff"]
if self.options.revision:
cmd += ["-r", self.options.revision]
if "--diff-cmd" not in args and os.path.isfile("/usr/bin/diff"):
# force /usr/bin/diff as the diff command used by subversion
# to override user settings (fixes issue with colordiff)
cmd += ["--diff-cmd", "/usr/bin/diff"]
cmd.extend(args)
data = RunShell(cmd)
count = 0
for line in data.splitlines():
if line.startswith("Index:") or line.startswith("Property changes on:"):
count += 1
logging.info(line)
if not count:
ErrorExit("No valid patches found in output from svn diff")
return data
def _CollapseKeywords(self, content, keyword_str):
"""Collapses SVN keywords."""
# svn cat translates keywords but svn diff doesn't. As a result of this
# behavior patching.PatchChunks() fails with a chunk mismatch error.
# This part was originally written by the Review Board development team
# who had the same problem (http://reviews.review-board.org/r/276/).
# Mapping of keywords to known aliases
svn_keywords = {
# Standard keywords
'Date': ['Date', 'LastChangedDate'],
'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
'Author': ['Author', 'LastChangedBy'],
'HeadURL': ['HeadURL', 'URL'],
'Id': ['Id'],
# Aliases
'LastChangedDate': ['LastChangedDate', 'Date'],
'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
'LastChangedBy': ['LastChangedBy', 'Author'],
'URL': ['URL', 'HeadURL'],
}
def repl(m):
if m.group(2):
return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
return "$%s$" % m.group(1)
keywords = [keyword
for name in keyword_str.split(" ")
for keyword in svn_keywords.get(name, [])]
return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
def GetUnknownFiles(self):
status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
unknown_files = []
for line in status.split("\n"):
if line and line[0] == "?":
unknown_files.append(line)
return unknown_files
def ReadFile(self, filename):
"""Returns the contents of a file."""
file = open(filename, 'rb')
result = ""
try:
result = file.read()
finally:
file.close()
return result
  def GetStatus(self, filename):
    """Returns the status of a file.

    Args:
      filename: Path of the file, relative to the current directory.

    Returns:
      A status string: either raw "svn status" output for the working copy,
      or a synthesized "D "/"M "/"A " value when diffing two revisions.
      NOTE(review): GetBaseFile indexes status[3]; confirm these literals
      carry enough trailing padding in the canonical source.
    """
    if not self.options.revision:
      status = RunShell(["svn", "status", "--ignore-externals", filename])
      if not status:
        ErrorExit("svn status returned no output for %s" % filename)
      status_lines = status.splitlines()
      # If file is in a cl, the output will begin with
      # "\n--- Changelist 'cl_name':\n". See
      # http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
      if (len(status_lines) == 3 and
          not status_lines[0] and
          status_lines[1].startswith("--- Changelist")):
        status = status_lines[2]
      else:
        status = status_lines[0]
    # If we have a revision to diff against we need to run "svn list"
    # for the old and the new revision and compare the results to get
    # the correct status for a file.
    else:
      dirname, relfilename = os.path.split(filename)
      if dirname not in self.svnls_cache:
        # assumes self.rev_start/self.rev_end/self.svnls_cache are set by
        # the constructor elsewhere in this class — TODO confirm
        cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to get status for %s." % filename)
        old_files = out.splitlines()
        args = ["svn", "list"]
        if self.rev_end:
          args += ["-r", self.rev_end]
        cmd = args + [dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to run command %s" % cmd)
        # Cache both directory listings so files sharing a directory do not
        # re-run "svn list".
        self.svnls_cache[dirname] = (old_files, out.splitlines())
      old_files, new_files = self.svnls_cache[dirname]
      # Present only in the old revision -> deleted; in both -> modified;
      # only in the new revision -> added.
      if relfilename in old_files and relfilename not in new_files:
        status = "D "
      elif relfilename in old_files and relfilename in new_files:
        status = "M "
      else:
        status = "A "
    return status
  def GetBaseFile(self, filename):
    """Fetches the base (and possibly new) content for a file.

    Returns:
      A 4-tuple (base_content, new_content, is_binary, status[0:5]) where
      base_content/new_content may be None or "" depending on the status.
    """
    status = self.GetStatus(filename)
    base_content = None
    new_content = None
    # If a file is copied its status will be "A +", which signifies
    # "addition-with-history". See "svn st" for more information. We need to
    # upload the original file or else diff parsing will fail if the file was
    # edited.
    if status[0] == "A" and status[3] != "+":
      # We'll need to upload the new content if we're adding a binary file
      # since diff's output won't contain it.
      mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
                          silent_ok=True)
      base_content = ""
      is_binary = bool(mimetype) and not mimetype.startswith("text/")
      if is_binary and self.IsImage(filename):
        new_content = self.ReadFile(filename)
    elif (status[0] in ("M", "D", "R") or
          (status[0] == "A" and status[3] == "+") or  # Copied file.
          (status[0] == " " and status[1] == "M")):  # Property change.
      args = []
      if self.options.revision:
        # assumes self.svn_base/self.rev_start were set up elsewhere in this
        # class — TODO confirm
        url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
      else:
        # Don't change filename, it's needed later.
        url = filename
        args += ["-r", "BASE"]
      cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
      mimetype, returncode = RunShellWithReturnCode(cmd)
      if returncode:
        # File does not exist in the requested revision.
        # Reset mimetype, it contains an error message.
        mimetype = ""
      get_base = False
      is_binary = bool(mimetype) and not mimetype.startswith("text/")
      if status[0] == " ":
        # Empty base content just to force an upload.
        base_content = ""
      elif is_binary:
        if self.IsImage(filename):
          get_base = True
          if status[0] == "M":
            if not self.rev_end:
              new_content = self.ReadFile(filename)
            else:
              url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
              new_content = RunShell(["svn", "cat", url],
                                     universal_newlines=True, silent_ok=True)
        else:
          # Non-image binary: upload an empty base to force the server to
          # accept the patch without the binary payload.
          base_content = ""
      else:
        get_base = True
      if get_base:
        # Binary bases must not have their newlines translated.
        if is_binary:
          universal_newlines = False
        else:
          universal_newlines = True
        if self.rev_start:
          # "svn cat -r REV delete_file.txt" doesn't work. cat requires
          # the full URL with "@REV" appended instead of using "-r" option.
          url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          base_content = RunShell(["svn", "cat", url],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        else:
          base_content = RunShell(["svn", "cat", filename],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        if not is_binary:
          args = []
          if self.rev_start:
            url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          else:
            url = filename
            args += ["-r", "BASE"]
          cmd = ["svn"] + args + ["propget", "svn:keywords", url]
          keywords, returncode = RunShellWithReturnCode(cmd)
          if keywords and not returncode:
            # Collapse expanded keywords so the diff applies cleanly.
            base_content = self._CollapseKeywords(base_content, keywords)
    else:
      StatusUpdate("svn status returned unexpected output: %s" % status)
      sys.exit(1)
    return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Git."""

  def __init__(self, options):
    """Initializes the Git wrapper with the parsed command-line options."""
    super(GitVCS, self).__init__(options)
    # Map of filename -> hash of base file.
    self.base_hashes = {}

  def GenerateDiff(self, extra_args):
    """Returns "git diff" output rewritten into an svn-style diff."""
    # This is more complicated than svn's GenerateDiff because we must convert
    # the diff output to include an svn-style "Index:" line as well as record
    # the hashes of the base files, so we can upload them along with our diff.
    if self.options.revision:
      extra_args = [self.options.revision] + extra_args
    gitdiff = RunShell(["git", "diff", "--full-index"] + extra_args)
    svndiff = []
    filecount = 0
    filename = None
    for line in gitdiff.splitlines():
      match = re.match(r"diff --git a/(.*) b/.*$", line)
      if match:
        filecount += 1
        filename = match.group(1)
        svndiff.append("Index: %s\n" % filename)
      else:
        # The "index" line in a git diff looks like this (long hashes elided):
        #   index 82c0d44..b2cee3f 100755
        # We want to save the left hash, as that identifies the base file.
        match = re.match(r"index (\w+)\.\.", line)
        if match:
          self.base_hashes[filename] = match.group(1)
      svndiff.append(line + "\n")
    if not filecount:
      ErrorExit("No valid patches found in output from git diff")
    return "".join(svndiff)

  def GetUnknownFiles(self):
    """Returns files in the working tree that git does not track."""
    status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
                      silent_ok=True)
    return status.splitlines()

  def GetBaseFile(self, filename):
    """Returns (base_content, new_content, is_binary, status) for filename."""
    # NOTE: "hash" shadows the builtin of the same name within this method.
    hash = self.base_hashes[filename]
    base_content = None
    new_content = None
    is_binary = False
    if hash == "0" * 40:  # All-zero hash indicates no base file.
      status = "A"
      base_content = ""
    else:
      status = "M"
      base_content, returncode = RunShellWithReturnCode(["git", "show", hash])
      if returncode:
        ErrorExit("Got error status from 'git show %s'" % hash)
    return (base_content, new_content, is_binary, status)
class MercurialVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Mercurial."""

  def __init__(self, options, repo_dir):
    """Args:
      options: Parsed command-line options.
      repo_dir: Repository root, as reported by "hg root".
    """
    super(MercurialVCS, self).__init__(options)
    # Absolute path to repository (we can be in a subdir)
    self.repo_dir = os.path.normpath(repo_dir)
    # Compute the subdir
    cwd = os.path.normpath(os.getcwd())
    assert cwd.startswith(self.repo_dir)
    self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
    if self.options.revision:
      self.base_rev = self.options.revision
    else:
      # "hg parent -q" prints "rev:node"; keep the changeset hash part.
      self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()

  def _GetRelPath(self, filename):
    """Get relative path of a file according to the current directory,
    given its logical path in the repo."""
    assert filename.startswith(self.subdir), filename
    return filename[len(self.subdir):].lstrip(r"\/")

  def GenerateDiff(self, extra_args):
    """Returns "hg diff --git" output rewritten into an svn-style diff."""
    # If no file specified, restrict to the current subdir
    extra_args = extra_args or ["."]
    cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
    data = RunShell(cmd, silent_ok=True)
    svndiff = []
    filecount = 0
    for line in data.splitlines():
      m = re.match("diff --git a/(\S+) b/(\S+)", line)
      if m:
        # Modify line to make it look like as it comes from svn diff.
        # With this modification no changes on the server side are required
        # to make upload.py work with Mercurial repos.
        # NOTE: for proper handling of moved/copied files, we have to use
        # the second filename.
        filename = m.group(2)
        svndiff.append("Index: %s" % filename)
        svndiff.append("=" * 67)
        filecount += 1
        logging.info(line)
      else:
        svndiff.append(line)
    if not filecount:
      ErrorExit("No valid patches found in output from hg diff")
    return "\n".join(svndiff) + "\n"

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    args = []
    status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
        silent_ok=True)
    unknown_files = []
    for line in status.splitlines():
      st, fn = line.split(" ", 1)
      if st == "?":
        unknown_files.append(fn)
    return unknown_files

  def GetBaseFile(self, filename):
    """Returns (base_content, new_content, is_binary, status) for filename."""
    # "hg status" and "hg cat" both take a path relative to the current subdir
    # rather than to the repo root, but "hg diff" has given us the full path
    # to the repo root.
    base_content = ""
    new_content = None
    is_binary = False
    oldrelpath = relpath = self._GetRelPath(filename)
    # "hg status -C" returns two lines for moved/copied files, one otherwise
    out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
    out = out.splitlines()
    # HACK: strip error message about missing file/directory if it isn't in
    # the working copy
    if out[0].startswith('%s: ' % relpath):
      out = out[1:]
    if len(out) > 1:
      # Moved/copied => considered as modified, use old filename to
      # retrieve base contents
      oldrelpath = out[1].strip()
      status = "M"
    else:
      status, _ = out[0].split(' ', 1)
    if status != "A":
      base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
        silent_ok=True)
      is_binary = "\0" in base_content  # Mercurial's heuristic
    if status != "R":
      new_content = open(relpath, "rb").read()
      is_binary = is_binary or "\0" in new_content
    if is_binary and base_content:
      # Fetch again without converting newlines
      base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
        silent_ok=True, universal_newlines=False)
    if not is_binary or not self.IsImage(relpath):
      # Only image binaries need their new content uploaded.
      new_content = None
    return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
  """Splits a patch into separate pieces for each file.

  Args:
    data: A string containing the output of svn diff.

  Returns:
    A list of 2-tuple (filename, text) where text is the svn diff output
    pertaining to filename.
  """
  patches = []
  current_name = None
  current_lines = []
  for line in data.splitlines(True):
    next_name = None
    if line.startswith('Index:'):
      next_name = line.split(':', 1)[1].strip()
    elif line.startswith('Property changes on:'):
      # When a file is modified, paths use '/' between directories, however
      # when a property is modified '\' is used on Windows. Make them the same
      # otherwise the file shows up twice.
      prop_name = line.split(':', 1)[1].strip().replace('\\', '/')
      if prop_name != current_name:
        # File has property changes but no modifications, create a new diff.
        next_name = prop_name
    if next_name:
      # Flush the previous file's diff before starting a new one.
      if current_name and current_lines:
        patches.append((current_name, ''.join(current_lines)))
      current_name = next_name
      current_lines = [line]
      continue
    if current_lines is not None:
      current_lines.append(line)
  if current_name and current_lines:
    patches.append((current_name, ''.join(current_lines)))
  return patches
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
  """Uploads a separate patch for each file in the diff output.

  Used when the combined diff exceeds MAX_UPLOAD_SIZE; files whose
  individual patch is still too large are skipped with a warning.

  Returns a list of [patch_key, filename] for each file.
  """
  patches = SplitPatch(data)
  rv = []
  for patch in patches:
    # patch is a (filename, diff_text) tuple from SplitPatch.
    if len(patch[1]) > MAX_UPLOAD_SIZE:
      print ("Not uploading the patch for " + patch[0] +
             " because the file is too large.")
      continue
    form_fields = [("filename", patch[0])]
    if not options.download_base:
      form_fields.append(("content_upload", "1"))
    files = [("data", "data.diff", patch[1])]
    ctype, body = EncodeMultipartFormData(form_fields, files)
    url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
    print "Uploading patch for " + patch[0]
    response_body = rpc_server.Send(url, body, content_type=ctype)
    # The server replies "OK\n<patch_key>" on success.
    lines = response_body.splitlines()
    if not lines or lines[0] != "OK":
      StatusUpdate("  --> %s" % response_body)
      sys.exit(1)
    rv.append([lines[1], patch[0]])
  return rv
def GuessVCS(options):
  """Helper to guess the version control system.

  This examines the current directory, guesses which VersionControlSystem
  we're using, and returns an instance of the appropriate class.  Exit with an
  error if we can't figure it out.

  Returns:
    A VersionControlSystem instance. Exits if the VCS can't be guessed.
  """
  # Mercurial has a command to get the base directory of a repository
  # Try running it, but don't die if we don't have hg installed.
  # NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
  try:
    out, returncode = RunShellWithReturnCode(["hg", "root"])
    if returncode == 0:
      return MercurialVCS(options, out.strip())
  except OSError, (errno, message):
    # NOTE: this tuple unpacking shadows the names "errno" and "message"
    # locally; errno == 2 (ENOENT) means hg is simply not installed.
    if errno != 2:  # ENOENT -- they don't have hg installed.
      raise
  # Subversion has a .svn in all working directories.
  if os.path.isdir('.svn'):
    logging.info("Guessed VCS = Subversion")
    return SubversionVCS(options)
  # Git has a command to test if you're in a git tree.
  # Try running it, but don't die if we don't have git installed.
  try:
    out, returncode = RunShellWithReturnCode(["git", "rev-parse",
                                              "--is-inside-work-tree"])
    if returncode == 0:
      return GitVCS(options)
  except OSError, (errno, message):
    if errno != 2:  # ENOENT -- they don't have git installed.
      raise
  ErrorExit(("Could not guess version control system. "
             "Are you in a working copy directory?"))
def RealMain(argv, data=None):
"""The real main function.
Args:
argv: Command line arguments.
data: Diff contents. If None (default) the diff is generated by
the VersionControlSystem implementation returned by GuessVCS().
Returns:
A 2-tuple (issue id, patchset id).
The patchset id is None if the base files are not uploaded by this
script (applies only to SVN checkouts).
"""
logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
"%(lineno)s %(message)s "))
os.environ['LC_ALL'] = 'C'
options, args = parser.parse_args(argv[1:])
global verbosity
verbosity = options.verbose
if verbosity >= 3:
logging.getLogger().setLevel(logging.DEBUG)
elif verbosity >= 2:
logging.getLogger().setLevel(logging.INFO)
vcs = GuessVCS(options)
if isinstance(vcs, SubversionVCS):
# base field is only allowed for Subversion.
# Note: Fetching base files may become deprecated in future releases.
base = vcs.GuessBase(options.download_base)
else:
base = None
if not base and options.download_base:
options.download_base = True
logging.info("Enabled upload of base file")
if not options.assume_yes:
vcs.CheckForUnknownFiles()
if data is None:
data = vcs.GenerateDiff(args)
files = vcs.GetBaseFiles(data)
if options.min_pylint_score:
print "running pylint..."
has_low_score = 0
for file in files:
if re.search(r'[.]py$', file):
print "pylinting "+file+"..."
res = RunShell(["pylint", file], silent_ok=True, ignore_retcode=True)
match = re.search(r'Your code has been rated at ([0-9.-]+)', res)
try:
score = float(match.group(1))
except:
score = -1.0
print file,"rated at",score
if score < float(options.min_pylint_score):
has_low_score += 1
if has_low_score > 0:
print "pylint reported", has_low_score, \
"files with scores below", options.min_pylint_score
AreYouSureOrExit()
if verbosity >= 1:
print "Upload server:", options.server, "(change with -s/--server)"
if options.issue:
prompt = "Message describing this patch set: "
else:
prompt = "New issue subject: "
message = options.message or raw_input(prompt).strip()
if not message:
ErrorExit("A non-empty message is required")
rpc_server = GetRpcServer(options)
form_fields = [("subject", message)]
if base:
form_fields.append(("base", base))
if options.issue:
form_fields.append(("issue", str(options.issue)))
if options.email:
form_fields.append(("user", options.email))
if options.reviewers:
for reviewer in options.reviewers.split(','):
if "@" in reviewer and not reviewer.split("@")[1].count(".") == 1:
ErrorExit("Invalid email address: %s" % reviewer)
form_fields.append(("reviewers", options.reviewers))
if options.cc:
for cc in options.cc.split(','):
if "@" in cc and not cc.split("@")[1].count(".") == 1:
ErrorExit("Invalid email address: %s" % cc)
form_fields.append(("cc", options.cc))
description = options.description
if options.description_file:
if options.description:
ErrorExit("Can't specify description and description_file")
file = open(options.description_file, 'r')
description = file.read()
file.close()
if options.description_editor:
if options.description:
ErrorExit("Can't specify description and description_editor")
if options.description_file:
ErrorExit("Can't specify description_file and description_editor")
if 'EDITOR' not in os.environ:
ErrorExit("Please set the EDITOR environment variable.")
editor = os.environ['EDITOR']
if editor == None or editor == "":
ErrorExit("Please set the EDITOR environment variable.")
tempfh, filename = tempfile.mkstemp()
msg = "demo URL: http://your-url/foo/\ndescription: (start on next line)\n"
os.write(tempfh, msg)
os.close(tempfh)
print "running EDITOR:", editor, filename
cmd = editor + " " + filename
subprocess.call(cmd, shell=True)
file = open(filename, 'r')
description = file.read()
file.close()
os.unlink(filename)
print description
if description:
form_fields.append(("description", description))
# Send a hash of all the base file so the server can determine if a copy
# already exists in an earlier patchset.
base_hashes = ""
for file, info in files.iteritems():
if not info[0] is None:
checksum = md5.new(info[0]).hexdigest()
if base_hashes:
base_hashes += "|"
base_hashes += checksum + ":" + file
form_fields.append(("base_hashes", base_hashes))
# If we're uploading base files, don't send the email before the uploads, so
# that it contains the file status.
if options.send_mail and options.download_base:
form_fields.append(("send_mail", "1"))
if not options.download_base:
form_fields.append(("content_upload", "1"))
if len(data) > MAX_UPLOAD_SIZE:
print "Patch is large, so uploading file patches separately."
uploaded_diff_file = []
form_fields.append(("separate_patches", "1"))
else:
uploaded_diff_file = [("data", "data.diff", data)]
ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
response_body = rpc_server.Send("/upload", body, content_type=ctype)
patchset = None
if not options.download_base or not uploaded_diff_file:
lines = response_body.splitlines()
if len(lines) >= 2:
msg = lines[0]
patchset = lines[1].strip()
patches = [x.split(" ", 1) for x in lines[2:]]
else:
msg = response_body
else:
msg = response_body
StatusUpdate(msg)
if not response_body.startswith("Issue created.") and \
not response_body.startswith("Issue updated."):
sys.exit(0)
issue = msg[msg.rfind("/")+1:]
if not uploaded_diff_file:
result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
if not options.download_base:
patches = result
if not options.download_base:
vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
if options.send_mail:
rpc_server.Send("/" + issue + "/mail", payload="")
return issue, patchset
FPREVIEW_ADDR = "footprint2009reviews.appspot.com"
def main():
try:
if len(sys.argv) == 1:
print "Usage:", sys.argv[0], "<email address of primary reviewer>"
print "(automatically cc's", FPREVIEW_ADDR, ")"
sys.exit(1)
args = [sys.argv[0], "-s", "footprint2009reviews.appspot.com"]
args.append("--cc=footprint-engreviews@googlegroups.com")
args.append("--description_editor")
args.append("--send_mail")
args.append("--min_pylint_score")
# we're starting with 9.0
args.append("9.0")
args.append("-r")
email = sys.argv[1]
if email.find("@") == -1:
email += "@gmail.com"
print >>sys.stderr, "*** sending to "+email+" for review. (note: @gmail.com)"
args.append(email)
sys.argv = args + sys.argv[2:]
if "PYLINTRC" not in os.environ:
testpath = os.getcwd()
while testpath != "" and not os.path.exists(testpath + "/pylintrc"):
testpath = re.sub(r'/[^/]*$', '', testpath)
print "checking for "+testpath + "/pylintrc"
if testpath == "":
print >>sys.stderr, "ERROR: couldn't find 'pylintrc' file."
sys.exit(1)
os.environ['PYLINTRC'] = testpath + "/pylintrc"
print "guessing PYLINTRC="+os.environ['PYLINTRC']
print "running: ", " ".join(sys.argv)
RealMain(sys.argv)
except KeyboardInterrupt:
print
StatusUpdate("Interrupted.")
sys.exit(1)
if __name__ == "__main__":
main()
| Python |
#!/usr/bin/env python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Exports TSV data over HTTP.
Usage:
%s [flags]
--url=<string> URL endpoint to get exported data. (Required)
  --batch_size=<int> Number of Entity objects to request in each fetch; the
                     larger each entity, the smaller the batch size should
                     be. (Default 1000)
--filename=<path> Path to the TSV file to export. (Required)
--digsig=<string> value passed to endpoint permitting export
The exit status will be 0 on success, non-zero on failure.
"""
import sys
import re
import logging
import getopt
import urllib2
import datetime
def PrintUsageExit(code):
  """Prints the module docstring (the usage text) and exits.

  Args:
    code: Process exit status to terminate with.
  """
  # The module docstring contains a %s placeholder for the program name.
  print sys.modules['__main__'].__doc__ % sys.argv[0]
  sys.stdout.flush()
  sys.stderr.flush()
  sys.exit(code)
def Pull(filename, url, min_key, delim, prefix):
# get content from url and write to filename
try:
connection = urllib2.urlopen(url);
# TODO: read 100 lines incrementally and show progress
content = connection.read()
connection.close()
except urllib2.URLError, e:
logging.error('%s returned error %i, %s' % (url, e.code, e.msg))
sys.exit(2)
try:
tsv_file = file(filename, 'a')
except IOError:
logging.error("I/O error({0}): {1}".format(errno, os.strerror(errno)))
sys.exit(3)
if prefix:
lines = content.split("\n")
lines.pop()
content = ("%s" % prefix) + ("\n%s" % prefix).join(lines) + "\n"
tsv_file.write(content)
tsv_file.close()
# count the number of lines
list = content.splitlines()
line_count = len(list)
last_line = list[line_count - 1]
if min_key == "":
# that's our header, don't count it
line_count -= 1
# get the key value of the last line
fields = last_line.split(delim)
min_key = fields[0][4:]
return min_key, line_count
def ParseArguments(argv):
  """Parses the command line flags.

  Returns:
    A tuple (url, filename, batch_size, prefix, digsig); url and filename
    stay None when the corresponding flag is missing.
  """
  opts, args = getopt.getopt(
      argv[1:],
      'dh',
      ['debug', 'help',
       'url=', 'filename=', 'prefix=', 'digsig=', 'batch_size='
      ])
  url = None
  filename = None
  digsig = ''
  prefix = ''
  batch_size = 1000
  for flag, value in opts:
    if flag == '--debug':
      logging.getLogger().setLevel(logging.DEBUG)
    elif flag in ('-h', '--help'):
      PrintUsageExit(0)
    elif flag == '--url':
      url = value
    elif flag == '--filename':
      filename = value
    elif flag == '--prefix':
      prefix = value
    elif flag == '--digsig':
      digsig = value
    elif flag == '--batch_size':
      batch_size = int(value)
      if batch_size <= 0:
        print >>sys.stderr, 'batch_size must be 1 or larger'
        PrintUsageExit(1)
  return (url, filename, batch_size, prefix, digsig)
def main(argv):
  """Drives the batched export: fetches until a short batch is returned.

  Returns:
    0 on success (errors exit inside Pull).
  """
  logging.basicConfig(
      level=logging.INFO,
      format='%(levelname)-8s %(asctime)s %(message)s')
  args = ParseArguments(argv)
  if [arg for arg in args if arg is None]:
    print >>sys.stderr, 'Invalid arguments'
    PrintUsageExit(1)
  url, filename, batch_size, prefix, digsig = args
  delim = "\t"
  min_key = ""
  # Seed above batch_size so the loop body runs at least once; the first
  # batch also carries a header line, hence the ">=" comparison below.
  lines = batch_size + 2
  while lines >= batch_size:
    url_step = ("%s?digsig=%s&min_key=%s&limit=%s" %
                (url, str(digsig), str(min_key), str(batch_size)))
    if min_key != "":
      log_key = min_key
    else:
      log_key = "[start]"
    t0 = datetime.datetime.now()
    min_key, lines = Pull(filename, url_step, min_key, delim, prefix)
    #print min_key
    diff = datetime.datetime.now() - t0
    secs = "%d.%d" % (diff.seconds, diff.microseconds/1000)
    logging.info('fetched header + %d in %s secs from %s', lines, secs, log_key)
  return 0

if __name__ == '__main__':
  sys.exit(main(sys.argv))
| Python |
#!/usr/bin/env python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Creates backup of tables.
"""
import sys
import logging
import getopt
import urllib2
import datetime
from datetime import date
def print_usage_exit(code):
  """Prints the module docstring (the usage text) and exits.

  Args:
    code: Process exit status to terminate with.
  """
  # The module docstring contains a %s placeholder for the program name.
  print sys.modules['__main__'].__doc__ % sys.argv[0]
  sys.stdout.flush()
  sys.stderr.flush()
  sys.exit(code)
def handle_response(url):
""" read the last key and the number of records copied """
try:
connection = urllib2.urlopen(url)
content = connection.read()
connection.close()
except urllib2.URLError, eobj:
logging.error('%s returned error %i, %s' % (url, eobj.code, eobj.msg))
sys.exit(2)
last_key = ""
rows = 0
lines = content.split("\n")
for line in lines:
field = line.split("\t")
if field[0] == "rows":
rows = int(field[1])
elif field[0] == "last_key":
last_key = field[1]
return last_key, rows
def parse_arguments(argv):
  """Parses the command line flags.

  Returns:
    A tuple (url, table, backup_version, restore_version, batch_size,
    digsig).  At most one of backup_version/restore_version is non-empty;
    backup_version defaults to today's date (YYYYMMDD) when neither flag
    is given.
  """
  opts, args = getopt.getopt(
    argv[1:],
    'dh',
    ['debug', 'help', 'url=', 'table=',
     'backup_version=', 'restore_version=', 'digsig=', 'batch_size='
    ])
  def lzero(number_string):
    """Zero-pads a number string to at least two digits."""
    rtn = number_string
    while len(rtn) < 2:
      rtn = '0' + rtn
    return rtn
  url = "http://footprint2009dev.appspot.com/export"
  table = ''
  # Bug fix: backup_version used to be pre-populated with today's date, so
  # the mutual-exclusion check below always fired for --restore_version and
  # restore mode was unreachable.  The date default is now applied only
  # after parsing, when neither version flag was given explicitly.
  backup_version = ''
  restore_version = ''
  digsig = ''
  batch_size = 1000
  for option, value in opts:
    if option == '--debug':
      logging.getLogger().setLevel(logging.DEBUG)
    if option in ('-h', '--help'):
      print_usage_exit(0)
    if option == '--url':
      url = value
    if option == '--backup_version':
      if restore_version:
        print >> sys.stderr, 'backup and restore are mutually exclusive'
        print_usage_exit(1)
      backup_version = value
    if option == '--restore_version':
      if backup_version:
        print >> sys.stderr, 'backup and restore are mutually exclusive'
        print_usage_exit(1)
      restore_version = value
    if option == '--table':
      table = value
    if option == '--digsig':
      digsig = value
    if option == '--batch_size':
      batch_size = int(value)
      if batch_size <= 0:
        print >> sys.stderr, 'batch_size must be 1 or larger'
        print_usage_exit(1)
  if not backup_version and not restore_version:
    # Default to a dated backup when no explicit version was requested.
    tod = date.today()
    backup_version = (str(tod.year) + lzero(str(tod.month)) +
                      lzero(str(tod.day)))
  opts = args # because pylint said args was unused
  return (url, table, backup_version, restore_version, batch_size, digsig)
def main(argv):
  """Drives the backup/restore: fetches batches until a short batch returns.

  Returns:
    0 on success (errors exit inside handle_response).
  """
  logging.basicConfig(
      level=logging.INFO,
      format='%(levelname)-8s %(asctime)s %(message)s')
  args = parse_arguments(argv)
  if [arg for arg in args if arg is None]:
    print >> sys.stderr, 'Invalid arguments'
    print_usage_exit(1)
  base_url, table, backup_version, restore_version, batch_size, digsig = args
  if not base_url:
    print >> sys.stderr, 'specify url'
    print_usage_exit(1)
  # Backup copies <table> into <table>_<version>; restore copies back.
  if backup_version:
    url = "%s/%s/%s_%s" % (base_url, table, table, backup_version)
  elif restore_version:
    url = "%s/%s_%s/%s" % (base_url, table, table, restore_version)
  else:
    print >> sys.stderr, 'specify either backup_version or restore_version'
    print_usage_exit(1)
  min_key = ''
  # Seed equal to batch_size so the loop body runs at least once; a batch
  # shorter than batch_size signals the end of the table.
  lines = batch_size
  while lines == batch_size:
    url_step = ("%s?digsig=%s&min_key=%s&limit=%s" %
                (url, str(digsig), str(min_key), str(batch_size)))
    if min_key != "":
      log_key = min_key
    else:
      log_key = "[start]"
    start_time = datetime.datetime.now()
    min_key, lines = handle_response(url_step)
    diff = datetime.datetime.now() - start_time
    secs = "%d.%d" % (diff.seconds, diff.microseconds/1000)
    logging.info('fetched %d in %s secs from %s', lines, secs, log_key)
  return 0

if __name__ == '__main__':
  sys.exit(main(sys.argv))
| Python |
#!/usr/bin/python
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: remove silly dependency on dapper.net-- thought I'd need
# it for the full scrape, but ended up not going that way.
"""open source load testing tool for footprint."""
import sys
import os
import urllib
import urlparse
import re
import thread
import time
from datetime import datetime
import socket
import random
import cookielib
import getpass
import logging
import hashlib
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
import tempfile
try:
import readline
except ImportError:
logging.debug("readline not found.")
pass
# Socket timeout in seconds; matches appengine's request timeout so a hung
# connection gives up no later than the server would.
DEFAULT_TIMEOUT = 30
socket.setdefaulttimeout(DEFAULT_TIMEOUT)
# to identify pages vs. hits, we prefix page with a given name
PAGE_NAME_PREFIX = "page_"
# The logging verbosity:
#  0: Errors only.
#  1: Status messages.
#  2: Info logs.
#  3: Debug logs.
VERBOSITY = 1
def AreYouSureOrExit(exit_if_no=True):
  """Asks the user to confirm; optionally exits on anything but "y".

  Args:
    exit_if_no: When True (default), calls ErrorExit unless the user
      answered "y" (case-insensitive).

  Returns:
    True if the user answered "y", False otherwise.
  """
  reply = raw_input("Are you sure you want to continue?(y/N) ").strip().lower()
  if exit_if_no and reply != "y":
    ErrorExit("User aborted")
  return reply == "y"
def GetEmail(prompt):
"""Prompts the user for their email address and returns it.
The last used email address is saved to a file and offered up as a suggestion
to the user. If the user presses enter without typing in anything the last
used email address is used. If the user enters a new address, it is saved
for next time we prompt.
"""
last_email_file_name = os.path.expanduser("~/.last_loadtest_email_address")
last_email = ""
if os.path.exists(last_email_file_name):
try:
last_email_file = open(last_email_file_name, "r")
last_email = last_email_file.readline().strip("\n")
last_email_file.close()
prompt += " [%s]" % last_email
except IOError, e:
pass
email = raw_input(prompt + ": ").strip()
if email:
try:
last_email_file = open(last_email_file_name, "w")
last_email_file.write(email)
last_email_file.close()
except IOError, e:
pass
else:
email = last_email
if email.find("@") == -1:
email += "@gmail.com"
print "assuming you mean "+email+"@gmail.com"
return email
def StatusUpdate(msg):
  """Print a status message to stdout.

  If 'VERBOSITY' is greater than 0, print the message.

  Args:
    msg: The string to print.
  """
  if VERBOSITY > 0:
    print msg
def ErrorExit(msg):
  """Write msg to stderr, then terminate the process with exit status 1."""
  sys.stderr.write("%s\n" % msg)
  sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
  """Raised to indicate there was an error authenticating with ClientLogin."""
  def __init__(self, url, code, msg, headers, args):
    urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
    # NOTE(review): this rebinds BaseException.args to the ClientLogin
    # response dict -- unusual, but callers below only consult .reason.
    self.args = args
    # ClientLogin reports the failure kind under the "Error" key,
    # e.g. "BadAuthentication" or "CaptchaRequired".
    self.reason = args["Error"]
class AbstractRpcServer(object):
"""Provides a common interface for a simple RPC server."""
def __init__(self, host, auth_function, host_override=None, extra_headers={},
save_cookies=False):
"""Creates a new HttpRpcServer.
Args:
host: The host to send requests to.
auth_function: A function that takes no arguments and returns an
(email, password) tuple when called. Will be called if authentication
is required.
host_override: The host header to send to the server (defaults to host).
extra_headers: A dict of extra headers to append to every request.
save_cookies: If True, save the authentication cookies to local disk.
If False, use an in-memory cookiejar instead. Subclasses must
implement this functionality. Defaults to False.
"""
self.host = host
self.host_override = host_override
self.auth_function = auth_function
self.authenticated = False
self.extra_headers = extra_headers
self.save_cookies = save_cookies
self.opener = self._GetOpener()
if self.host_override:
logging.info("Server: %s; Host: %s", self.host, self.host_override)
else:
logging.info("Server: %s", self.host)
def _GetOpener(self):
"""Returns an OpenerDirector for making HTTP requests.
Returns:
A urllib2.OpenerDirector object.
"""
raise NotImplementedError()
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
req = urllib2.Request(url, data=data)
if self.host_override:
req.add_header("Host", self.host_override)
for key, value in self.extra_headers.iteritems():
req.add_header(key, value)
return req
def _GetAuthToken(self, email, password):
"""Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The user's password
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
"""
account_type = "GOOGLE"
if self.host.endswith(".google.com"):
# Needed for use inside Google.
account_type = "HOSTED"
account_type = "GOOGLE"
req = self._CreateRequest(
url="https://www.google.com/accounts/ClientLogin",
data=urllib.urlencode({
"Email": email,
"Passwd": password,
"service": "ah",
"source": "rietveld-codereview-upload",
"accountType": account_type,
}),
)
try:
response = self.opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=")
for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg,
e.headers, response_dict)
else:
raise
def _GetAuthCookie(self, auth_token):
"""Fetches authentication cookies for an authentication token.
Args:
auth_token: The authentication token returned by ClientLogin.
Raises:
HTTPError: If there was an error fetching the authentication cookies.
"""
# This is a dummy value to allow us to identify when we're successful.
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
req = self._CreateRequest("http://%s/_ah/login?%s" %
(self.host, urllib.urlencode(args)))
try:
response = self.opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
response.headers, response.fp)
self.authenticated = True
def _Authenticate(self):
"""Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response and directs us to
authenticate ourselves with ClientLogin.
"""
for i in range(3):
credentials = self.auth_function()
try:
auth_token = self._GetAuthToken(credentials[0], credentials[1])
except ClientLoginError, e:
if e.reason == "BadAuthentication":
print >>sys.stderr, "Invalid username or password."
continue
if e.reason == "CaptchaRequired":
print >>sys.stderr, (
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.")
break
if e.reason == "NotVerified":
print >>sys.stderr, "Account not verified."
break
if e.reason == "TermsNotAgreed":
print >>sys.stderr, "User has not agreed to TOS."
break
if e.reason == "AccountDeleted":
print >>sys.stderr, "The user account has been deleted."
break
if e.reason == "AccountDisabled":
print >>sys.stderr, "The user account has been disabled."
break
if e.reason == "ServiceDisabled":
print >>sys.stderr, ("The user's access to the service has been "
"disabled.")
break
if e.reason == "ServiceUnavailable":
print >>sys.stderr, "The service is not available; try again later."
break
raise
self._GetAuthCookie(auth_token)
return
def Send(self, request_path, payload=None,
content_type="application/octet-stream",
timeout=None,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
if not self.authenticated:
self._Authenticate()
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "http://%s%s" % (self.host, request_path)
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
try:
f = self.opener.open(req)
response = f.read()
f.close()
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 302 or e.code == 401:
self._Authenticate()
elif e.code >= 500 and e.code < 600:
# Server Error - try again.
print "server error "+str(e.code)+": sleeping and retrying..."
time.sleep(1)
continue
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
  """Provides a simplified RPC-style interface for HTTP requests."""
  def _Authenticate(self):
    """Save the cookie jar after authentication."""
    super(HttpRpcServer, self)._Authenticate()
    if self.save_cookies:
      StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
      self.cookie_jar.save()
  def _GetOpener(self):
    """Returns an OpenerDirector that supports cookies and ignores redirects.

    Note: no HTTPRedirectHandler is installed on purpose -- redirects
    surface as HTTPError, which _GetAuthCookie relies on to observe the
    302 from /_ah/login directly.
    Returns:
      A urllib2.OpenerDirector object.
    """
    opener = urllib2.OpenerDirector()
    opener.add_handler(urllib2.ProxyHandler())
    opener.add_handler(urllib2.UnknownHandler())
    opener.add_handler(urllib2.HTTPHandler())
    opener.add_handler(urllib2.HTTPDefaultErrorHandler())
    opener.add_handler(urllib2.HTTPSHandler())
    opener.add_handler(urllib2.HTTPErrorProcessor())
    if self.save_cookies:
      self.cookie_file = os.path.expanduser("~/.loadtest_cookies")
      self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
      if os.path.exists(self.cookie_file):
        try:
          self.cookie_jar.load()
          # a loadable cookie file is treated as a still-valid session;
          # stale cookies are caught later by the 302/401 retry in Send()
          self.authenticated = True
          StatusUpdate("Loaded authentication cookies from %s" %
                       self.cookie_file)
        except (cookielib.LoadError, IOError):
          # Failed to load cookies - just ignore them.
          pass
      else:
        # Create an empty cookie file with mode 600
        fd = os.open(self.cookie_file, os.O_CREAT, 0600)
        os.close(fd)
      # Always chmod the cookie file
      os.chmod(self.cookie_file, 0600)
    else:
      # Don't save cookies across runs of update.py.
      self.cookie_jar = cookielib.CookieJar()
    opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
    return opener
def GetRpcServer(options):
  """Build an HttpRpcServer aimed at the quota server named in options.
  Returns:
    A new AbstractRpcServer, on which RPC calls can be made.
  """
  def GetUserCredentials():
    """Prompt for the (email, password) pair handed to ClientLogin."""
    email = options.email
    if email is None:
      email = GetEmail("Email (for capturing appengine quota details)")
    password = getpass.getpass("Password for %s: " % email)
    return (email, password)
  if options.server is None:
    options.server = "appengine.google.com"
  return HttpRpcServer(options.server,
                       GetUserCredentials,
                       host_override=options.host,
                       save_cookies=options.save_cookies)
# Wall-clock start of the measured run; set once by start_running().
START_TS = None
# Global kill switch polled by every worker-thread loop.
RUNNING = False
def start_running():
  """official kickoff, i.e. after any interaction commands."""
  global RUNNING, START_TS
  RUNNING = True
  START_TS = datetime.now()
def secs_since(ts1, ts2):
  """Return (ts2 - ts1) expressed as a float number of seconds."""
  d = ts2 - ts1
  return 86400.0 * d.days + 1.0 * d.seconds + d.microseconds / 1000000.0
def perfstats(hits, pageviews):
  """Return (elapsed_secs, hit_qps, pageview_qps) measured from START_TS."""
  elapsed = secs_since(START_TS, datetime.now())
  # +0.01 guards against division by zero immediately after kickoff
  denom = float(elapsed + 0.01)
  return (elapsed, hits / denom, pageviews / denom)
RESULTS = []
RESULTS_lock = thread.allocate_lock()
def append_results(res):
  """Append one [name, elapsed_secs] record to RESULTS under its lock."""
  RESULTS_lock.acquire()
  try:
    RESULTS.append(res)
  finally:
    RESULTS_lock.release()
# Registry of test cases: name -> callable, name -> cache hit %, and a
# weighted list (one entry per frequency unit) used for random selection.
REQUEST_TYPES = {}
CACHE_HITRATE = {}
REQUEST_FREQ = []
def register_request_type(name, func, freq=10, cache_hitrate="50%"):
  """setup a test case. Default to positive hitrate so we get warm vs.
  cold cache stats. Freq is the relative frequency for this type of
  request-- larger numbers = larger percentage for the blended results."""
  REQUEST_TYPES[name] = func
  CACHE_HITRATE[name] = int(str(cache_hitrate).strip().replace('%', ''))
  REQUEST_FREQ.extend([name] * freq)
#BASE_URL = "http://footprint2009dev.appspot.com/"
BASE_URL = "http://footprint-loadtest.appspot.com/"
def disable_caching(url):
  """Return url with the footprint cache=0 kill switch appended."""
  # note: ?& is harmless
  joiner = "&" if url.find("?") > 0 else "?"
  return url + joiner + "cache=0"
URLS_SEEN = {}
def make_request(cached, url):
"""actually make HTTP request."""
if not cached:
url = disable_caching(url)
if url not in URLS_SEEN:
seen_url = re.sub(re.compile("^"+BASE_URL), '/', url)
print "fetching "+seen_url
URLS_SEEN[url] = True
try:
infh = urllib.urlopen(url)
content = infh.read()
except:
print "error reading "+url
content = ""
return content
def search_url(base, loc="Chicago,IL", keyword="park"):
  """construct FP search URL, defaulting to [park] near [Chicago,IL]"""
  # avoid a double slash when joining BASE_URL and base
  if BASE_URL.endswith('/') and base.startswith('/'):
    base = base[1:]
  url = BASE_URL + base
  if loc:
    url += "&vol_loc=" + loc
  if keyword:
    url += "&q=" + keyword
  return url
def error_request(name, cached=False):
  """Request 404 junk on the site; here mostly to prove that the
  framework does catch errors."""
  body = make_request(cached, BASE_URL + "foo")
  return "" if body == "" else "no content"
register_request_type("error", error_request, freq=5)
def static_url():
  """all static requests are roughly equivalent."""
  return "%simages/background-gradient.png" % BASE_URL
def fp_find_embedded_objects(base_url, content):
  """Cheesy little HTML parser: return the unique, absolutized URLs of
  img/script src and link href attributes found in content.

  Also approximates browser caching of items on both / and /ui_snippets
  by de-duplicating the result list.
  Args:
    base_url: page URL, used to resolve relative references.
    content: raw HTML text.
  Returns:
    List of absolute URLs in first-seen order, without duplicates.
  """
  objs = []
  # strip newlines/etc. used in formatting
  content = re.sub(r'\s+', ' ', content)
  # one HTML element per line
  content = re.sub(r'>', '>\n', content)
  for line in content.split('\n'):
    match = re.search(r'<(?:img[^>]+src|script[^>]+src|link[^>]+href)\s*=\s*(.+)',
                      line)
    if not match:
      continue
    match2 = re.search(r'^["\'](.+?)["\']', match.group(1))
    if not match2:
      # FIX: unquoted src/href values used to raise AttributeError on
      # match2.group(); they can't be parsed reliably, so skip them.
      continue
    url = match2.group(1)
    url = re.sub(r'[.][.]/images/', 'images/', url)
    url = urlparse.urljoin(base_url, url)
    if url not in objs:
      objs.append(url)
  return objs
# Work queue of static-content URLs, shared with the fetcher threads.
static_content_request_queue = []
static_content_request_lock = thread.allocate_lock()
def fetch_static_content(base_url, content):
  """Queue the JS/CSS/image URLs embedded in content for background fetch."""
  urls = fp_find_embedded_objects(base_url, content)
  static_content_request_lock.acquire()
  try:
    static_content_request_queue.extend(urls)
  finally:
    static_content_request_lock.release()
def static_fetcher_main():
  """thread for fetching static content.

  Drains static_content_request_queue until RUNNING goes False,
  recording per-fetch latency under "static content requests".
  """
  while RUNNING:
    if len(static_content_request_queue) == 0:
      time.sleep(1)
      continue
    url = None
    # re-check under the lock: another fetcher may have drained the queue
    static_content_request_lock.acquire()
    if len(static_content_request_queue) > 0:
      url = static_content_request_queue.pop(0)
    static_content_request_lock.release()
    if url:
      # for static content, caching means client/proxy-side
      cached = (random.randint(0, 99) < OPTIONS.static_content_hitrate)
      if cached:
        # a simulated client-cache hit costs no request at all
        continue
      ts1 = datetime.now()
      content = make_request(False, url)
      elapsed = secs_since(ts1, datetime.now())
      result_name = "static content requests"
      if content == "":
        result_name += " (errors)"
      append_results([result_name, elapsed])
def homepage_request(name, cached=False):
  """Simulate a homepage visit: the main page plus its snippets pane."""
  pieces = [make_request(cached, BASE_URL),
            make_request(cached, search_url("/ui_snippets?", keyword=""))]
  return "".join(pieces)
register_request_type("page_home", homepage_request)
def initial_serp_request(name, cached=False):
  """Simulate landing on a fresh search-results page."""
  pieces = [make_request(cached, search_url("/search#")),
            make_request(cached, search_url("/ui_snippets?"))]
  return "".join(pieces)
# don't expect much caching-- use 10% hitrate so we can see warm vs. cold stats
register_request_type("page_serp_initial", initial_serp_request, cache_hitrate="10%")
def nextpage_serp_request(name, cached=False):
  """Simulate paginating to the second results page."""
  # statistically, nextpage is page 2
  # 50% hitrate due to the overfetch algorithm
  body = make_request(cached, search_url("/ui_snippets?start=11"))
  if body == "":
    return ""
  # we expect next-page static content to be 100% cacheable
  # so don't return content
  return "no content"
# nextpage is relatively rare, but this includes all pagination requests
register_request_type("page_serp_next", nextpage_serp_request, freq=5)
def api_request(name, cached=False):
  """Simulate an API consumer asking for a larger result set."""
  # API calls are probably more likely to ask for more results and/or paginate
  body = make_request(cached, search_url("/api/volopps?num=20&key=testkey"))
  if body == "":
    return ""
  # API requests don't create static content requests
  return "no content"
# until we have more apps, API calls will be rare
register_request_type("page_api", api_request, freq=2)
def setup_tests():
request_type_counts = {}
for name in REQUEST_FREQ:
if name in request_type_counts:
request_type_counts[name] += 1.0
else:
request_type_counts[name] = 1.0
print "OPTIONS.page_fetchers: %d" % OPTIONS.page_fetchers
print "OPTIONS.static_fetchers: %d" % OPTIONS.static_fetchers
print "OPTIONS.static_content_hitrate: %d%%" % OPTIONS.static_content_hitrate
print "request type breakdown:"
for name, cnt in request_type_counts.iteritems():
print " %4.1f%% - %4d%% cache hitrate - %s" % \
(100.0*cnt/float(len(REQUEST_FREQ)), CACHE_HITRATE[name], name)
def run_tests():
  """Worker loop: run weighted-random request types, recording latency."""
  # give the threading system a chance to startup
  while RUNNING:
    testname = REQUEST_FREQ[random.randint(0, len(REQUEST_FREQ)-1)]
    hit_cache = random.randint(0, 99) < CACHE_HITRATE[testname]
    started = datetime.now()
    content = REQUEST_TYPES[testname](testname, hit_cache)
    elapsed = secs_since(started, datetime.now())
    suffix = " (warm cache)" if hit_cache else " (cold cache)"
    # don't count static content towards latency--
    # too hard to model CSS/JS execution costs, HTTP pipelining
    # and parallel fetching. But we do want to create load on the
    # servers
    if content:
      fetch_static_content(BASE_URL, content)
    else:
      suffix = " (errors)"
    append_results([testname + suffix, elapsed])
def main():
global RUNNING
setup_tests()
start_running()
for i in range(OPTIONS.page_fetchers):
thread.start_new_thread(run_tests, ())
for i in range(OPTIONS.static_fetchers):
thread.start_new_thread(static_fetcher_main, ())
while RUNNING:
time.sleep(2)
pageviews = 0
hit_reqs = len(RESULTS)
# important to look at a snapshot-- RESULTS is appended by other threads
for i in range(0, hit_reqs-1):
if RESULTS[i][0].find(PAGE_NAME_PREFIX) == 0:
pageviews += 1
total_secs_elapsed, hit_qps, pageview_qps = perfstats(hit_reqs, pageviews)
print " %4.1f: %d hits (%.1f hits/sec), %d pageviews (%.1f pv/sec)" % \
(total_secs_elapsed, len(RESULTS), hit_qps, pageviews, pageview_qps)
sum_elapsed_time = {}
counts = {}
for i in range(0, hit_reqs-1):
name, elapsed_time = RESULTS[i]
if name in sum_elapsed_time:
sum_elapsed_time[name] += elapsed_time
counts[name] += 1
else:
sum_elapsed_time[name] = elapsed_time
counts[name] = 1
total_counts = 0
for name in counts:
total_counts += counts[name]
for name in sorted(sum_elapsed_time):
print " %4d requests (%4.1f%%), %6dms avg latency for %s" % \
(counts[name], float(counts[name]*100)/float(total_counts+0.01),
int(1000*sum_elapsed_time[name]/counts[name]), name)
if total_secs_elapsed >= OPTIONS.run_time:
RUNNING = False
# Parsed optparse.Values; populated by get_options() before anything runs.
OPTIONS = None
def get_options():
  """Parse command-line flags into the module-global OPTIONS."""
  global OPTIONS
  parser = optparse.OptionParser(usage="%prog [options]")
  # testing options
  group = parser.add_option_group("Load testing options")
  group.add_option("-r", "--run_time", type="int", default=20,
                   dest="run_time",
                   help="how long to run the test (seconds).")
  group.add_option("-n", "--page_fetchers", type="int", dest="page_fetchers",
                   default=4, help="how many pageview fetchers.")
  group.add_option("--static_fetchers", type="int", dest="static_fetchers",
                   default=3, help="how many static content fetchers.")
  group.add_option("--static_content_hitrate", type="int",
                   dest="static_content_hitrate", default=80,
                   help="client-side hitrate on static content (percent)."+
                   "note: 100 = don't simulate fetching of static content.")
  # server
  # (quota-dashboard connection flags, consumed by GetRpcServer)
  group = parser.add_option_group("Quota server options")
  group.add_option("-s", "--server", action="store", dest="server",
                   default="appengine.google.com",
                   metavar="SERVER",
                   help=("The server with the quota info. The format is host[:port]. "
                         "Defaults to 'appengine.google.com'."))
  group.add_option("-e", "--email", action="store", dest="email",
                   metavar="EMAIL", default=None,
                   help="The username to use. Will prompt if omitted.")
  group.add_option("-H", "--host", action="store", dest="host",
                   metavar="HOST", default=None,
                   help="Overrides the Host header sent with all RPCs.")
  group.add_option("--no_cookies", action="store_false",
                   dest="save_cookies", default=True,
                   help="Do not save authentication cookies to local disk.")
  # positional args are ignored
  OPTIONS, args = parser.parse_args(sys.argv[1:])
def get_quota_details():
  """Scrape the App Engine quota dashboard into a dict.

  Returns:
    Dict mapping "Category.Name" -> [current_value, quota_limit, units],
    parsed out of the dashboard's HTML tables.
  NOTE(review): this screen-scrapes dashboard markup; any dashboard HTML
  change silently breaks the regexes below.
  """
  global OPTIONS
  rpc_server = GetRpcServer(OPTIONS)
  response_body = rpc_server.Send("/dashboard/quotadetails",
                                  app_id="footprint-loadtest")
  # get everything onto one line for easy parsing
  content = re.sub("\n", " ", response_body)
  content = re.sub("\s+", " ", content)
  content = re.sub("> <", "><", content)
  # re-split so each <h3> category section sits on its own line
  content = re.sub("<h3>", "\n<h3>", content)
  details = {}
  for line in content.split("\n"):
    for header in re.finditer("<h3>(.+?)</h3>", line):
      category = header.group(1)
      # each table row looks like "<tr><td>Name</td><td>...> 1.2 of 3.4 Units"
      for match in re.finditer('<tr><td>([a-zA-Z ]+)</td><td>.+?'+
                               '>\s*([0-9.+-]+) of ([0-9.+-]+)( [a-zA-Z0-9 ]+ )?',
                               line):
        name = match.group(1)
        value = float(match.group(2))
        quota = float(match.group(3))
        units = match.group(4)
        if units == None:
          units = ""
        else:
          units = units.strip()
        # drop a redundant category prefix from the metric name
        if name != category:
          name = re.sub(re.compile(category+"\s*"), r'', name)
        details[category+"."+name] = [value, quota, units]
  return details
def fmtnum(num):
"""add commas to a float."""
num = str(num)
while True:
oldnum = num
num = re.sub(r'(\d)(\d\d\d[^\d])', r'\1,\2', oldnum)
if oldnum == num:
break
num = re.sub(r'([.]\d\d)\d+$', r'\1', num)
num = re.sub(r'[.]0+$', r'', num)
return num
if __name__ == "__main__":
  #logging.getLogger().setLevel(logging.DEBUG)
  get_options()
  # snapshot quota usage before and after the load run, then report the
  # consumption delta extrapolated to a full day at this request rate
  start_details = get_quota_details()
  main()
  end_details = get_quota_details()
  for key in start_details:
    startval = start_details[key][0]
    endval = end_details[key][0]
    quota = end_details[key][1]
    units = end_details[key][2]
    delta = endval - startval
    # scale the run_time-second consumption up to 24 hours
    day_delta = 86400.0 / OPTIONS.run_time * delta
    if quota > 0.0:
      delta_pct = "%.1f%%" % (100.0 * day_delta / quota)
    else:
      delta_pct = "0.0%"
    # skip quota lines this run didn't measurably touch
    if delta < 0.0001:
      continue
    print "%45s: %6s of quota: %s used, which scales to %s of %s %s / day." % \
      (key, delta_pct, fmtnum(delta), fmtnum(day_delta), fmtnum(quota), units)
| Python |
#!/usr/bin/python
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: remove silly dependency on dapper.net-- thought I'd need
# it for the full scrape, but ended up not going that way.
"""open source load testing tool for footprint."""
import sys
import os
import urllib
import urlparse
import re
import thread
import time
from datetime import datetime
import socket
import random
import cookielib
import getpass
import logging
import hashlib
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
import tempfile
try:
import readline
except ImportError:
logging.debug("readline not found.")
pass
# match appengine's timeout
# (seconds; applied process-wide via socket.setdefaulttimeout below)
DEFAULT_TIMEOUT = 30
socket.setdefaulttimeout(DEFAULT_TIMEOUT)
# to identify pages vs. hits, we prefix page with a given name
# (main() counts a RESULTS entry as a pageview iff its name starts with this)
PAGE_NAME_PREFIX = "page_"
# The logging verbosity:
# 0: Errors only.
# 1: Status messages.
# 2: Info logs.
# 3: Debug logs.
# Consumed by StatusUpdate() below.
VERBOSITY = 1
def AreYouSureOrExit(exit_if_no=True):
  """Ask the user for confirmation; optionally abort on anything but 'y'.

  Args:
    exit_if_no: when True, a non-'y' answer terminates the process.
  Returns:
    True iff the user answered 'y' (case-insensitive).
  """
  reply = raw_input("Are you sure you want to continue?(y/N) ").strip().lower()
  confirmed = (reply == "y")
  if exit_if_no and not confirmed:
    ErrorExit("User aborted")
  return confirmed
def GetEmail(prompt):
"""Prompts the user for their email address and returns it.
The last used email address is saved to a file and offered up as a suggestion
to the user. If the user presses enter without typing in anything the last
used email address is used. If the user enters a new address, it is saved
for next time we prompt.
"""
last_email_file_name = os.path.expanduser("~/.last_loadtest_email_address")
last_email = ""
if os.path.exists(last_email_file_name):
try:
last_email_file = open(last_email_file_name, "r")
last_email = last_email_file.readline().strip("\n")
last_email_file.close()
prompt += " [%s]" % last_email
except IOError, e:
pass
email = raw_input(prompt + ": ").strip()
if email:
try:
last_email_file = open(last_email_file_name, "w")
last_email_file.write(email)
last_email_file.close()
except IOError, e:
pass
else:
email = last_email
if email.find("@") == -1:
email += "@gmail.com"
print "assuming you mean "+email+"@gmail.com"
return email
def StatusUpdate(msg):
"""Print a status message to stdout.
If 'VERBOSITY' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if VERBOSITY > 0:
print msg
def ErrorExit(msg):
  """Write msg to stderr, then terminate the process with exit status 1."""
  sys.stderr.write("%s\n" % msg)
  sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
  """Raised to indicate there was an error authenticating with ClientLogin."""
  def __init__(self, url, code, msg, headers, args):
    urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
    # NOTE(review): this rebinds BaseException.args to the ClientLogin
    # response dict -- unusual, but callers below only consult .reason.
    self.args = args
    # ClientLogin reports the failure kind under the "Error" key,
    # e.g. "BadAuthentication" or "CaptchaRequired".
    self.reason = args["Error"]
class AbstractRpcServer(object):
"""Provides a common interface for a simple RPC server."""
def __init__(self, host, auth_function, host_override=None, extra_headers={},
save_cookies=False):
"""Creates a new HttpRpcServer.
Args:
host: The host to send requests to.
auth_function: A function that takes no arguments and returns an
(email, password) tuple when called. Will be called if authentication
is required.
host_override: The host header to send to the server (defaults to host).
extra_headers: A dict of extra headers to append to every request.
save_cookies: If True, save the authentication cookies to local disk.
If False, use an in-memory cookiejar instead. Subclasses must
implement this functionality. Defaults to False.
"""
self.host = host
self.host_override = host_override
self.auth_function = auth_function
self.authenticated = False
self.extra_headers = extra_headers
self.save_cookies = save_cookies
self.opener = self._GetOpener()
if self.host_override:
logging.info("Server: %s; Host: %s", self.host, self.host_override)
else:
logging.info("Server: %s", self.host)
def _GetOpener(self):
"""Returns an OpenerDirector for making HTTP requests.
Returns:
A urllib2.OpenerDirector object.
"""
raise NotImplementedError()
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
req = urllib2.Request(url, data=data)
if self.host_override:
req.add_header("Host", self.host_override)
for key, value in self.extra_headers.iteritems():
req.add_header(key, value)
return req
def _GetAuthToken(self, email, password):
"""Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The user's password
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
"""
account_type = "GOOGLE"
if self.host.endswith(".google.com"):
# Needed for use inside Google.
account_type = "HOSTED"
account_type = "GOOGLE"
req = self._CreateRequest(
url="https://www.google.com/accounts/ClientLogin",
data=urllib.urlencode({
"Email": email,
"Passwd": password,
"service": "ah",
"source": "rietveld-codereview-upload",
"accountType": account_type,
}),
)
try:
response = self.opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=")
for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg,
e.headers, response_dict)
else:
raise
def _GetAuthCookie(self, auth_token):
"""Fetches authentication cookies for an authentication token.
Args:
auth_token: The authentication token returned by ClientLogin.
Raises:
HTTPError: If there was an error fetching the authentication cookies.
"""
# This is a dummy value to allow us to identify when we're successful.
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
req = self._CreateRequest("http://%s/_ah/login?%s" %
(self.host, urllib.urlencode(args)))
try:
response = self.opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
response.headers, response.fp)
self.authenticated = True
def _Authenticate(self):
"""Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response and directs us to
authenticate ourselves with ClientLogin.
"""
for i in range(3):
credentials = self.auth_function()
try:
auth_token = self._GetAuthToken(credentials[0], credentials[1])
except ClientLoginError, e:
if e.reason == "BadAuthentication":
print >>sys.stderr, "Invalid username or password."
continue
if e.reason == "CaptchaRequired":
print >>sys.stderr, (
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.")
break
if e.reason == "NotVerified":
print >>sys.stderr, "Account not verified."
break
if e.reason == "TermsNotAgreed":
print >>sys.stderr, "User has not agreed to TOS."
break
if e.reason == "AccountDeleted":
print >>sys.stderr, "The user account has been deleted."
break
if e.reason == "AccountDisabled":
print >>sys.stderr, "The user account has been disabled."
break
if e.reason == "ServiceDisabled":
print >>sys.stderr, ("The user's access to the service has been "
"disabled.")
break
if e.reason == "ServiceUnavailable":
print >>sys.stderr, "The service is not available; try again later."
break
raise
self._GetAuthCookie(auth_token)
return
def Send(self, request_path, payload=None,
content_type="application/octet-stream",
timeout=None,
**kwargs):
"""Sends an RPC and returns the response.
Args:
request_path: The path to send the request to, eg /api/appversion/create.
payload: The body of the request, or None to send an empty request.
content_type: The Content-Type header to use.
timeout: timeout in seconds; default None i.e. no timeout.
(Note: for large requests on OS X, the timeout doesn't work right.)
kwargs: Any keyword arguments are converted into query string parameters.
Returns:
The response body, as a string.
"""
# TODO: Don't require authentication. Let the server say
# whether it is necessary.
if not self.authenticated:
self._Authenticate()
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
tries = 0
while True:
tries += 1
args = dict(kwargs)
url = "http://%s%s" % (self.host, request_path)
if args:
url += "?" + urllib.urlencode(args)
req = self._CreateRequest(url=url, data=payload)
req.add_header("Content-Type", content_type)
try:
f = self.opener.open(req)
response = f.read()
f.close()
return response
except urllib2.HTTPError, e:
if tries > 3:
raise
elif e.code == 302 or e.code == 401:
self._Authenticate()
elif e.code >= 500 and e.code < 600:
# Server Error - try again.
print "server error "+str(e.code)+": sleeping and retrying..."
time.sleep(1)
continue
else:
raise
finally:
socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
  """Provides a simplified RPC-style interface for HTTP requests."""
  def _Authenticate(self):
    """Save the cookie jar after authentication."""
    super(HttpRpcServer, self)._Authenticate()
    if self.save_cookies:
      StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
      self.cookie_jar.save()
  def _GetOpener(self):
    """Returns an OpenerDirector that supports cookies and ignores redirects.

    Note: no HTTPRedirectHandler is installed on purpose -- redirects
    surface as HTTPError, which _GetAuthCookie relies on to observe the
    302 from /_ah/login directly.
    Returns:
      A urllib2.OpenerDirector object.
    """
    opener = urllib2.OpenerDirector()
    opener.add_handler(urllib2.ProxyHandler())
    opener.add_handler(urllib2.UnknownHandler())
    opener.add_handler(urllib2.HTTPHandler())
    opener.add_handler(urllib2.HTTPDefaultErrorHandler())
    opener.add_handler(urllib2.HTTPSHandler())
    opener.add_handler(urllib2.HTTPErrorProcessor())
    if self.save_cookies:
      self.cookie_file = os.path.expanduser("~/.loadtest_cookies")
      self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
      if os.path.exists(self.cookie_file):
        try:
          self.cookie_jar.load()
          # a loadable cookie file is treated as a still-valid session;
          # stale cookies are caught later by the 302/401 retry in Send()
          self.authenticated = True
          StatusUpdate("Loaded authentication cookies from %s" %
                       self.cookie_file)
        except (cookielib.LoadError, IOError):
          # Failed to load cookies - just ignore them.
          pass
      else:
        # Create an empty cookie file with mode 600
        fd = os.open(self.cookie_file, os.O_CREAT, 0600)
        os.close(fd)
      # Always chmod the cookie file
      os.chmod(self.cookie_file, 0600)
    else:
      # Don't save cookies across runs of update.py.
      self.cookie_jar = cookielib.CookieJar()
    opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
    return opener
def GetRpcServer(options):
  """Returns an instance of an AbstractRpcServer.
  Args:
    options: parsed command-line options; reads options.email,
      options.server, options.host and options.save_cookies.
  Returns:
    A new AbstractRpcServer, on which RPC calls can be made.
  """
  def GetUserCredentials():
    """Prompts the user for a username and password."""
    email = options.email
    if email is None:
      email = GetEmail("Email (for capturing appengine quota details)")
    password = getpass.getpass("Password for %s: " % email)
    return (email, password)
  # Fall back to the production admin console host if none was given.
  if options.server is None:
    options.server = "appengine.google.com"
  return HttpRpcServer(options.server, GetUserCredentials,
                       host_override=options.host,
                       save_cookies=options.save_cookies)
# Wall-clock time at which the test proper started; set by start_running().
START_TS = None
# Global run flag polled by all worker threads; cleared by main() to stop.
RUNNING = False
def start_running():
  """official kickoff, i.e. after any interaction commands."""
  global RUNNING, START_TS
  RUNNING = True
  START_TS = datetime.now()
def secs_since(ts1, ts2):
  """Seconds elapsed from *ts1* to *ts2*, as a float.

  Equivalent to (ts2 - ts1).total_seconds(), spelled out by hand because
  this script targets Python 2.5 (total_seconds arrived in 2.7).
  """
  delta = ts2 - ts1
  days_part = delta.days * 24.0 * 3600
  micros_part = delta.microseconds / 1000000.0
  return days_part + delta.seconds + micros_part
def perfstats(hits, pageviews):
  """Return (seconds elapsed, hit QPS, pageview QPS) since start_running()."""
  global START_TS
  elapsed = secs_since(START_TS, datetime.now())
  # The extra 0.01s guards against division by zero right at startup.
  denominator = float(elapsed + 0.01)
  return (elapsed, hits / denominator, pageviews / denominator)
# Shared list of [result_name, elapsed_secs] records; guarded by RESULTS_lock
# because multiple fetcher threads append concurrently.
RESULTS = []
RESULTS_lock = thread.allocate_lock()
def append_results(res):
  """Append one [result_name, elapsed_secs] record to RESULTS (thread-safe)."""
  RESULTS_lock.acquire()
  RESULTS.append(res)
  RESULTS_lock.release()
# Registries filled by register_request_type():
#   REQUEST_TYPES: name -> request function
#   CACHE_HITRATE: name -> integer percent hitrate
#   REQUEST_FREQ:  weighted name list sampled by run_tests()
REQUEST_TYPES = {}
CACHE_HITRATE = {}
REQUEST_FREQ = []
def register_request_type(name, func, freq=10, cache_hitrate="50%"):
  """Register a load-test case.

  The default hitrate is positive so warm- vs. cold-cache stats are both
  exercised.  `freq` is the relative weight for this request type: the name
  is appended `freq` times to REQUEST_FREQ, so larger values mean a larger
  share of the blended request mix.
  """
  REQUEST_TYPES[name] = func
  # "40%", "40" and 40 all normalize to the integer 40.
  CACHE_HITRATE[name] = int(str(cache_hitrate).strip().replace('%', ''))
  REQUEST_FREQ.extend([name] * freq)
#BASE_URL = "http://footprint2009dev.appspot.com/"
BASE_URL = "http://footprint-loadtest.appspot.com/"
def disable_caching(url):
  """Append cache=0 to a footprint URL so the app bypasses its cache."""
  # A '?' past position zero means the URL already carries a query string
  # (a resulting '?&' pair is harmless to the app).
  if url.find("?") > 0:
    return url + "&cache=0"
  return url + "?cache=0"
# URLs already logged once; avoids spamming stdout with repeat fetch lines.
URLS_SEEN = {}
def make_request(cached, url):
  """actually make HTTP request.

  Returns the response body, or "" on any fetch error (callers treat an
  empty body as a failed request).
  """
  if not cached:
    url = disable_caching(url)
  # Log each distinct URL only the first time we fetch it.
  if url not in URLS_SEEN:
    seen_url = re.sub(re.compile("^"+BASE_URL), '/', url)
    print "fetching "+seen_url
    URLS_SEEN[url] = True
  try:
    infh = urllib.urlopen(url)
    content = infh.read()
  # NOTE(review): bare except swallows *everything*, including
  # KeyboardInterrupt; deliberate best-effort behavior, but consider
  # narrowing to IOError.
  except:
    print "error reading "+url
    content = ""
  return content
def search_url(base, loc="Chicago,IL", keyword="park"):
  """Build an FP search URL, defaulting to [park] near [Chicago,IL]."""
  # Avoid a double slash when both BASE_URL and base supply one.
  if BASE_URL[-1] == '/' and base[0] == '/':
    base = base[1:]
  url = BASE_URL + base
  if loc and loc != "":
    url += "&vol_loc=" + loc
  if keyword and keyword != "":
    url += "&q=" + keyword
  return url
def error_request(name, cached=False):
  """Request a known-404 URL.

  Exists mostly to prove that the framework records errors: an empty body
  is treated as a failed request by the caller.
  """
  body = make_request(cached, BASE_URL+"foo")
  if body == "":
    return ""
  return "no content"
register_request_type("error", error_request, freq=5)
def static_url():
  """Return one representative static asset (all static requests are
  roughly equivalent)."""
  asset = "images/background-gradient.png"
  return BASE_URL + asset
def fp_find_embedded_objects(base_url, content):
  """cheesy little HTML parser, which also approximates browser caching
  of items on both / and /ui_snippets.

  Returns a de-duplicated list of absolute URLs for img/script/link
  resources referenced by *content*.
  """
  objs = []
  # strip newlines/etc. used in formatting
  content = re.sub(r'\s+', ' ', content)
  # one HTML element per line
  content = re.sub(r'>', '>\n', content)
  for line in content.split('\n'):
    #print "found line: "+line
    match = re.search(r'<(?:img[^>]+src|script[^>]+src|link[^>]+href)\s*=\s*(.+)',
                      line)
    if match:
      # NOTE(review): assumes the attribute value is quoted -- if it is
      # not, match2 is None and .group(1) raises AttributeError; confirm
      # the app never emits unquoted src/href attributes.
      match2 = re.search(r'^["\'](.+?)["\']', match.group(1))
      url = match2.group(1)
      # Normalize the relative ../images/ form used by some pages.
      url = re.sub(r'[.][.]/images/', 'images/', url)
      url = urlparse.urljoin(base_url, url)
      #print "found url: "+url+"\n on base: "+base_url
      if url not in objs:
        objs.append(url)
  return objs
# Work queue of static-content URLs, produced here and consumed by
# static_fetcher_main(); guarded by static_content_request_lock.
static_content_request_queue = []
static_content_request_lock = thread.allocate_lock()
def fetch_static_content(base_url, content):
  """find the embedded JS/CSS/images and request them.

  Producer side only: parses *content* and enqueues the URLs; the actual
  fetching happens in the static fetcher threads.
  """
  urls = fp_find_embedded_objects(base_url, content)
  static_content_request_lock.acquire()
  static_content_request_queue.extend(urls)
  static_content_request_lock.release()
def static_fetcher_main():
  """thread for fetching static content.

  Consumes URLs queued by fetch_static_content() until the global RUNNING
  flag is cleared; records each fetch's latency via append_results().
  """
  while RUNNING:
    # Cheap unlocked peek; the real dequeue below re-checks under the lock.
    if len(static_content_request_queue) == 0:
      time.sleep(1)
      continue
    url = None
    static_content_request_lock.acquire()
    if len(static_content_request_queue) > 0:
      url = static_content_request_queue.pop(0)
    static_content_request_lock.release()
    if url:
      # for static content, caching means client/proxy-side
      cached = (random.randint(0, 99) < OPTIONS.static_content_hitrate)
      if cached:
        # Simulated client-cache hit: no server request, no result record.
        continue
      ts1 = datetime.now()
      content = make_request(False, url)
      elapsed = secs_since(ts1, datetime.now())
      result_name = "static content requests"
      if content == "":
        result_name += " (errors)"
      append_results([result_name, elapsed])
def homepage_request(name, cached=False):
  """Fetch the FP homepage plus its keyword-less snippet block."""
  page = make_request(cached, BASE_URL)
  snippets = make_request(cached, search_url("/ui_snippets?", keyword=""))
  return page + snippets
register_request_type("page_home", homepage_request)
def initial_serp_request(name, cached=False):
  """First search-results page: the SERP shell plus its initial snippets."""
  shell = make_request(cached, search_url("/search#"))
  snippets = make_request(cached, search_url("/ui_snippets?"))
  return shell + snippets
# don't expect much caching-- use 10% hitrate so we can see warm vs. cold stats
register_request_type("page_serp_initial", initial_serp_request, cache_hitrate="10%")
def nextpage_serp_request(name, cached=False):
  """Pagination request for the search results.

  Statistically, "next page" means page 2.  The registered 50% hitrate
  reflects the overfetch algorithm.  Next-page static content is expected
  to be fully cacheable, so no page content is returned for static
  fetching -- only a success/failure marker.
  """
  snippets = make_request(cached, search_url("/ui_snippets?start=11"))
  if snippets == "":
    return ""
  return "no content"
# nextpage is relatively rare, but this includes all pagination requests
register_request_type("page_serp_next", nextpage_serp_request, freq=5)
def api_request(name, cached=False):
  """REST API call.

  API callers likely ask for more results and/or paginate, hence num=20.
  API responses embed no static content, so "no content" is returned on
  success rather than the body.
  """
  response = make_request(cached, search_url("/api/volopps?num=20&key=testkey"))
  if response == "":
    return ""
  return "no content"
# until we have more apps, API calls will be rare
register_request_type("page_api", api_request, freq=2)
def setup_tests():
  """Print the effective test configuration and request-type mix."""
  # Count how often each name appears in the weighted REQUEST_FREQ list.
  request_type_counts = {}
  for name in REQUEST_FREQ:
    if name in request_type_counts:
      request_type_counts[name] += 1.0
    else:
      request_type_counts[name] = 1.0
  print "OPTIONS.page_fetchers: %d" % OPTIONS.page_fetchers
  print "OPTIONS.static_fetchers: %d" % OPTIONS.static_fetchers
  print "OPTIONS.static_content_hitrate: %d%%" % OPTIONS.static_content_hitrate
  print "request type breakdown:"
  for name, cnt in request_type_counts.iteritems():
    print "  %4.1f%% - %4d%% cache hitrate - %s" % \
          (100.0*cnt/float(len(REQUEST_FREQ)), CACHE_HITRATE[name], name)
def run_tests():
  """Page-fetcher thread body: issue weighted-random requests until the
  global RUNNING flag is cleared, recording per-request latency."""
  # give the threading system a chance to startup
  while RUNNING:
    # Uniform pick from the weighted list implements the configured mix.
    testname = REQUEST_FREQ[random.randint(0, len(REQUEST_FREQ)-1)]
    func = REQUEST_TYPES[testname]
    cached = (random.randint(0, 99) < CACHE_HITRATE[testname])
    ts1 = datetime.now()
    content = func(testname, cached)
    elapsed = secs_since(ts1, datetime.now())
    if cached:
      result_name = testname + " (warm cache)"
    else:
      result_name = testname + " (cold cache)"
    # don't count static content towards latency--
    # too hard to model CSS/JS execution costs, HTTP pipelining
    # and parallel fetching. But we do want to create load on the
    # servers
    if content and content != "":
      fetch_static_content(BASE_URL, content)
    else:
      result_name = testname + " (errors)"
    append_results([result_name, elapsed])
def main():
global RUNNING
setup_tests()
start_running()
for i in range(OPTIONS.page_fetchers):
thread.start_new_thread(run_tests, ())
for i in range(OPTIONS.static_fetchers):
thread.start_new_thread(static_fetcher_main, ())
while RUNNING:
time.sleep(2)
pageviews = 0
hit_reqs = len(RESULTS)
# important to look at a snapshot-- RESULTS is appended by other threads
for i in range(0, hit_reqs-1):
if RESULTS[i][0].find(PAGE_NAME_PREFIX) == 0:
pageviews += 1
total_secs_elapsed, hit_qps, pageview_qps = perfstats(hit_reqs, pageviews)
print " %4.1f: %d hits (%.1f hits/sec), %d pageviews (%.1f pv/sec)" % \
(total_secs_elapsed, len(RESULTS), hit_qps, pageviews, pageview_qps)
sum_elapsed_time = {}
counts = {}
for i in range(0, hit_reqs-1):
name, elapsed_time = RESULTS[i]
if name in sum_elapsed_time:
sum_elapsed_time[name] += elapsed_time
counts[name] += 1
else:
sum_elapsed_time[name] = elapsed_time
counts[name] = 1
total_counts = 0
for name in counts:
total_counts += counts[name]
for name in sorted(sum_elapsed_time):
print " %4d requests (%4.1f%%), %6dms avg latency for %s" % \
(counts[name], float(counts[name]*100)/float(total_counts+0.01),
int(1000*sum_elapsed_time[name]/counts[name]), name)
if total_secs_elapsed >= OPTIONS.run_time:
RUNNING = False
# Parsed optparse.Values, populated by get_options() and read globally.
OPTIONS = None
def get_options():
  """Parse command-line flags into the module-level OPTIONS global."""
  global OPTIONS
  parser = optparse.OptionParser(usage="%prog [options]")
  # testing options
  group = parser.add_option_group("Load testing options")
  group.add_option("-r", "--run_time", type="int", default=20,
                   dest="run_time",
                   help="how long to run the test (seconds).")
  group.add_option("-n", "--page_fetchers", type="int", dest="page_fetchers",
                   default=4, help="how many pageview fetchers.")
  group.add_option("--static_fetchers", type="int", dest="static_fetchers",
                   default=3, help="how many static content fetchers.")
  group.add_option("--static_content_hitrate", type="int",
                   dest="static_content_hitrate", default=80,
                   help="client-side hitrate on static content (percent)."+
                   "note: 100 = don't simulate fetching of static content.")
  # server
  group = parser.add_option_group("Quota server options")
  group.add_option("-s", "--server", action="store", dest="server",
                   default="appengine.google.com",
                   metavar="SERVER",
                   help=("The server with the quota info. The format is host[:port]. "
                         "Defaults to 'appengine.google.com'."))
  group.add_option("-e", "--email", action="store", dest="email",
                   metavar="EMAIL", default=None,
                   help="The username to use. Will prompt if omitted.")
  group.add_option("-H", "--host", action="store", dest="host",
                   metavar="HOST", default=None,
                   help="Overrides the Host header sent with all RPCs.")
  group.add_option("--no_cookies", action="store_false",
                   dest="save_cookies", default=True,
                   help="Do not save authentication cookies to local disk.")
  OPTIONS, args = parser.parse_args(sys.argv[1:])
def get_quota_details():
  """Scrape the App Engine dashboard quota page into a dict.

  Returns:
    dict mapping "Category.Name" to [value, quota, units], parsed from the
    /dashboard/quotadetails HTML.
  """
  global OPTIONS
  rpc_server = GetRpcServer(OPTIONS)
  response_body = rpc_server.Send("/dashboard/quotadetails",
                                  app_id="footprint-loadtest")
  # get everything onto one line for easy parsing
  content = re.sub("\n", " ", response_body)
  content = re.sub("\s+", " ", content)
  content = re.sub("> <", "><", content)
  # Re-split so each <h3> category heading starts a line of its own.
  content = re.sub("<h3>", "\n<h3>", content)
  details = {}
  for line in content.split("\n"):
    for header in re.finditer("<h3>(.+?)</h3>", line):
      category = header.group(1)
      # Each table row carries "<value> of <quota> [units]".
      for match in re.finditer('<tr><td>([a-zA-Z ]+)</td><td>.+?'+
                               '>\s*([0-9.+-]+) of ([0-9.+-]+)( [a-zA-Z0-9 ]+ )?',
                               line):
        name = match.group(1)
        value = float(match.group(2))
        quota = float(match.group(3))
        units = match.group(4)
        if units == None:
          units = ""
        else:
          units = units.strip()
        # Drop a redundant leading category name, e.g. "CPU CPU Time".
        if name != category:
          name = re.sub(re.compile(category+"\s*"), r'', name)
        details[category+"."+name] = [value, quota, units]
  return details
def fmtnum(num):
  """Format a number with thousands separators and at most two decimals.

  Commas are inserted into the integer part only; the fractional part is
  truncated (not rounded) to two digits, and an all-zero fraction (".0")
  is dropped entirely.  Fixes the old loop regex, which demanded a
  non-digit *after* each group of three digits and therefore never
  commified plain integers or values ending at the string boundary.
  """
  num = str(num)
  intpart, dot, frac = num.partition('.')
  # Comma before every run of three digits that extends to the end of the
  # integer part; a leading sign is left untouched.
  intpart = re.sub(r'(\d)(?=(?:\d\d\d)+$)', r'\1,', intpart)
  num = intpart + dot + frac
  # Truncate the fraction to two digits.
  num = re.sub(r'([.]\d\d)\d+$', r'\1', num)
  # Drop a fraction that is entirely zeros, e.g. "12.0" -> "12".
  num = re.sub(r'[.]0+$', r'', num)
  return num
if __name__ == "__main__":
  #logging.getLogger().setLevel(logging.DEBUG)
  get_options()
  # Snapshot dashboard quota counters before and after the run so the delta
  # attributable to this load test can be reported per counter.
  start_details = get_quota_details()
  main()
  end_details = get_quota_details()
  for key in start_details:
    # NOTE(review): assumes every key present in the start snapshot is also
    # present at the end -- a dashboard layout change mid-run would raise
    # KeyError here.
    startval = start_details[key][0]
    endval = end_details[key][0]
    quota = end_details[key][1]
    units = end_details[key][2]
    delta = endval - startval
    # Linear extrapolation of this run's usage to a full day.
    day_delta = 86400.0 / OPTIONS.run_time * delta
    if quota > 0.0:
      delta_pct = "%.1f%%" % (100.0 * day_delta / quota)
    else:
      delta_pct = "0.0%"
    # Skip counters that did not move (this also skips negative deltas).
    if delta < 0.0001:
      continue
    print "%45s: %6s of quota: %s used, which scales to %s of %s %s / day." % \
          (key, delta_pct, fmtnum(delta), fmtnum(day_delta), fmtnum(quota), units)
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import cookielib
import getpass
import logging
import md5
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
import tempfile
try:
import readline
except ImportError:
logging.debug("readline not found.")
pass
# The logging verbosity:
#  0: Errors only.
#  1: Status messages.
#  2: Info logs.
#  3: Debug logs.
verbosity = 1
# Max size of patch or base file.
# Files larger than this (900 KiB) are skipped with a "file_too_large" flag.
MAX_UPLOAD_SIZE = 900 * 1024
def AreYouSureOrExit(exit_if_no=True):
  """Ask the user for confirmation; optionally abort the program on 'no'.

  Returns True iff the user answered 'y' (case-insensitive).  With
  exit_if_no (the default), any other answer terminates via ErrorExit.
  """
  reply = raw_input("Are you sure you want to continue?(y/N) ").strip().lower()
  if exit_if_no and reply != "y":
    ErrorExit("User aborted")
  return reply == "y"
def GetEmail(prompt):
  """Prompts the user for their email address and returns it.
  The last used email address is saved to a file and offered up as a suggestion
  to the user. If the user presses enter without typing in anything the last
  used email address is used. If the user enters a new address, it is saved
  for next time we prompt.
  """
  last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
  last_email = ""
  if os.path.exists(last_email_file_name):
    try:
      last_email_file = open(last_email_file_name, "r")
      last_email = last_email_file.readline().strip("\n")
      last_email_file.close()
      prompt += " [%s]" % last_email
    except IOError, e:
      # Best effort: an unreadable cache file just loses the suggestion.
      pass
  email = raw_input(prompt + ": ").strip()
  if email:
    try:
      last_email_file = open(last_email_file_name, "w")
      last_email_file.write(email)
      last_email_file.close()
    except IOError, e:
      # Best effort: failing to persist the address is not fatal.
      pass
  else:
    # Empty input means "reuse the suggested (last-used) address".
    email = last_email
  return email
def StatusUpdate(msg):
  """Print a status message to stdout.
  If 'verbosity' is greater than 0, print the message.
  Args:
    msg: The string to print.
  """
  # 'verbosity' is the module-level level set from the logging flags.
  if verbosity > 0:
    print msg
def ErrorExit(msg):
  """Print an error message to stderr and exit."""
  print >>sys.stderr, msg
  # Exit status 1 signals failure to calling scripts.
  sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
  """Raised to indicate there was an error authenticating with ClientLogin."""
  def __init__(self, url, code, msg, headers, args):
    urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
    # 'args' is the key=value dict parsed from the ClientLogin 403 body.
    # NOTE(review): assigning self.args shadows BaseException.args.
    self.args = args
    self.reason = args["Error"]
class AbstractRpcServer(object):
  """Provides a common interface for a simple RPC server."""
  def __init__(self, host, auth_function, host_override=None, extra_headers={},
               save_cookies=False):
    """Creates a new HttpRpcServer.
    Args:
      host: The host to send requests to.
      auth_function: A function that takes no arguments and returns an
        (email, password) tuple when called. Will be called if authentication
        is required.
      host_override: The host header to send to the server (defaults to host).
      extra_headers: A dict of extra headers to append to every request.
      save_cookies: If True, save the authentication cookies to local disk.
        If False, use an in-memory cookiejar instead. Subclasses must
        implement this functionality. Defaults to False.
    """
    # NOTE(review): extra_headers={} is a mutable default argument; safe
    # only as long as no caller mutates self.extra_headers.
    self.host = host
    self.host_override = host_override
    self.auth_function = auth_function
    self.authenticated = False
    self.extra_headers = extra_headers
    self.save_cookies = save_cookies
    # Built by the subclass; also wires up the cookie jar.
    self.opener = self._GetOpener()
    if self.host_override:
      logging.info("Server: %s; Host: %s", self.host, self.host_override)
    else:
      logging.info("Server: %s", self.host)
  def _GetOpener(self):
    """Returns an OpenerDirector for making HTTP requests.
    Returns:
      A urllib2.OpenerDirector object.
    """
    raise NotImplementedError()
  def _CreateRequest(self, url, data=None):
    """Creates a new urllib request."""
    logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
    req = urllib2.Request(url, data=data)
    if self.host_override:
      req.add_header("Host", self.host_override)
    for key, value in self.extra_headers.iteritems():
      req.add_header(key, value)
    return req
  def _GetAuthToken(self, email, password):
    """Uses ClientLogin to authenticate the user, returning an auth token.
    Args:
      email: The user's email address
      password: The user's password
    Raises:
      ClientLoginError: If there was an error authenticating with ClientLogin.
      HTTPError: If there was some other form of HTTP error.
    Returns:
      The authentication token returned by ClientLogin.
    """
    account_type = "GOOGLE"
    if self.host.endswith(".google.com"):
      # Needed for use inside Google.
      account_type = "HOSTED"
    req = self._CreateRequest(
        url="https://www.google.com/accounts/ClientLogin",
        data=urllib.urlencode({
            "Email": email,
            "Passwd": password,
            "service": "ah",
            "source": "rietveld-codereview-upload",
            "accountType": account_type,
        }),
    )
    try:
      response = self.opener.open(req)
      response_body = response.read()
      # The response body is newline-separated key=value pairs.
      response_dict = dict(x.split("=")
                           for x in response_body.split("\n") if x)
      return response_dict["Auth"]
    except urllib2.HTTPError, e:
      # ClientLogin reports auth failures as 403 with an Error=... body.
      if e.code == 403:
        body = e.read()
        response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
        raise ClientLoginError(req.get_full_url(), e.code, e.msg,
                               e.headers, response_dict)
      else:
        raise
  def _GetAuthCookie(self, auth_token):
    """Fetches authentication cookies for an authentication token.
    Args:
      auth_token: The authentication token returned by ClientLogin.
    Raises:
      HTTPError: If there was an error fetching the authentication cookies.
    """
    # This is a dummy value to allow us to identify when we're successful.
    continue_location = "http://localhost/"
    args = {"continue": continue_location, "auth": auth_token}
    req = self._CreateRequest("http://%s/_ah/login?%s" %
                              (self.host, urllib.urlencode(args)))
    try:
      response = self.opener.open(req)
    except urllib2.HTTPError, e:
      # The opener has no redirect handler, so the expected 302 surfaces
      # here as an HTTPError; that is the success case checked below.
      response = e
    if (response.code != 302 or
        response.info()["location"] != continue_location):
      raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
                              response.headers, response.fp)
    self.authenticated = True
  def _Authenticate(self):
    """Authenticates the user.
    The authentication process works as follows:
     1) We get a username and password from the user
     2) We use ClientLogin to obtain an AUTH token for the user
        (see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
     3) We pass the auth token to /_ah/login on the server to obtain an
        authentication cookie. If login was successful, it tries to redirect
        us to the URL we provided.
    If we attempt to access the upload API without first obtaining an
    authentication cookie, it returns a 401 response and directs us to
    authenticate ourselves with ClientLogin.
    """
    # Up to 3 attempts; only BadAuthentication retries the prompt loop,
    # every other ClientLogin failure breaks out (and falls through).
    for i in range(3):
      credentials = self.auth_function()
      try:
        auth_token = self._GetAuthToken(credentials[0], credentials[1])
      except ClientLoginError, e:
        if e.reason == "BadAuthentication":
          print >>sys.stderr, "Invalid username or password."
          continue
        if e.reason == "CaptchaRequired":
          print >>sys.stderr, (
              "Please go to\n"
              "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
              "and verify you are a human. Then try again.")
          break
        if e.reason == "NotVerified":
          print >>sys.stderr, "Account not verified."
          break
        if e.reason == "TermsNotAgreed":
          print >>sys.stderr, "User has not agreed to TOS."
          break
        if e.reason == "AccountDeleted":
          print >>sys.stderr, "The user account has been deleted."
          break
        if e.reason == "AccountDisabled":
          print >>sys.stderr, "The user account has been disabled."
          break
        if e.reason == "ServiceDisabled":
          print >>sys.stderr, ("The user's access to the service has been "
                               "disabled.")
          break
        if e.reason == "ServiceUnavailable":
          print >>sys.stderr, "The service is not available; try again later."
          break
        raise
      self._GetAuthCookie(auth_token)
      return
  def Send(self, request_path, payload=None,
           content_type="application/octet-stream",
           timeout=None,
           **kwargs):
    """Sends an RPC and returns the response.
    Args:
      request_path: The path to send the request to, eg /api/appversion/create.
      payload: The body of the request, or None to send an empty request.
      content_type: The Content-Type header to use.
      timeout: timeout in seconds; default None i.e. no timeout.
        (Note: for large requests on OS X, the timeout doesn't work right.)
      kwargs: Any keyword arguments are converted into query string parameters.
    Returns:
      The response body, as a string.
    """
    # TODO: Don't require authentication. Let the server say
    # whether it is necessary.
    if not self.authenticated:
      self._Authenticate()
    # The timeout is set process-wide; restored in the finally below.
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
      tries = 0
      while True:
        tries += 1
        args = dict(kwargs)
        url = "http://%s%s" % (self.host, request_path)
        if args:
          url += "?" + urllib.urlencode(args)
        req = self._CreateRequest(url=url, data=payload)
        req.add_header("Content-Type", content_type)
        try:
          f = self.opener.open(req)
          response = f.read()
          f.close()
          return response
        except urllib2.HTTPError, e:
          if tries > 3:
            raise
          elif e.code == 401:
            # Cookie expired or missing: re-authenticate and retry.
            self._Authenticate()
##          elif e.code >= 500 and e.code < 600:
##            # Server Error - try again.
##            continue
          else:
            raise
    finally:
      socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
  """Provides a simplified RPC-style interface for HTTP requests."""
  def _Authenticate(self):
    """Save the cookie jar after authentication."""
    super(HttpRpcServer, self)._Authenticate()
    if self.save_cookies:
      StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
      self.cookie_jar.save()
  def _GetOpener(self):
    """Returns an OpenerDirector that supports cookies and ignores redirects.
    Returns:
      A urllib2.OpenerDirector object.
    """
    # No HTTPRedirectHandler on purpose: the auth flow needs to observe the
    # 302 from /_ah/login rather than follow it.
    opener = urllib2.OpenerDirector()
    opener.add_handler(urllib2.ProxyHandler())
    opener.add_handler(urllib2.UnknownHandler())
    opener.add_handler(urllib2.HTTPHandler())
    opener.add_handler(urllib2.HTTPDefaultErrorHandler())
    opener.add_handler(urllib2.HTTPSHandler())
    opener.add_handler(urllib2.HTTPErrorProcessor())
    if self.save_cookies:
      self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
      self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
      if os.path.exists(self.cookie_file):
        try:
          self.cookie_jar.load()
          # A loadable cookie file is treated as proof of prior auth.
          self.authenticated = True
          StatusUpdate("Loaded authentication cookies from %s" %
                       self.cookie_file)
        except (cookielib.LoadError, IOError):
          # Failed to load cookies - just ignore them.
          pass
      else:
        # Create an empty cookie file with mode 600
        fd = os.open(self.cookie_file, os.O_CREAT, 0600)
        os.close(fd)
      # Always chmod the cookie file
      os.chmod(self.cookie_file, 0600)
    else:
      # Don't save cookies across runs of update.py.
      self.cookie_jar = cookielib.CookieJar()
    opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
    return opener
# Module-level command-line definition for upload.py; parsed by the caller.
parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
parser.add_option("-y", "--assume_yes", action="store_true",
                  dest="assume_yes", default=False,
                  help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
                 dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
                 dest="verbose", default=1,
                 help="Print info level logs (default).")
group.add_option("--noisy", action="store_const", const=3,
                 dest="verbose", help="Print all logs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
                 default="codereview.appspot.com",
                 metavar="SERVER",
                 help=("The server to upload to. The format is host[:port]. "
                       "Defaults to 'codereview.appspot.com'."))
group.add_option("-e", "--email", action="store", dest="email",
                 metavar="EMAIL", default=None,
                 help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
                 metavar="HOST", default=None,
                 help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
                 dest="save_cookies", default=True,
                 help="Do not save authentication cookies to local disk.")
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
                 metavar="DESCRIPTION", default=None,
                 help="Optional description when creating an issue.")
group.add_option("--min_pylint_score", action="store", dest="min_pylint_score",
                 metavar="MIN_PYLINT_SCORE", default=None,
                 help="run pylint over changed files and require a min score.")
group.add_option("-f", "--description_file", action="store",
                 dest="description_file", metavar="DESCRIPTION_FILE",
                 default=None,
                 help="Optional path of a file that contains "
                      "the description when creating an issue.")
group.add_option("--description_editor", action="store_true",
                 dest="description_editor", metavar="DESCRIPTION_EDITOR",
                 default=False,
                 help="use an editor (EDITOR env variable) to get the "
                      "description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
                 metavar="REVIEWERS", default=None,
                 help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
                 metavar="CC", default=None,
                 help="Add CC (comma separated email addresses).")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
                 metavar="MESSAGE", default=None,
                 help="A message to identify the patch. "
                      "Will prompt if omitted.")
group.add_option("-i", "--issue", type="int", action="store",
                 metavar="ISSUE", default=None,
                 help="Issue number to which to add. Defaults to new issue.")
group.add_option("--download_base", action="store_true",
                 dest="download_base", default=False,
                 help="Base files will be downloaded by the server "
                      "(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
                 metavar="REV", default=None,
                 help="Branch/tree/revision to diff against (used by DVCS).")
group.add_option("--send_mail", action="store_true",
                 dest="send_mail", default=False,
                 help="Send notification email to reviewers.")
def GetRpcServer(options):
  """Returns an instance of an AbstractRpcServer.
  Returns:
    A new AbstractRpcServer, on which RPC calls can be made.
  """
  rpc_server_class = HttpRpcServer
  def GetUserCredentials():
    """Prompts the user for a username and password."""
    email = options.email
    if email is None:
      email = GetEmail("Email (login for uploading to %s)" % options.server)
    password = getpass.getpass("Password for %s: " % email)
    return (email, password)
  # If this is the dev_appserver, use fake authentication.
  host = (options.host or options.server).lower()
  if host == "localhost" or host.startswith("localhost:"):
    email = options.email
    if email is None:
      email = "test@example.com"
    logging.info("Using debug user %s. Override with --email" % email)
    # The dev_appserver accepts an unauthenticated login cookie directly.
    server = rpc_server_class(
        options.server,
        lambda: (email, "password"),
        host_override=options.host,
        extra_headers={"Cookie":
                       'dev_appserver_login="%s:False"' % email},
        save_cookies=options.save_cookies)
    # Don't try to talk to ClientLogin.
    server.authenticated = True
    return server
  return rpc_server_class(options.server, GetUserCredentials,
                          host_override=options.host,
                          save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
  """Encode form fields for multipart/form-data.
  Args:
    fields: A sequence of (name, value) elements for regular form fields.
    files: A sequence of (name, filename, value) elements for data to be
      uploaded as files.
  Returns:
    (content_type, body) ready for httplib.HTTP instance.
  Source:
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
  """
  BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
  CRLF = '\r\n'
  parts = []
  for (key, value) in fields:
    parts.extend(['--' + BOUNDARY,
                  'Content-Disposition: form-data; name="%s"' % key,
                  '',
                  value])
  for (key, filename, value) in files:
    parts.extend(['--' + BOUNDARY,
                  'Content-Disposition: form-data; name="%s"; filename="%s"' %
                  (key, filename),
                  'Content-Type: %s' % GetContentType(filename),
                  '',
                  value])
  # Closing boundary plus a trailing CRLF.
  parts.extend(['--' + BOUNDARY + '--', ''])
  body = CRLF.join(parts)
  content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
  return content_type, body
def GetContentType(filename):
  """Guess the MIME content-type from *filename*; octet-stream if unknown."""
  guessed, _unused_encoding = mimetypes.guess_type(filename)
  return guessed or 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
# Read by RunShellWithReturnCode() as the subprocess shell= flag.
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
                           universal_newlines=True):
  """Executes a command and returns the output from stdout and the return code.
  Args:
    command: Command to execute.
    print_output: If True, the output is printed to stdout.
      If False, both stdout and stderr are ignored.
    universal_newlines: Use universal_newlines flag (default: True).
  Returns:
    Tuple (output, return code)
  """
  logging.info("Running %s", command)
  p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                       shell=use_shell, universal_newlines=universal_newlines)
  if print_output:
    # Echo stdout line by line while also accumulating it for the caller.
    output_array = []
    while True:
      line = p.stdout.readline()
      if not line:
        break
      print line.strip("\n")
      output_array.append(line)
    output = "".join(output_array)
  else:
    output = p.stdout.read()
  # NOTE(review): stdout is drained fully before stderr is read; a command
  # writing large amounts to stderr could fill the pipe and deadlock here.
  p.wait()
  errout = p.stderr.read()
  if print_output and errout:
    print >>sys.stderr, errout
  p.stdout.close()
  p.stderr.close()
  return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
             print_output=False, ignore_retcode=False):
  """Run *command*, exiting the program on failure.

  Dies via ErrorExit on a non-zero exit status (unless ignore_retcode) or
  on empty output (unless silent_ok); otherwise returns the stdout text.
  """
  output, exit_code = RunShellWithReturnCode(command, print_output,
                                             universal_newlines)
  if exit_code and not ignore_retcode:
    ErrorExit("Got error status from %s:\n%s" % (command, output))
  if not silent_ok and not output:
    ErrorExit("No output from %s" % command)
  return output
class VersionControlSystem(object):
  """Abstract base class providing an interface to the VCS."""
  def __init__(self, options):
    """Constructor.
    Args:
      options: Command line options.
    """
    # Stored for use by the VCS-specific subclass implementations.
    self.options = options
  def GenerateDiff(self, args):
    """Return the current diff as a string.
    Args:
      args: Extra arguments to pass to the diff command.
    """
    # Abstract: each VCS backend produces its own diff format.
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)
  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS.

    Abstract; used by CheckForUnknownFiles to warn about unadded files.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)
  def CheckForUnknownFiles(self):
    """Show an "are you sure?" prompt if there are unknown files."""
    unknown_files = self.GetUnknownFiles()
    if unknown_files:
      print "The following files are not added to version control:"
      for line in unknown_files:
        print line
      # Aborts the program unless the user confirms.
      AreYouSureOrExit()
  def GetBaseFile(self, filename):
    """Get the content of the upstream version of a file.
    Returns:
      A tuple (base_content, new_content, is_binary, status)
        base_content: The contents of the base file.
        new_content: For text files, this is empty.  For binary files, this is
          the contents of the new file, since the diff output won't contain
          information to reconstruct the current file.
        is_binary: True iff the file is binary.
        status: The status of the file.
    """
    # Abstract: each VCS backend retrieves base revisions differently.
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)
def GetBaseFiles(self, diff):
"""Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
"""
files = {}
for line in diff.splitlines(True):
if line.startswith('Index:') or line.startswith('Property changes on:'):
unused, filename = line.split(':', 1)
# On Windows if a file has property changes its filename uses '\'
# instead of '/'.
filename = filename.strip().replace('\\', '/')
files[filename] = self.GetBaseFile(filename)
return files
def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
files):
"""Uploads the base files (and if necessary, the current ones as well)."""
def UploadFile(filename, file_id, content, is_binary, status, is_base):
"""Uploads a file to the server."""
file_too_large = False
if is_base:
type = "base"
else:
type = "current"
if len(content) > MAX_UPLOAD_SIZE:
print ("Not uploading the %s file for %s because it's too large." %
(type, filename))
file_too_large = True
content = ""
checksum = md5.new(content).hexdigest()
if options.verbose > 0 and not file_too_large:
print "Uploading %s file for %s" % (type, filename)
url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
form_fields = [("filename", filename),
("status", status),
("checksum", checksum),
("is_binary", str(is_binary)),
("is_current", str(not is_base)),
]
if file_too_large:
form_fields.append(("file_too_large", "1"))
if options.email:
form_fields.append(("user", options.email))
ctype, body = EncodeMultipartFormData(form_fields,
[("data", filename, content)])
response_body = rpc_server.Send(url, body,
content_type=ctype)
if not response_body.startswith("OK"):
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
patches = dict()
[patches.setdefault(v, k) for k, v in patch_list]
for filename in patches.keys():
base_content, new_content, is_binary, status = files[filename]
file_id_str = patches.get(filename)
if file_id_str.find("nobase") != -1:
base_content = None
file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
file_id = int(file_id_str)
if base_content != None:
UploadFile(filename, file_id, base_content, is_binary, status, True)
if new_content != None:
UploadFile(filename, file_id, new_content, is_binary, status, False)
def IsImage(self, filename):
"""Returns true if the filename has an image extension."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False
return mimetype.startswith("image/")
class SubversionVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Subversion."""

  def __init__(self, options):
    super(SubversionVCS, self).__init__(options)
    if self.options.revision:
      # Accept "REV" or "REV:REV" (start and optional end revision).
      match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
      if not match:
        ErrorExit("Invalid Subversion revision %s." % self.options.revision)
      self.rev_start = match.group(1)
      self.rev_end = match.group(3)
    else:
      self.rev_start = self.rev_end = None
    # Cache output from "svn list -r REVNO dirname".
    # Keys: dirname, Values: 2-tuple (ouput for start rev and end rev).
    self.svnls_cache = {}
    # SVN base URL is required to fetch files deleted in an older revision.
    # Result is cached to not guess it over and over again in GetBaseFile().
    required = self.options.download_base or self.options.revision is not None
    self.svn_base = self._GuessBase(required)

  def GuessBase(self, required):
    """Wrapper for _GuessBase."""
    # The real work (and any fatal exit) happened in __init__; this just
    # returns the cached result regardless of "required".
    return self.svn_base

  def _GuessBase(self, required):
    """Returns the SVN base URL.

    Args:
      required: If true, exits if the url can't be guessed, otherwise None is
        returned.
    """
    info = RunShell(["svn", "info"])
    for line in info.splitlines():
      words = line.split()
      if len(words) == 2 and words[0] == "URL:":
        url = words[1]
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
        # Credentials embedded in the URL are dropped from the guessed base.
        username, netloc = urllib.splituser(netloc)
        if username:
          logging.info("Removed username from base URL")
        # Known hosting sites get hand-crafted web-viewer URLs; anything else
        # falls back to the repository URL itself.
        if netloc.endswith("svn.python.org"):
          if netloc == "svn.python.org":
            if path.startswith("/projects/"):
              path = path[9:]
          elif netloc != "pythondev@svn.python.org":
            ErrorExit("Unrecognized Python URL: %s" % url)
          base = "http://svn.python.org/view/*checkout*%s/" % path
          logging.info("Guessed Python base = %s", base)
        elif netloc.endswith("svn.collab.net"):
          if path.startswith("/repos/"):
            path = path[6:]
          base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
          logging.info("Guessed CollabNet base = %s", base)
        elif netloc.endswith(".googlecode.com"):
          path = path + "/"
          base = urlparse.urlunparse(("http", netloc, path, params,
                                      query, fragment))
          logging.info("Guessed Google Code base = %s", base)
        else:
          path = path + "/"
          base = urlparse.urlunparse((scheme, netloc, path, params,
                                      query, fragment))
          logging.info("Guessed base = %s", base)
        return base
    if required:
      ErrorExit("Can't find URL in output from svn info")
    return None

  def GenerateDiff(self, args):
    """Runs "svn diff" (optionally bounded by -r) and returns its output."""
    cmd = ["svn", "diff"]
    if self.options.revision:
      cmd += ["-r", self.options.revision]
    if "--diff-cmd" not in args and os.path.isfile("/usr/bin/diff"):
      # force /usr/bin/diff as the diff command used by subversion
      # to override user settings (fixes issue with colordiff)
      cmd += ["--diff-cmd", "/usr/bin/diff"]
    cmd.extend(args)
    data = RunShell(cmd)
    count = 0
    for line in data.splitlines():
      if line.startswith("Index:") or line.startswith("Property changes on:"):
        count += 1
        logging.info(line)
    if not count:
      ErrorExit("No valid patches found in output from svn diff")
    return data

  def _CollapseKeywords(self, content, keyword_str):
    """Collapses SVN keywords."""
    # svn cat translates keywords but svn diff doesn't. As a result of this
    # behavior patching.PatchChunks() fails with a chunk mismatch error.
    # This part was originally written by the Review Board development team
    # who had the same problem (http://reviews.review-board.org/r/276/).
    # Mapping of keywords to known aliases
    svn_keywords = {
      # Standard keywords
      'Date': ['Date', 'LastChangedDate'],
      'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
      'Author': ['Author', 'LastChangedBy'],
      'HeadURL': ['HeadURL', 'URL'],
      'Id': ['Id'],

      # Aliases
      'LastChangedDate': ['LastChangedDate', 'Date'],
      'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
      'LastChangedBy': ['LastChangedBy', 'Author'],
      'URL': ['URL', 'HeadURL'],
    }

    def repl(m):
      # Collapse "$Keyword:: value $" (fixed-width) preserving its width,
      # and "$Keyword: value $" to the bare "$Keyword$" form.
      if m.group(2):
        return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
      return "$%s$" % m.group(1)

    # Expand the space-separated keyword list into all known aliases.
    keywords = [keyword
                for name in keyword_str.split(" ")
                for keyword in svn_keywords.get(name, [])]
    return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)

  def GetUnknownFiles(self):
    """Returns "svn status" lines for unversioned ('?') entries."""
    status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
    unknown_files = []
    for line in status.split("\n"):
      if line and line[0] == "?":
        unknown_files.append(line)
    return unknown_files

  def ReadFile(self, filename):
    """Returns the contents of a file."""
    file = open(filename, 'rb')
    result = ""
    try:
      result = file.read()
    finally:
      file.close()
    return result

  def GetStatus(self, filename):
    """Returns the status of a file."""
    if not self.options.revision:
      status = RunShell(["svn", "status", "--ignore-externals", filename])
      if not status:
        ErrorExit("svn status returned no output for %s" % filename)
      status_lines = status.splitlines()
      # If file is in a cl, the output will begin with
      # "\n--- Changelist 'cl_name':\n". See
      # http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
      if (len(status_lines) == 3 and
          not status_lines[0] and
          status_lines[1].startswith("--- Changelist")):
        status = status_lines[2]
      else:
        status = status_lines[0]
    # If we have a revision to diff against we need to run "svn list"
    # for the old and the new revision and compare the results to get
    # the correct status for a file.
    else:
      dirname, relfilename = os.path.split(filename)
      if dirname not in self.svnls_cache:
        cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to get status for %s." % filename)
        old_files = out.splitlines()
        args = ["svn", "list"]
        if self.rev_end:
          args += ["-r", self.rev_end]
        cmd = args + [dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to run command %s" % cmd)
        self.svnls_cache[dirname] = (old_files, out.splitlines())
      # Derive the status letter from presence in the old/new listings.
      old_files, new_files = self.svnls_cache[dirname]
      if relfilename in old_files and relfilename not in new_files:
        status = "D   "
      elif relfilename in old_files and relfilename in new_files:
        status = "M   "
      else:
        status = "A   "
    return status

  def GetBaseFile(self, filename):
    """Fetches base (and, for binaries, current) content; see base class."""
    status = self.GetStatus(filename)
    base_content = None
    new_content = None

    # If a file is copied its status will be "A  +", which signifies
    # "addition-with-history". See "svn st" for more information. We need to
    # upload the original file or else diff parsing will fail if the file was
    # edited.
    if status[0] == "A" and status[3] != "+":
      # We'll need to upload the new content if we're adding a binary file
      # since diff's output won't contain it.
      mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
                          silent_ok=True)
      base_content = ""
      is_binary = bool(mimetype) and not mimetype.startswith("text/")
      if is_binary and self.IsImage(filename):
        new_content = self.ReadFile(filename)
    elif (status[0] in ("M", "D", "R") or
          (status[0] == "A" and status[3] == "+") or  # Copied file.
          (status[0] == " " and status[1] == "M")):  # Property change.
      args = []
      if self.options.revision:
        url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
      else:
        # Don't change filename, it's needed later.
        url = filename
        args += ["-r", "BASE"]
      cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
      mimetype, returncode = RunShellWithReturnCode(cmd)
      if returncode:
        # File does not exist in the requested revision.
        # Reset mimetype, it contains an error message.
        mimetype = ""
      get_base = False
      is_binary = bool(mimetype) and not mimetype.startswith("text/")
      if status[0] == " ":
        # Empty base content just to force an upload.
        base_content = ""
      elif is_binary:
        if self.IsImage(filename):
          get_base = True
          if status[0] == "M":
            if not self.rev_end:
              new_content = self.ReadFile(filename)
            else:
              url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
              new_content = RunShell(["svn", "cat", url],
                                     universal_newlines=True, silent_ok=True)
        else:
          base_content = ""
      else:
        get_base = True

      if get_base:
        # Binary content must not have its newlines translated.
        if is_binary:
          universal_newlines = False
        else:
          universal_newlines = True
        if self.rev_start:
          # "svn cat -r REV delete_file.txt" doesn't work. cat requires
          # the full URL with "@REV" appended instead of using "-r" option.
          url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          base_content = RunShell(["svn", "cat", url],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        else:
          base_content = RunShell(["svn", "cat", filename],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        if not is_binary:
          args = []
          if self.rev_start:
            url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          else:
            url = filename
            args += ["-r", "BASE"]
          cmd = ["svn"] + args + ["propget", "svn:keywords", url]
          keywords, returncode = RunShellWithReturnCode(cmd)
          if keywords and not returncode:
            # Undo svn's keyword expansion so diffs apply cleanly.
            base_content = self._CollapseKeywords(base_content, keywords)
    else:
      StatusUpdate("svn status returned unexpected output: %s" % status)
      sys.exit(1)
    return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Git."""

  def __init__(self, options):
    super(GitVCS, self).__init__(options)
    # Map of filename -> hash of base file.
    self.base_hashes = {}

  def GenerateDiff(self, extra_args):
    """Returns "git diff" output rewritten into svn-style "Index:" form.

    This is more complicated than svn's GenerateDiff because we must convert
    the diff output to include an svn-style "Index:" line as well as record
    the hashes of the base files, so we can upload them along with our diff.

    Args:
      extra_args: Extra arguments to pass to "git diff".
    """
    if self.options.revision:
      extra_args = [self.options.revision] + extra_args
    # --full-index gives the complete 40-char blob hashes on "index" lines.
    gitdiff = RunShell(["git", "diff", "--full-index"] + extra_args)
    svndiff = []
    filecount = 0
    filename = None
    for line in gitdiff.splitlines():
      match = re.match(r"diff --git a/(.*) b/.*$", line)
      if match:
        filecount += 1
        filename = match.group(1)
        # Prepend the svn-style header for each file.
        svndiff.append("Index: %s\n" % filename)
      else:
        # The "index" line in a git diff looks like this (long hashes elided):
        #   index 82c0d44..b2cee3f 100755
        # We want to save the left hash, as that identifies the base file.
        match = re.match(r"index (\w+)\.\.", line)
        if match:
          self.base_hashes[filename] = match.group(1)
      svndiff.append(line + "\n")
    if not filecount:
      ErrorExit("No valid patches found in output from git diff")
    return "".join(svndiff)

  def GetUnknownFiles(self):
    """Returns files git is neither tracking nor ignoring."""
    status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
                      silent_ok=True)
    return status.splitlines()

  def GetBaseFile(self, filename):
    """Fetches the base revision of filename via "git show".

    Returns:
      A (base_content, new_content, is_binary, status) tuple; see
      VersionControlSystem.GetBaseFile for semantics.
    """
    # Renamed local from "hash" to avoid shadowing the builtin.
    base_hash = self.base_hashes[filename]
    base_content = None
    new_content = None
    is_binary = False
    if base_hash == "0" * 40:  # All-zero hash indicates no base file.
      status = "A"
      base_content = ""
    else:
      status = "M"
      base_content, returncode = RunShellWithReturnCode(
          ["git", "show", base_hash])
      if returncode:
        ErrorExit("Got error status from 'git show %s'" % base_hash)
    return (base_content, new_content, is_binary, status)
class MercurialVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Mercurial."""

  def __init__(self, options, repo_dir):
    super(MercurialVCS, self).__init__(options)
    # Absolute path to repository (we can be in a subdir)
    self.repo_dir = os.path.normpath(repo_dir)
    # Compute the subdir
    cwd = os.path.normpath(os.getcwd())
    assert cwd.startswith(self.repo_dir)
    self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
    if self.options.revision:
      self.base_rev = self.options.revision
    else:
      # "hg parent -q" prints "rev:node"; keep the node part.
      self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()

  def _GetRelPath(self, filename):
    """Get relative path of a file according to the current directory,
    given its logical path in the repo."""
    assert filename.startswith(self.subdir), filename
    return filename[len(self.subdir):].lstrip(r"\/")

  def GenerateDiff(self, extra_args):
    """Returns "hg diff --git" output rewritten into svn-style form."""
    # If no file specified, restrict to the current subdir
    extra_args = extra_args or ["."]
    cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
    data = RunShell(cmd, silent_ok=True)
    svndiff = []
    filecount = 0
    for line in data.splitlines():
      m = re.match("diff --git a/(\S+) b/(\S+)", line)
      if m:
        # Modify line to make it look like as it comes from svn diff.
        # With this modification no changes on the server side are required
        # to make upload.py work with Mercurial repos.
        # NOTE: for proper handling of moved/copied files, we have to use
        # the second filename.
        filename = m.group(2)
        svndiff.append("Index: %s" % filename)
        svndiff.append("=" * 67)
        filecount += 1
        logging.info(line)
      else:
        svndiff.append(line)
    if not filecount:
      ErrorExit("No valid patches found in output from hg diff")
    return "\n".join(svndiff) + "\n"

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    args = []
    status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
                      silent_ok=True)
    unknown_files = []
    for line in status.splitlines():
      st, fn = line.split(" ", 1)
      if st == "?":
        unknown_files.append(fn)
    return unknown_files

  def GetBaseFile(self, filename):
    """Fetches base/new content via "hg cat"; see base class for the tuple."""
    # "hg status" and "hg cat" both take a path relative to the current subdir
    # rather than to the repo root, but "hg diff" has given us the full path
    # to the repo root.
    base_content = ""
    new_content = None
    is_binary = False
    oldrelpath = relpath = self._GetRelPath(filename)
    # "hg status -C" returns two lines for moved/copied files, one otherwise
    out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
    out = out.splitlines()
    # HACK: strip error message about missing file/directory if it isn't in
    # the working copy
    if out[0].startswith('%s: ' % relpath):
      out = out[1:]
    if len(out) > 1:
      # Moved/copied => considered as modified, use old filename to
      # retrieve base contents
      oldrelpath = out[1].strip()
      status = "M"
    else:
      status, _ = out[0].split(' ', 1)
    if status != "A":
      base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
                              silent_ok=True)
      is_binary = "\0" in base_content  # Mercurial's heuristic
    if status != "R":
      new_content = open(relpath, "rb").read()
      is_binary = is_binary or "\0" in new_content
    if is_binary and base_content:
      # Fetch again without converting newlines
      base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
                              silent_ok=True, universal_newlines=False)
    # Only binary images need their new content uploaded; text content is
    # reconstructed server-side from the diff.
    if not is_binary or not self.IsImage(relpath):
      new_content = None
    return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
  """Splits a patch into separate pieces for each file.

  Args:
    data: A string containing the output of svn diff.

  Returns:
    A list of 2-tuple (filename, text) where text is the svn diff output
    pertaining to filename.
  """
  pieces = []
  current_name = None
  current_lines = []
  for raw_line in data.splitlines(True):
    next_name = None
    if raw_line.startswith('Index:'):
      next_name = raw_line.split(':', 1)[1].strip()
    elif raw_line.startswith('Property changes on:'):
      # When a file is modified, paths use '/' between directories, however
      # when a property is modified '\' is used on Windows. Make them the same
      # otherwise the file shows up twice.
      candidate = raw_line.split(':', 1)[1].strip().replace('\\', '/')
      if candidate != current_name:
        # Property changes without modifications start a new per-file diff.
        next_name = candidate
    if next_name:
      # Flush the previous file's chunk before starting the new one.
      if current_name and current_lines:
        pieces.append((current_name, ''.join(current_lines)))
      current_name = next_name
      current_lines = [raw_line]
    else:
      current_lines.append(raw_line)
  if current_name and current_lines:
    pieces.append((current_name, ''.join(current_lines)))
  return pieces
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
"""Uploads a separate patch for each file in the diff output.
Returns a list of [patch_key, filename] for each file.
"""
patches = SplitPatch(data)
rv = []
for patch in patches:
if len(patch[1]) > MAX_UPLOAD_SIZE:
print ("Not uploading the patch for " + patch[0] +
" because the file is too large.")
continue
form_fields = [("filename", patch[0])]
if not options.download_base:
form_fields.append(("content_upload", "1"))
files = [("data", "data.diff", patch[1])]
ctype, body = EncodeMultipartFormData(form_fields, files)
url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
print "Uploading patch for " + patch[0]
response_body = rpc_server.Send(url, body, content_type=ctype)
lines = response_body.splitlines()
if not lines or lines[0] != "OK":
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
rv.append([lines[1], patch[0]])
return rv
def GuessVCS(options):
"""Helper to guess the version control system.
This examines the current directory, guesses which VersionControlSystem
we're using, and returns an instance of the appropriate class. Exit with an
error if we can't figure it out.
Returns:
A VersionControlSystem instance. Exits if the VCS can't be guessed.
"""
# Mercurial has a command to get the base directory of a repository
# Try running it, but don't die if we don't have hg installed.
# NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
try:
out, returncode = RunShellWithReturnCode(["hg", "root"])
if returncode == 0:
return MercurialVCS(options, out.strip())
except OSError, (errno, message):
if errno != 2: # ENOENT -- they don't have hg installed.
raise
# Subversion has a .svn in all working directories.
if os.path.isdir('.svn'):
logging.info("Guessed VCS = Subversion")
return SubversionVCS(options)
# Git has a command to test if you're in a git tree.
# Try running it, but don't die if we don't have git installed.
try:
out, returncode = RunShellWithReturnCode(["git", "rev-parse",
"--is-inside-work-tree"])
if returncode == 0:
return GitVCS(options)
except OSError, (errno, message):
if errno != 2: # ENOENT -- they don't have git installed.
raise
ErrorExit(("Could not guess version control system. "
"Are you in a working copy directory?"))
def RealMain(argv, data=None):
  """The real main function.

  Args:
    argv: Command line arguments.
    data: Diff contents. If None (default) the diff is generated by
      the VersionControlSystem implementation returned by GuessVCS().

  Returns:
    A 2-tuple (issue id, patchset id).
    The patchset id is None if the base files are not uploaded by this
    script (applies only to SVN checkouts).
  """
  logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
                              "%(lineno)s %(message)s "))
  # Force a predictable locale so external tool output parses consistently.
  os.environ['LC_ALL'] = 'C'
  options, args = parser.parse_args(argv[1:])
  global verbosity
  verbosity = options.verbose
  if verbosity >= 3:
    logging.getLogger().setLevel(logging.DEBUG)
  elif verbosity >= 2:
    logging.getLogger().setLevel(logging.INFO)
  vcs = GuessVCS(options)
  if isinstance(vcs, SubversionVCS):
    # base field is only allowed for Subversion.
    # Note: Fetching base files may become deprecated in future releases.
    base = vcs.GuessBase(options.download_base)
  else:
    base = None
  # NOTE(review): this assigns True when download_base is already truthy, so
  # it looks like a no-op flag confirmation -- confirm intent upstream.
  if not base and options.download_base:
    options.download_base = True
    logging.info("Enabled upload of base file")
  if not options.assume_yes:
    vcs.CheckForUnknownFiles()
  if data is None:
    data = vcs.GenerateDiff(args)
  files = vcs.GetBaseFiles(data)
  # Optional pylint gate: score every .py file in the patch and prompt if
  # any falls below the configured minimum.
  if options.min_pylint_score:
    print "running pylint..."
    has_low_score = 0
    for file in files:
      if re.search(r'[.]py$', file):
        print "pylinting "+file+"..."
        res = RunShell(["pylint", file], silent_ok=True, ignore_retcode=True)
        match = re.search(r'Your code has been rated at ([0-9.-]+)', res)
        try:
          score = float(match.group(1))
        except:
          # No rating line found (e.g. pylint crashed): treat as failing.
          score = -1.0
        print file,"rated at",score
        if score < float(options.min_pylint_score):
          has_low_score += 1
    if has_low_score > 0:
      print "pylint reported", has_low_score, \
            "files with scores below", options.min_pylint_score
      AreYouSureOrExit()
  if verbosity >= 1:
    print "Upload server:", options.server, "(change with -s/--server)"
  if options.issue:
    prompt = "Message describing this patch set: "
  else:
    prompt = "New issue subject: "
  message = options.message or raw_input(prompt).strip()
  if not message:
    ErrorExit("A non-empty message is required")
  rpc_server = GetRpcServer(options)
  form_fields = [("subject", message)]
  if base:
    form_fields.append(("base", base))
  if options.issue:
    form_fields.append(("issue", str(options.issue)))
  if options.email:
    form_fields.append(("user", options.email))
  # Light sanity check: exactly one dot in the domain part of each address.
  if options.reviewers:
    for reviewer in options.reviewers.split(','):
      if "@" in reviewer and not reviewer.split("@")[1].count(".") == 1:
        ErrorExit("Invalid email address: %s" % reviewer)
    form_fields.append(("reviewers", options.reviewers))
  if options.cc:
    for cc in options.cc.split(','):
      if "@" in cc and not cc.split("@")[1].count(".") == 1:
        ErrorExit("Invalid email address: %s" % cc)
    form_fields.append(("cc", options.cc))
  # The description can come from --description, --description_file, or an
  # interactively-launched $EDITOR; the three are mutually exclusive.
  description = options.description
  if options.description_file:
    if options.description:
      ErrorExit("Can't specify description and description_file")
    file = open(options.description_file, 'r')
    description = file.read()
    file.close()
  if options.description_editor:
    if options.description:
      ErrorExit("Can't specify description and description_editor")
    if options.description_file:
      ErrorExit("Can't specify description_file and description_editor")
    if 'EDITOR' not in os.environ:
      ErrorExit("Please set the EDITOR environment variable.")
    editor = os.environ['EDITOR']
    if editor == None or editor == "":
      ErrorExit("Please set the EDITOR environment variable.")
    # Seed a temp file with a template, let the user edit it, read it back.
    tempfh, filename = tempfile.mkstemp()
    msg = "demo URL: http://your-url/foo/\ndescription: (start on next line)\n"
    os.write(tempfh, msg)
    os.close(tempfh)
    print "running EDITOR:", editor, filename
    cmd = editor + " " + filename
    subprocess.call(cmd, shell=True)
    file = open(filename, 'r')
    description = file.read()
    file.close()
    os.unlink(filename)
    print description
  if description:
    form_fields.append(("description", description))
  # Send a hash of all the base file so the server can determine if a copy
  # already exists in an earlier patchset.
  base_hashes = ""
  for file, info in files.iteritems():
    if not info[0] is None:
      checksum = md5.new(info[0]).hexdigest()
      if base_hashes:
        base_hashes += "|"
      base_hashes += checksum + ":" + file
  form_fields.append(("base_hashes", base_hashes))
  # If we're uploading base files, don't send the email before the uploads, so
  # that it contains the file status.
  if options.send_mail and options.download_base:
    form_fields.append(("send_mail", "1"))
  if not options.download_base:
    form_fields.append(("content_upload", "1"))
  if len(data) > MAX_UPLOAD_SIZE:
    print "Patch is large, so uploading file patches separately."
    uploaded_diff_file = []
    form_fields.append(("separate_patches", "1"))
  else:
    uploaded_diff_file = [("data", "data.diff", data)]
  ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
  response_body = rpc_server.Send("/upload", body, content_type=ctype)
  patchset = None
  # When content is uploaded the server replies with a message line, a
  # patchset id line, and one "<patch key> <filename>" line per file.
  if not options.download_base or not uploaded_diff_file:
    lines = response_body.splitlines()
    if len(lines) >= 2:
      msg = lines[0]
      patchset = lines[1].strip()
      patches = [x.split(" ", 1) for x in lines[2:]]
    else:
      msg = response_body
  else:
    msg = response_body
  StatusUpdate(msg)
  if not response_body.startswith("Issue created.") and \
     not response_body.startswith("Issue updated."):
    sys.exit(0)
  # The issue id is the tail of the issue URL in the status message.
  issue = msg[msg.rfind("/")+1:]
  if not uploaded_diff_file:
    result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
    if not options.download_base:
      patches = result
  if not options.download_base:
    vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
  if options.send_mail:
    rpc_server.Send("/" + issue + "/mail", payload="")
  return issue, patchset
# Rietveld instance that hosts Footprint code reviews (used by main() below).
FPREVIEW_ADDR = "footprint2009reviews.appspot.com"
def main():
  """Project-specific wrapper: injects Footprint defaults, then RealMain().

  Rewrites sys.argv to target the Footprint review server with a fixed cc
  list, editor-based description, mail sending, and a pylint score gate,
  taking the primary reviewer's email as the sole positional argument.
  """
  try:
    if len(sys.argv) == 1:
      print "Usage:", sys.argv[0], "<email address of primary reviewer>"
      print "(automatically cc's", FPREVIEW_ADDR, ")"
      sys.exit(1)
    # Build the canned argument list; order matters because "-r" expects the
    # reviewer email appended right after it.
    args = [sys.argv[0], "-s", "footprint2009reviews.appspot.com"]
    args.append("--cc=footprint-engreviews@googlegroups.com")
    args.append("--description_editor")
    args.append("--send_mail")
    args.append("--min_pylint_score")
    # we're starting with 9.0
    args.append("9.0")
    args.append("-r")
    email = sys.argv[1]
    if email.find("@") == -1:
      # Bare usernames default to gmail.com addresses.
      email += "@gmail.com"
    print >>sys.stderr, "*** sending to "+email+" for review. (note: @gmail.com)"
    args.append(email)
    sys.argv = args + sys.argv[2:]
    # Locate a pylintrc by walking up from the current directory unless the
    # caller already exported PYLINTRC.
    if "PYLINTRC" not in os.environ:
      testpath = os.getcwd()
      while testpath != "" and not os.path.exists(testpath + "/pylintrc"):
        testpath = re.sub(r'/[^/]*$', '', testpath)
        print "checking for "+testpath + "/pylintrc"
      if testpath == "":
        print >>sys.stderr, "ERROR: couldn't find 'pylintrc' file."
        sys.exit(1)
      os.environ['PYLINTRC'] = testpath + "/pylintrc"
      print "guessing PYLINTRC="+os.environ['PYLINTRC']
    print "running: ", " ".join(sys.argv)
    RealMain(sys.argv)
  except KeyboardInterrupt:
    print
    StatusUpdate("Interrupted.")
    sys.exit(1)
# Script entry point.
if __name__ == "__main__":
  main()
| Python |
#!/usr/bin/python
#
# didn't use generateDS because it required a slew of packages to be installed,
# like pulling on a sweater.
"""horrible regexp-based converter from XSD to human-readable HTML."""
# disable line too long-- irrelevant here
# pylint: disable-msg=C0301
# usage: python spec2html.py < spec0.1.xsd > spec0.1.html
import sys
import re
def main():
  """wrap the code in scope.

  Reads an XSD schema from stdin and prints an HTML rendering to stdout by
  running an ordered pipeline of regex substitutions.  The order of the
  re.sub() calls is significant: earlier passes rewrite the markup that
  later passes match on.
  """
  outstr = sys.stdin.read()
  # Pull the schema version out before the markup is flattened.
  version = (re.findall(r'<xs:schema version="(.+?)"', outstr))[0]
  # Collapse the document to one line so multi-construct regexes can span
  # what were originally multiple lines.
  outstr = re.sub(r'(\r?\n|\r)', r'', outstr)
  outstr = re.sub(r'<[?]xml.+?>', r'', outstr)
  outstr = re.sub(r'</?xs:schema.*?>', r'', outstr)
  # <code> in documentation becomes a self-link; <pcode> is plain code.
  outstr = re.sub(r'<code>(.+?)</code>', r'<a href="#\1"><code>\1</code></a>', outstr)
  outstr = re.sub(r'<pcode>(.+?)</pcode>', r'<code>\1</code>', outstr)
  # Protect allowed HTML tags as &&tag@@ before stripping all other angle
  # brackets; they are restored at the end of the pipeline.
  outstr = re.sub(r'<(/?(code|p|a|br|b).*?)>', r'&&\1@@', outstr)
  outstr = re.sub(r'<', r'', outstr)
  outstr = re.sub(r'/?>', r'', outstr)
  #blockquoting
  outstr = re.sub(r'/xs:(all|sequence)', r'</blockquote>', outstr)
  #Change element to selement for distinguishing multiple entries later on
  outstr = re.sub(r'xs:sequence(.+?)xs:element', r'xs:sequence\1xs:selement', outstr)
  #blockquoting
  outstr = re.sub(r'xs:(all|sequence)', r'<blockquote>', outstr)
  #Named types
  outstr = re.sub(r'xs:(simple|complex)Type name="(.+?)"(.+?)/xs:(simple|complex)Type',
                  r'<div class="namedType"><div class="entryName"><a name="\2">\2 (\1 type)</a></div>\3</div>', outstr)
  #Extension
  outstr = re.sub(r'xs:extension\s+?base="(xs:)?(.+?)"(.+?)/xs:extension', r'<div class="info">derived from: \2</div>\3', outstr)
  #restriction
  outstr = re.sub(r'xs:restriction\s+?base="(xs:)?(.+?)"(.+?)/xs:restriction', r'<div class="info">derived from: \2</div>\3', outstr)
  #attribute entries
  outstr = re.sub(r'/xs:attribute', r'</blockquote></div>\n', outstr)
  outstr = re.sub(r'\s*xs:attribute name="(.+?)"', r'<div class="entry"><blockquote><div class="entryName"><a name="\1">\1 (attribute)</a></div>\n', outstr)
  #element entries
  outstr = re.sub(r'/xs:element', r'</div>\n', outstr)
  outstr = re.sub(r'\s*xs:selement name="(.+?)"(.+?)', r'<div class="entry repeated"><div class="entryName"><a name="\1">\1 (repeated element)</a></div>\n', outstr)
  outstr = re.sub(r'\s*xs:element name="(.+?)"(.+?)', r'<div class="entry"><div class="entryName"><a name="\1">\1 (element)</a></div>\n', outstr)
  #documentation
  outstr = re.sub(r'xs:annotation\s+xs:documentation\s+!\[CDATA\[\s*(.+?)\s*\]\]\s+/xs:documentation\s+/xs:annotation', r'<div class="doc-text">\1</div>', outstr)
  #Little stuff in entries
  outstr = re.sub(r'use="(.+?)"', r'<span class="info">use is \1</span><br/>', outstr)
  outstr = re.sub(r'default=""', r'<span class="info">default value: <code>(empty string)</code></span><br/>', outstr)
  outstr = re.sub(r'default="(.+?)"', r'<span class="info">default value: <code>\1</code></span><br/>', outstr)
  outstr = re.sub(r'fixed="(.+?)"', r'<span class="info">fixed value: <code>\1</code></span><br/>', outstr)
  outstr = re.sub(r'xs:enumeration value="(.+?)"', r'<span class="info">allowed value: <code>\1</code></span><br/>', outstr)
  outstr = re.sub(r'xs:pattern value="(.+?)"', r'<span class="info">must match (regular expression): <code>\1</code></span><br/>', outstr)
  outstr = re.sub(r'type="(xs:)?(.+?)"', r'<span class="info">datatype: \2</span><br/>', outstr)
  outstr = re.sub(r'minOccurs="0"', r'<span class="info">required: optional.</span><br/>', outstr)
  outstr = re.sub(r'minOccurs="([0-9]+)"', r'<span class="info">required: at least \1 times</span><br/>', outstr)
  outstr = re.sub(r'maxOccurs="1"', r'<span class="info">Multiple not allowed</span><br/>', outstr)
  outstr = re.sub(r'maxOccurs="unbounded"', r'\n', outstr)
  #putting in links
  outstr = re.sub(r'(datatype|derived from): (locationType|dateTimeDurationType|yesNoEnum|sexRestrictedEnum|dateTimeOlsonDefaultPacific|timeOlson|dateTimeNoTZ|timeNoTZ)', r'\1: <a href="#\2"><code>\2</code></a>\n', outstr)
  outstr = re.sub(r'(datatype|derived from): (string)', r'\1: <a href="http://www.w3schools.com/Schema/schema_dtypes_string.asp"><code>\2</code></a>\n', outstr)
  outstr = re.sub(r'(datatype|derived from): (dateTime|date|time|duration)', r'\1: <a href="http://www.w3schools.com/Schema/schema_dtypes_date.asp"><code>\2</code></a>\n', outstr)
  outstr = re.sub(r'(datatype|derived from): (integer|decimal)', r'\1: <a href="http://www.w3schools.com/Schema/schema_dtypes_numeric.asp"><code>\2</code></a>\n', outstr)
  #Drop stuff we don't care about
  outstr = re.sub(r'/?xs:(simpleContent|complexType)', r'', outstr)
  #clean-up
  # Restore the HTML tags protected as &&tag@@ earlier.
  outstr = re.sub(r'&&', r'<', outstr)
  outstr = re.sub(r'@@', r'>', outstr)
  outstr = re.sub(r'\s*<br/>', r'<br/>\n', outstr)
  print "<html>"
  print "<head>"
  print "<title>Footprint XML Specification Version", version, "</title>"
  #print '<LINK REL="StyleSheet" HREF="spec.css" TYPE="text/css"/>'
  # Inline the stylesheet so the generated HTML is self-contained.
  print "<style>"
  cssfh = open('spec.css')
  print cssfh.read()
  print "</style>"
  print "</head>"
  print "<body>"
  print '<div class="titleText">Footprint XML Specification Version', version, '</div><br>'
  print outstr
  print "</body></html>"

# Runs unconditionally (no __main__ guard): this file is a filter script.
main()
| Python |
#!/usr/bin/python
#
# didn't use generateDS because it required a slew of packages to be installed,
# like pulling on a sweater.
"""horrible regexp-based converter from XSD to human-readable HTML."""
# disable line too long-- irrelevant here
# pylint: disable-msg=C0301
# usage: python spec2html.py < spec0.1.xsd > spec0.1.html
import sys
import re
def main():
"""wrap the code in scope."""
outstr = sys.stdin.read()
version = (re.findall(r'<xs:schema version="(.+?)"', outstr))[0]
outstr = re.sub(r'(\r?\n|\r)', r'', outstr)
outstr = re.sub(r'<[?]xml.+?>', r'', outstr)
outstr = re.sub(r'</?xs:schema.*?>', r'', outstr)
outstr = re.sub(r'<code>(.+?)</code>', r'<a href="#\1"><code>\1</code></a>', outstr)
outstr = re.sub(r'<pcode>(.+?)</pcode>', r'<code>\1</code>', outstr)
outstr = re.sub(r'<(/?(code|p|a|br|b).*?)>', r'&&\1@@', outstr)
outstr = re.sub(r'<', r'', outstr)
outstr = re.sub(r'/?>', r'', outstr)
#blockquoting
outstr = re.sub(r'/xs:(all|sequence)', r'</blockquote>', outstr)
#Change element to selement for distinguishing multiple entries later on
outstr = re.sub(r'xs:sequence(.+?)xs:element', r'xs:sequence\1xs:selement', outstr)
#blockquoting
outstr = re.sub(r'xs:(all|sequence)', r'<blockquote>', outstr)
#Named types
outstr = re.sub(r'xs:(simple|complex)Type name="(.+?)"(.+?)/xs:(simple|complex)Type',
r'<div class="namedType"><div class="entryName"><a name="\2">\2 (\1 type)</a></div>\3</div>', outstr)
#Extension
outstr = re.sub(r'xs:extension\s+?base="(xs:)?(.+?)"(.+?)/xs:extension', r'<div class="info">derived from: \2</div>\3', outstr)
#restriction
outstr = re.sub(r'xs:restriction\s+?base="(xs:)?(.+?)"(.+?)/xs:restriction', r'<div class="info">derived from: \2</div>\3', outstr)
#attribute entries
outstr = re.sub(r'/xs:attribute', r'</blockquote></div>\n', outstr)
outstr = re.sub(r'\s*xs:attribute name="(.+?)"', r'<div class="entry"><blockquote><div class="entryName"><a name="\1">\1 (attribute)</a></div>\n', outstr)
#element entries
outstr = re.sub(r'/xs:element', r'</div>\n', outstr)
outstr = re.sub(r'\s*xs:selement name="(.+?)"(.+?)', r'<div class="entry repeated"><div class="entryName"><a name="\1">\1 (repeated element)</a></div>\n', outstr)
outstr = re.sub(r'\s*xs:element name="(.+?)"(.+?)', r'<div class="entry"><div class="entryName"><a name="\1">\1 (element)</a></div>\n', outstr)
#documentation
outstr = re.sub(r'xs:annotation\s+xs:documentation\s+!\[CDATA\[\s*(.+?)\s*\]\]\s+/xs:documentation\s+/xs:annotation', r'<div class="doc-text">\1</div>', outstr)
#Little stuff in entries
outstr = re.sub(r'use="(.+?)"', r'<span class="info">use is \1</span><br/>', outstr)
outstr = re.sub(r'default=""', r'<span class="info">default value: <code>(empty string)</code></span><br/>', outstr)
outstr = re.sub(r'default="(.+?)"', r'<span class="info">default value: <code>\1</code></span><br/>', outstr)
outstr = re.sub(r'fixed="(.+?)"', r'<span class="info">fixed value: <code>\1</code></span><br/>', outstr)
outstr = re.sub(r'xs:enumeration value="(.+?)"', r'<span class="info">allowed value: <code>\1</code></span><br/>', outstr)
outstr = re.sub(r'xs:pattern value="(.+?)"', r'<span class="info">must match (regular expression): <code>\1</code></span><br/>', outstr)
outstr = re.sub(r'type="(xs:)?(.+?)"', r'<span class="info">datatype: \2</span><br/>', outstr)
outstr = re.sub(r'minOccurs="0"', r'<span class="info">required: optional.</span><br/>', outstr)
outstr = re.sub(r'minOccurs="([0-9]+)"', r'<span class="info">required: at least \1 times</span><br/>', outstr)
outstr = re.sub(r'maxOccurs="1"', r'<span class="info">Multiple not allowed</span><br/>', outstr)
outstr = re.sub(r'maxOccurs="unbounded"', r'\n', outstr)
#putting in links
outstr = re.sub(r'(datatype|derived from): (locationType|dateTimeDurationType|yesNoEnum|sexRestrictedEnum|dateTimeOlsonDefaultPacific|timeOlson|dateTimeNoTZ|timeNoTZ)', r'\1: <a href="#\2"><code>\2</code></a>\n', outstr)
outstr = re.sub(r'(datatype|derived from): (string)', r'\1: <a href="http://www.w3schools.com/Schema/schema_dtypes_string.asp"><code>\2</code></a>\n', outstr)
outstr = re.sub(r'(datatype|derived from): (dateTime|date|time|duration)', r'\1: <a href="http://www.w3schools.com/Schema/schema_dtypes_date.asp"><code>\2</code></a>\n', outstr)
outstr = re.sub(r'(datatype|derived from): (integer|decimal)', r'\1: <a href="http://www.w3schools.com/Schema/schema_dtypes_numeric.asp"><code>\2</code></a>\n', outstr)
#Drop stuff we don't care about
outstr = re.sub(r'/?xs:(simpleContent|complexType)', r'', outstr)
#clean-up
outstr = re.sub(r'&&', r'<', outstr)
outstr = re.sub(r'@@', r'>', outstr)
outstr = re.sub(r'\s*<br/>', r'<br/>\n', outstr)
print "<html>"
print "<head>"
print "<title>Footprint XML Specification Version", version, "</title>"
#print '<LINK REL="StyleSheet" HREF="spec.css" TYPE="text/css"/>'
print "<style>"
cssfh = open('spec.css')
print cssfh.read()
print "</style>"
print "</head>"
print "<body>"
print '<div class="titleText">Footprint XML Specification Version', version, '</div><br>'
print outstr
print "</body></html>"
main()
| Python |
#!/usr/bin/env python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Exports TSV data over HTTP.
Usage:
%s [flags]
--url=<string> URL endpoint to get exported data. (Required)
 --batch_size=<int> Number of rows to request per fetch from the URL
 endpoint; the more data per row, the smaller the batch size should be. (Default 1000)
--filename=<path> Path to the TSV file to export. (Required)
--digsig=<string> value passed to endpoint permitting export
The exit status will be 0 on success, non-zero on failure.
"""
import sys
import re
import logging
import getopt
import urllib2
import datetime
def PrintUsageExit(code):
print sys.modules['__main__'].__doc__ % sys.argv[0]
sys.stdout.flush()
sys.stderr.flush()
sys.exit(code)
def Pull(filename, url, min_key, delim, prefix):
# get content from url and write to filename
try:
connection = urllib2.urlopen(url);
# TODO: read 100 lines incrementally and show progress
content = connection.read()
connection.close()
except urllib2.URLError, e:
logging.error('%s returned error %i, %s' % (url, e.code, e.msg))
sys.exit(2)
try:
tsv_file = file(filename, 'a')
except IOError:
logging.error("I/O error({0}): {1}".format(errno, os.strerror(errno)))
sys.exit(3)
if prefix:
lines = content.split("\n")
lines.pop()
content = ("%s" % prefix) + ("\n%s" % prefix).join(lines) + "\n"
tsv_file.write(content)
tsv_file.close()
# count the number of lines
list = content.splitlines()
line_count = len(list)
last_line = list[line_count - 1]
if min_key == "":
# that's our header, don't count it
line_count -= 1
# get the key value of the last line
fields = last_line.split(delim)
min_key = fields[0][4:]
return min_key, line_count
def ParseArguments(argv):
  """Parse the command-line flags described in the module docstring.

  Args:
    argv: the full argument vector (argv[0] is the program name).

  Returns:
    Tuple (url, filename, batch_size, prefix, digsig); url and filename
    are None when the corresponding required flag was not supplied.
  """
  opts, unused_args = getopt.getopt(
      argv[1:],
      'dh',
      ['debug', 'help',
       'url=', 'filename=', 'prefix=', 'digsig=', 'batch_size='
      ])
  url = None
  filename = None
  prefix = ''
  digsig = ''
  batch_size = 1000
  for option, value in opts:
    if option == '--debug':
      logging.getLogger().setLevel(logging.DEBUG)
    elif option in ('-h', '--help'):
      PrintUsageExit(0)
    elif option == '--url':
      url = value
    elif option == '--filename':
      filename = value
    elif option == '--prefix':
      prefix = value
    elif option == '--digsig':
      digsig = value
    elif option == '--batch_size':
      batch_size = int(value)
      if batch_size <= 0:
        print >>sys.stderr, 'batch_size must be 1 or larger'
        PrintUsageExit(1)
  return (url, filename, batch_size, prefix, digsig)
def main(argv):
  """Drive the export: fetch batches from --url until a short batch arrives.

  Returns 0 on success; Pull() exits the process directly on errors.
  """
  logging.basicConfig(
      level=logging.INFO,
      format='%(levelname)-8s %(asctime)s %(message)s')
  args = ParseArguments(argv)
  # url or filename left as None means a required flag was missing.
  if [arg for arg in args if arg is None]:
    print >>sys.stderr, 'Invalid arguments'
    PrintUsageExit(1)
  url, filename, batch_size, prefix, digsig = args
  delim = "\t"
  min_key = ""
  # Primed above batch_size so the loop body runs at least once; each
  # Pull returns the batch's line count, and a short batch ends the loop.
  lines = batch_size + 2
  while lines >= batch_size:
    url_step = ("%s?digsig=%s&min_key=%s&limit=%s" %
                (url, str(digsig), str(min_key), str(batch_size)))
    if min_key != "":
      log_key = min_key
    else:
      log_key = "[start]"
    t0 = datetime.datetime.now()
    # NOTE(review): min_key is assumed to be URL-safe as interpolated
    # above (no escaping is applied) -- confirm against the key format.
    min_key, lines = Pull(filename, url_step, min_key, delim, prefix)
    #print min_key
    diff = datetime.datetime.now() - t0
    secs = "%d.%d" % (diff.seconds, diff.microseconds/1000)
    logging.info('fetched header + %d in %s secs from %s', lines, secs, log_key)
  return 0
if __name__ == '__main__':
  sys.exit(main(sys.argv))
| Python |
"""
Copyright (c) 2003-2007 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
__version__ = "1.4.1"
| Python |
"""
Copyright (c) 2003-2007 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
import itertools
import datetime
import calendar
import thread
import sys
__all__ = ["rrule", "rruleset", "rrulestr",
"YEARLY", "MONTHLY", "WEEKLY", "DAILY",
"HOURLY", "MINUTELY", "SECONDLY",
"MO", "TU", "WE", "TH", "FR", "SA", "SU"]
# Every mask is 7 days longer to handle cross-year weekly periods.
# Month number for each day of a leap year (plus 7 days of January).
M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30+
                 [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7)
M365MASK = list(M366MASK)
M29, M30, M31 = range(1,30), range(1,31), range(1,32)
# Day-of-month (1..31) for each day of the year, leap and non-leap.
MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
MDAY365MASK = list(MDAY366MASK)
M29, M30, M31 = range(-29,0), range(-30,0), range(-31,0)
# Negative day-of-month (-31..-1, counted from month end) per day of year.
NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
NMDAY365MASK = list(NMDAY366MASK)
# Cumulative day-of-year offsets of each month boundary.
M366RANGE = (0,31,60,91,121,152,182,213,244,274,305,335,366)
M365RANGE = (0,31,59,90,120,151,181,212,243,273,304,334,365)
WDAYMASK = [0,1,2,3,4,5,6]*55
# Derive the 365-day masks by deleting Feb 29 (index 59) from the leap
# masks; NMDAY uses index 31 because negative February entries precede it.
del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31]
MDAY365MASK = tuple(MDAY365MASK)
M365MASK = tuple(M365MASK)
# Recurrence frequencies, ordered from coarsest to finest.
(YEARLY,
 MONTHLY,
 WEEKLY,
 DAILY,
 HOURLY,
 MINUTELY,
 SECONDLY) = range(7)
# Imported on demand.
easter = None
parser = None
class weekday(object):
    """A weekday, optionally qualified by an ordinal n.

    weekday(0) is Monday.  weekday(3, -1) means "the last Thursday" when
    used as an rrule byweekday argument.  n == 0 is rejected because a
    "zeroth weekday" has no meaning in RFC 2445 recurrence rules.
    """
    __slots__ = ["weekday", "n"]
    def __init__(self, weekday, n=None):
        if n == 0:
            # Paren-style raise for consistency with the rest of this
            # module (e.g. the bysetpos checks in rrule.__init__).
            raise ValueError("Can't create weekday with n == 0")
        self.weekday = weekday
        self.n = n
    def __call__(self, n):
        """Return a weekday with the same day but ordinal n (self if equal)."""
        if n == self.n:
            return self
        else:
            return self.__class__(self.weekday, n)
    def __eq__(self, other):
        try:
            if self.weekday != other.weekday or self.n != other.n:
                return False
        except AttributeError:
            # other is not weekday-like: never equal.
            return False
        return True
    def __repr__(self):
        s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
        if not self.n:
            return s
        else:
            return "%s(%+d)" % (s, self.n)
# Singleton instances for each weekday; ordinals are attached via
# __call__, e.g. FR(-1) for "last Friday".
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
class rrulebase:
    """Common iteration machinery shared by rrule and rruleset.

    Subclasses provide _iter(), a generator yielding datetimes in
    ascending order.  This base adds optional caching of the yielded
    values (shared between concurrent iterators) plus convenience
    queries: indexing, membership, count(), before(), after(), between().
    """
    def __init__(self, cache=False):
        if cache:
            self._cache = []
            self._cache_lock = thread.allocate_lock()
            self._cache_gen = self._iter()
            self._cache_complete = False
        else:
            self._cache = None
            self._cache_complete = False
        # Total number of occurrences; filled in lazily by _iter().
        self._len = None
    def __iter__(self):
        if self._cache_complete:
            return iter(self._cache)
        elif self._cache is None:
            return self._iter()
        else:
            return self._iter_cached()
    def _iter_cached(self):
        # Serve from the shared cache, extending it 10 items at a time
        # from the single shared generator when we reach its end.
        i = 0
        gen = self._cache_gen
        cache = self._cache
        acquire = self._cache_lock.acquire
        release = self._cache_lock.release
        while gen:
            if i == len(cache):
                acquire()
                if self._cache_complete:
                    # NOTE(review): both break paths leave the lock held;
                    # looks safe only because completed caches are served
                    # without locking afterwards -- confirm no other
                    # _iter_cached can still be blocked in acquire().
                    break
                try:
                    for j in range(10):
                        cache.append(gen.next())
                except StopIteration:
                    self._cache_gen = gen = None
                    self._cache_complete = True
                    break
                release()
            yield cache[i]
            i += 1
        # Generator exhausted: drain whatever remains in the cache.
        while i < self._len:
            yield cache[i]
            i += 1
    def __getitem__(self, item):
        if self._cache_complete:
            return self._cache[item]
        elif isinstance(item, slice):
            if item.step and item.step < 0:
                # Negative step: must materialize everything first.
                return list(iter(self))[item]
            else:
                return list(itertools.islice(self,
                                             item.start or 0,
                                             item.stop or sys.maxint,
                                             item.step or 1))
        elif item >= 0:
            gen = iter(self)
            try:
                for i in range(item+1):
                    res = gen.next()
            except StopIteration:
                raise IndexError
            return res
        else:
            # Negative index: also requires full materialization.
            return list(iter(self))[item]
    def __contains__(self, item):
        if self._cache_complete:
            return item in self._cache
        else:
            # Occurrences are ascending, so we can stop early.
            for i in self:
                if i == item:
                    return True
                elif i > item:
                    return False
        return False
    # __len__() introduces a large performance penality.
    def count(self):
        """Return the total number of occurrences (iterates fully once)."""
        if self._len is None:
            for x in self: pass
        return self._len
    def before(self, dt, inc=False):
        """Return the last occurrence before dt (or <= dt when inc=True)."""
        if self._cache_complete:
            gen = self._cache
        else:
            gen = self
        last = None
        if inc:
            for i in gen:
                if i > dt:
                    break
                last = i
        else:
            for i in gen:
                if i >= dt:
                    break
                last = i
        return last
    def after(self, dt, inc=False):
        """Return the first occurrence after dt (or >= dt when inc=True)."""
        if self._cache_complete:
            gen = self._cache
        else:
            gen = self
        if inc:
            for i in gen:
                if i >= dt:
                    return i
        else:
            for i in gen:
                if i > dt:
                    return i
        return None
    def between(self, after, before, inc=False):
        """Return all occurrences strictly between after and before
        (inclusive of both endpoints when inc=True)."""
        if self._cache_complete:
            gen = self._cache
        else:
            gen = self
        started = False
        l = []
        if inc:
            for i in gen:
                if i > before:
                    break
                elif not started:
                    if i >= after:
                        started = True
                        l.append(i)
                else:
                    l.append(i)
        else:
            for i in gen:
                if i >= before:
                    break
                elif not started:
                    if i > after:
                        started = True
                        l.append(i)
                else:
                    l.append(i)
        return l
class rrule(rrulebase):
    """A recurrence rule (RFC 2445 RRULE) producing datetimes lazily.

    Construction normalizes every BYxxx argument into internal tuples;
    _iter() then walks calendar periods at the requested frequency,
    filters candidate days via _iterinfo masks, and yields matches.
    """
    def __init__(self, freq, dtstart=None,
                 interval=1, wkst=None, count=None, until=None, bysetpos=None,
                 bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
                 byweekno=None, byweekday=None,
                 byhour=None, byminute=None, bysecond=None,
                 cache=False):
        """Normalize all rule parameters; see RFC 2445 for semantics.

        freq is one of YEARLY..SECONDLY; dtstart defaults to now().
        BYxxx arguments accept a single value or an iterable of values.
        """
        rrulebase.__init__(self, cache)
        global easter
        if not dtstart:
            dtstart = datetime.datetime.now().replace(microsecond=0)
        elif not isinstance(dtstart, datetime.datetime):
            # A plain date: promote to a midnight datetime.
            dtstart = datetime.datetime.fromordinal(dtstart.toordinal())
        else:
            dtstart = dtstart.replace(microsecond=0)
        self._dtstart = dtstart
        self._tzinfo = dtstart.tzinfo
        self._freq = freq
        self._interval = interval
        self._count = count
        if until and not isinstance(until, datetime.datetime):
            until = datetime.datetime.fromordinal(until.toordinal())
        self._until = until
        if wkst is None:
            self._wkst = calendar.firstweekday()
        elif type(wkst) is int:
            self._wkst = wkst
        else:
            self._wkst = wkst.weekday
        if bysetpos is None:
            self._bysetpos = None
        elif type(bysetpos) is int:
            if bysetpos == 0 or not (-366 <= bysetpos <= 366):
                raise ValueError("bysetpos must be between 1 and 366, "
                                 "or between -366 and -1")
            self._bysetpos = (bysetpos,)
        else:
            self._bysetpos = tuple(bysetpos)
            for pos in self._bysetpos:
                if pos == 0 or not (-366 <= pos <= 366):
                    raise ValueError("bysetpos must be between 1 and 366, "
                                     "or between -366 and -1")
        # When no day-selecting BYxxx was given, derive one from dtstart
        # so the rule repeats "the same day" at the chosen frequency.
        if not (byweekno or byyearday or bymonthday or
                byweekday is not None or byeaster is not None):
            if freq == YEARLY:
                if not bymonth:
                    bymonth = dtstart.month
                bymonthday = dtstart.day
            elif freq == MONTHLY:
                bymonthday = dtstart.day
            elif freq == WEEKLY:
                byweekday = dtstart.weekday()
        # bymonth
        if not bymonth:
            self._bymonth = None
        elif type(bymonth) is int:
            self._bymonth = (bymonth,)
        else:
            self._bymonth = tuple(bymonth)
        # byyearday
        if not byyearday:
            self._byyearday = None
        elif type(byyearday) is int:
            self._byyearday = (byyearday,)
        else:
            self._byyearday = tuple(byyearday)
        # byeaster
        if byeaster is not None:
            if not easter:
                from dateutil import easter
            if type(byeaster) is int:
                self._byeaster = (byeaster,)
            else:
                self._byeaster = tuple(byeaster)
        else:
            self._byeaster = None
        # bymonthay -- split positive and negative (from-month-end) days.
        if not bymonthday:
            self._bymonthday = ()
            self._bynmonthday = ()
        elif type(bymonthday) is int:
            if bymonthday < 0:
                self._bynmonthday = (bymonthday,)
                self._bymonthday = ()
            else:
                self._bymonthday = (bymonthday,)
                self._bynmonthday = ()
        else:
            self._bymonthday = tuple([x for x in bymonthday if x > 0])
            self._bynmonthday = tuple([x for x in bymonthday if x < 0])
        # byweekno
        if byweekno is None:
            self._byweekno = None
        elif type(byweekno) is int:
            self._byweekno = (byweekno,)
        else:
            self._byweekno = tuple(byweekno)
        # byweekday / bynweekday -- plain weekdays vs. ordinal weekdays
        # like FR(-1); ordinals only matter at MONTHLY/YEARLY frequency.
        if byweekday is None:
            self._byweekday = None
            self._bynweekday = None
        elif type(byweekday) is int:
            self._byweekday = (byweekday,)
            self._bynweekday = None
        elif hasattr(byweekday, "n"):
            if not byweekday.n or freq > MONTHLY:
                self._byweekday = (byweekday.weekday,)
                self._bynweekday = None
            else:
                self._bynweekday = ((byweekday.weekday, byweekday.n),)
                self._byweekday = None
        else:
            self._byweekday = []
            self._bynweekday = []
            for wday in byweekday:
                if type(wday) is int:
                    self._byweekday.append(wday)
                elif not wday.n or freq > MONTHLY:
                    self._byweekday.append(wday.weekday)
                else:
                    self._bynweekday.append((wday.weekday, wday.n))
            self._byweekday = tuple(self._byweekday)
            self._bynweekday = tuple(self._bynweekday)
            if not self._byweekday:
                self._byweekday = None
            elif not self._bynweekday:
                self._bynweekday = None
        # byhour
        if byhour is None:
            if freq < HOURLY:
                self._byhour = (dtstart.hour,)
            else:
                self._byhour = None
        elif type(byhour) is int:
            self._byhour = (byhour,)
        else:
            self._byhour = tuple(byhour)
        # byminute
        if byminute is None:
            if freq < MINUTELY:
                self._byminute = (dtstart.minute,)
            else:
                self._byminute = None
        elif type(byminute) is int:
            self._byminute = (byminute,)
        else:
            self._byminute = tuple(byminute)
        # bysecond
        if bysecond is None:
            if freq < SECONDLY:
                self._bysecond = (dtstart.second,)
            else:
                self._bysecond = None
        elif type(bysecond) is int:
            self._bysecond = (bysecond,)
        else:
            self._bysecond = tuple(bysecond)
        # For daily or coarser rules the set of times-of-day is fixed;
        # precompute it once.  Finer rules rebuild it per iteration.
        if self._freq >= HOURLY:
            self._timeset = None
        else:
            self._timeset = []
            for hour in self._byhour:
                for minute in self._byminute:
                    for second in self._bysecond:
                        self._timeset.append(
                                datetime.time(hour, minute, second,
                                              tzinfo=self._tzinfo))
            self._timeset.sort()
            self._timeset = tuple(self._timeset)
    def _iter(self):
        """Generate occurrences in ascending order (see rrulebase)."""
        year, month, day, hour, minute, second, weekday, yearday, _ = \
            self._dtstart.timetuple()
        # Some local variables to speed things up a bit
        freq = self._freq
        interval = self._interval
        wkst = self._wkst
        until = self._until
        bymonth = self._bymonth
        byweekno = self._byweekno
        byyearday = self._byyearday
        byweekday = self._byweekday
        byeaster = self._byeaster
        bymonthday = self._bymonthday
        bynmonthday = self._bynmonthday
        bysetpos = self._bysetpos
        byhour = self._byhour
        byminute = self._byminute
        bysecond = self._bysecond
        ii = _iterinfo(self)
        ii.rebuild(year, month)
        getdayset = {YEARLY:ii.ydayset,
                     MONTHLY:ii.mdayset,
                     WEEKLY:ii.wdayset,
                     DAILY:ii.ddayset,
                     HOURLY:ii.ddayset,
                     MINUTELY:ii.ddayset,
                     SECONDLY:ii.ddayset}[freq]
        if freq < HOURLY:
            timeset = self._timeset
        else:
            gettimeset = {HOURLY:ii.htimeset,
                          MINUTELY:ii.mtimeset,
                          SECONDLY:ii.stimeset}[freq]
            if ((freq >= HOURLY and
                 self._byhour and hour not in self._byhour) or
                (freq >= MINUTELY and
                 self._byminute and minute not in self._byminute) or
                (freq >= SECONDLY and
                 # Bug fix: this tested `minute` against _bysecond, which
                 # broke SECONDLY rules with a BYSECOND filter; the check
                 # must mirror the hour/minute lines above.
                 self._bysecond and second not in self._bysecond)):
                timeset = ()
            else:
                timeset = gettimeset(hour, minute, second)
        total = 0
        count = self._count
        while True:
            # Get dayset with the right frequency
            dayset, start, end = getdayset(year, month, day)
            # Do the "hard" work ;-)
            filtered = False
            for i in dayset[start:end]:
                if ((bymonth and ii.mmask[i] not in bymonth) or
                    (byweekno and not ii.wnomask[i]) or
                    (byweekday and ii.wdaymask[i] not in byweekday) or
                    (ii.nwdaymask and not ii.nwdaymask[i]) or
                    (byeaster and not ii.eastermask[i]) or
                    ((bymonthday or bynmonthday) and
                     ii.mdaymask[i] not in bymonthday and
                     ii.nmdaymask[i] not in bynmonthday) or
                    (byyearday and
                     ((i < ii.yearlen and i+1 not in byyearday
                                      and -ii.yearlen+i not in byyearday) or
                      (i >= ii.yearlen and i+1-ii.yearlen not in byyearday
                                       and -ii.nextyearlen+i-ii.yearlen
                                           not in byyearday)))):
                    dayset[i] = None
                    filtered = True
            # Output results
            if bysetpos and timeset:
                poslist = []
                for pos in bysetpos:
                    if pos < 0:
                        daypos, timepos = divmod(pos, len(timeset))
                    else:
                        daypos, timepos = divmod(pos-1, len(timeset))
                    try:
                        i = [x for x in dayset[start:end]
                                if x is not None][daypos]
                        time = timeset[timepos]
                    except IndexError:
                        pass
                    else:
                        date = datetime.date.fromordinal(ii.yearordinal+i)
                        res = datetime.datetime.combine(date, time)
                        if res not in poslist:
                            poslist.append(res)
                poslist.sort()
                for res in poslist:
                    if until and res > until:
                        self._len = total
                        return
                    elif res >= self._dtstart:
                        total += 1
                        yield res
                        if count:
                            count -= 1
                            if not count:
                                self._len = total
                                return
            else:
                for i in dayset[start:end]:
                    if i is not None:
                        date = datetime.date.fromordinal(ii.yearordinal+i)
                        for time in timeset:
                            res = datetime.datetime.combine(date, time)
                            if until and res > until:
                                self._len = total
                                return
                            elif res >= self._dtstart:
                                total += 1
                                yield res
                                if count:
                                    count -= 1
                                    if not count:
                                        self._len = total
                                        return
            # Handle frequency and interval
            fixday = False
            if freq == YEARLY:
                year += interval
                if year > datetime.MAXYEAR:
                    self._len = total
                    return
                ii.rebuild(year, month)
            elif freq == MONTHLY:
                month += interval
                if month > 12:
                    div, mod = divmod(month, 12)
                    month = mod
                    year += div
                    if month == 0:
                        month = 12
                        year -= 1
                    if year > datetime.MAXYEAR:
                        self._len = total
                        return
                ii.rebuild(year, month)
            elif freq == WEEKLY:
                if wkst > weekday:
                    day += -(weekday+1+(6-wkst))+self._interval*7
                else:
                    day += -(weekday-wkst)+self._interval*7
                weekday = wkst
                fixday = True
            elif freq == DAILY:
                day += interval
                fixday = True
            elif freq == HOURLY:
                if filtered:
                    # Jump to one iteration before next day
                    hour += ((23-hour)//interval)*interval
                while True:
                    hour += interval
                    div, mod = divmod(hour, 24)
                    if div:
                        hour = mod
                        day += div
                        fixday = True
                    if not byhour or hour in byhour:
                        break
                timeset = gettimeset(hour, minute, second)
            elif freq == MINUTELY:
                if filtered:
                    # Jump to one iteration before next day
                    minute += ((1439-(hour*60+minute))//interval)*interval
                while True:
                    minute += interval
                    div, mod = divmod(minute, 60)
                    if div:
                        minute = mod
                        hour += div
                        div, mod = divmod(hour, 24)
                        if div:
                            hour = mod
                            day += div
                            fixday = True
                        filtered = False
                    if ((not byhour or hour in byhour) and
                        (not byminute or minute in byminute)):
                        break
                timeset = gettimeset(hour, minute, second)
            elif freq == SECONDLY:
                if filtered:
                    # Jump to one iteration before next day
                    second += (((86399-(hour*3600+minute*60+second))
                                //interval)*interval)
                while True:
                    second += self._interval
                    div, mod = divmod(second, 60)
                    if div:
                        second = mod
                        minute += div
                        div, mod = divmod(minute, 60)
                        if div:
                            minute = mod
                            hour += div
                            div, mod = divmod(hour, 24)
                            if div:
                                hour = mod
                                day += div
                                fixday = True
                    if ((not byhour or hour in byhour) and
                        (not byminute or minute in byminute) and
                        (not bysecond or second in bysecond)):
                        break
                timeset = gettimeset(hour, minute, second)
            # Normalize a day that overflowed its month (and possibly year).
            if fixday and day > 28:
                daysinmonth = calendar.monthrange(year, month)[1]
                if day > daysinmonth:
                    while day > daysinmonth:
                        day -= daysinmonth
                        month += 1
                        if month == 13:
                            month = 1
                            year += 1
                            if year > datetime.MAXYEAR:
                                self._len = total
                                return
                        daysinmonth = calendar.monthrange(year, month)[1]
                    ii.rebuild(year, month)
class _iterinfo(object):
    """Per-(year, month) precomputed calendar masks used by rrule._iter.

    rebuild() refreshes the masks lazily when the iteration crosses into
    a new year or month.  Indices into the masks are day-of-year offsets
    (0-based); masks run 7 days past year end to cover cross-year weeks.
    """
    __slots__ = ["rrule", "lastyear", "lastmonth",
                 "yearlen", "nextyearlen", "yearordinal", "yearweekday",
                 "mmask", "mrange", "mdaymask", "nmdaymask",
                 "wdaymask", "wnomask", "nwdaymask", "eastermask"]
    def __init__(self, rrule):
        for attr in self.__slots__:
            setattr(self, attr, None)
        self.rrule = rrule
    def rebuild(self, year, month):
        # Every mask is 7 days longer to handle cross-year weekly periods.
        rr = self.rrule
        if year != self.lastyear:
            self.yearlen = 365+calendar.isleap(year)
            self.nextyearlen = 365+calendar.isleap(year+1)
            firstyday = datetime.date(year, 1, 1)
            self.yearordinal = firstyday.toordinal()
            self.yearweekday = firstyday.weekday()
            wday = datetime.date(year, 1, 1).weekday()
            if self.yearlen == 365:
                self.mmask = M365MASK
                self.mdaymask = MDAY365MASK
                self.nmdaymask = NMDAY365MASK
                self.wdaymask = WDAYMASK[wday:]
                self.mrange = M365RANGE
            else:
                self.mmask = M366MASK
                self.mdaymask = MDAY366MASK
                self.nmdaymask = NMDAY366MASK
                self.wdaymask = WDAYMASK[wday:]
                self.mrange = M366RANGE
            if not rr._byweekno:
                self.wnomask = None
            else:
                # Build the ISO-like week-number mask: wnomask[i] is 1 iff
                # day i falls in one of the requested week numbers.
                self.wnomask = [0]*(self.yearlen+7)
                #no1wkst = firstwkst = self.wdaymask.index(rr._wkst)
                no1wkst = firstwkst = (7-self.yearweekday+rr._wkst)%7
                if no1wkst >= 4:
                    no1wkst = 0
                    # Number of days in the year, plus the days we got
                    # from last year.
                    wyearlen = self.yearlen+(self.yearweekday-rr._wkst)%7
                else:
                    # Number of days in the year, minus the days we
                    # left in last year.
                    wyearlen = self.yearlen-no1wkst
                div, mod = divmod(wyearlen, 7)
                numweeks = div+mod//4
                for n in rr._byweekno:
                    if n < 0:
                        n += numweeks+1
                    if not (0 < n <= numweeks):
                        continue
                    if n > 1:
                        i = no1wkst+(n-1)*7
                        if no1wkst != firstwkst:
                            i -= 7-firstwkst
                    else:
                        i = no1wkst
                    for j in range(7):
                        self.wnomask[i] = 1
                        i += 1
                        if self.wdaymask[i] == rr._wkst:
                            break
                if 1 in rr._byweekno:
                    # Check week number 1 of next year as well
                    # TODO: Check -numweeks for next year.
                    i = no1wkst+numweeks*7
                    if no1wkst != firstwkst:
                        i -= 7-firstwkst
                    if i < self.yearlen:
                        # If week starts in next year, we
                        # don't care about it.
                        for j in range(7):
                            self.wnomask[i] = 1
                            i += 1
                            if self.wdaymask[i] == rr._wkst:
                                break
                if no1wkst:
                    # Check last week number of last year as
                    # well. If no1wkst is 0, either the year
                    # started on week start, or week number 1
                    # got days from last year, so there are no
                    # days from last year's last week number in
                    # this year.
                    if -1 not in rr._byweekno:
                        lyearweekday = datetime.date(year-1,1,1).weekday()
                        lno1wkst = (7-lyearweekday+rr._wkst)%7
                        lyearlen = 365+calendar.isleap(year-1)
                        if lno1wkst >= 4:
                            lno1wkst = 0
                            lnumweeks = 52+(lyearlen+
                                           (lyearweekday-rr._wkst)%7)%7//4
                        else:
                            lnumweeks = 52+(self.yearlen-no1wkst)%7//4
                    else:
                        lnumweeks = -1
                    if lnumweeks in rr._byweekno:
                        for i in range(no1wkst):
                            self.wnomask[i] = 1
        if (rr._bynweekday and
            (month != self.lastmonth or year != self.lastyear)):
            # Mark days matching ordinal weekdays (e.g. FR(-1)) within the
            # relevant month or year ranges.
            ranges = []
            if rr._freq == YEARLY:
                if rr._bymonth:
                    for month in rr._bymonth:
                        ranges.append(self.mrange[month-1:month+1])
                else:
                    ranges = [(0, self.yearlen)]
            elif rr._freq == MONTHLY:
                ranges = [self.mrange[month-1:month+1]]
            if ranges:
                # Weekly frequency won't get here, so we may not
                # care about cross-year weekly periods.
                self.nwdaymask = [0]*self.yearlen
                for first, last in ranges:
                    last -= 1
                    for wday, n in rr._bynweekday:
                        if n < 0:
                            i = last+(n+1)*7
                            i -= (self.wdaymask[i]-wday)%7
                        else:
                            i = first+(n-1)*7
                            i += (7-self.wdaymask[i]+wday)%7
                        if first <= i <= last:
                            self.nwdaymask[i] = 1
        if rr._byeaster:
            self.eastermask = [0]*(self.yearlen+7)
            eyday = easter.easter(year).toordinal()-self.yearordinal
            for offset in rr._byeaster:
                self.eastermask[eyday+offset] = 1
        self.lastyear = year
        self.lastmonth = month
    def ydayset(self, year, month, day):
        # Candidate days for a YEARLY step: every day of the year.
        return range(self.yearlen), 0, self.yearlen
    def mdayset(self, year, month, day):
        # Candidate days for a MONTHLY step: the days of one month.
        set = [None]*self.yearlen
        start, end = self.mrange[month-1:month+1]
        for i in range(start, end):
            set[i] = i
        return set, start, end
    def wdayset(self, year, month, day):
        # Candidate days for a WEEKLY step, starting at (year,month,day).
        # We need to handle cross-year weeks here.
        set = [None]*(self.yearlen+7)
        i = datetime.date(year, month, day).toordinal()-self.yearordinal
        start = i
        for j in range(7):
            set[i] = i
            i += 1
            #if (not (0 <= i < self.yearlen) or
            #    self.wdaymask[i] == self.rrule._wkst):
            # This will cross the year boundary, if necessary.
            if self.wdaymask[i] == self.rrule._wkst:
                break
        return set, start, i
    def ddayset(self, year, month, day):
        # Candidate days for DAILY (and finer) steps: a single day.
        set = [None]*self.yearlen
        i = datetime.date(year, month, day).toordinal()-self.yearordinal
        set[i] = i
        return set, i, i+1
    def htimeset(self, hour, minute, second):
        # Times-of-day for an HOURLY step at the given hour.
        set = []
        rr = self.rrule
        for minute in rr._byminute:
            for second in rr._bysecond:
                set.append(datetime.time(hour, minute, second,
                                         tzinfo=rr._tzinfo))
        set.sort()
        return set
    def mtimeset(self, hour, minute, second):
        # Times-of-day for a MINUTELY step at the given hour:minute.
        set = []
        rr = self.rrule
        for second in rr._bysecond:
            set.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo))
        set.sort()
        return set
    def stimeset(self, hour, minute, second):
        # Times-of-day for a SECONDLY step: exactly one time.
        return (datetime.time(hour, minute, second,
                              tzinfo=self.rrule._tzinfo),)
class rruleset(rrulebase):
    """A set of recurrence rules/dates with exclusion rules/dates.

    Occurrences are the sorted union of all rrule()/rdate() sources,
    minus anything produced by exrule()/exdate().  Iteration merges the
    per-source generators lazily, k-way-merge style.
    """
    class _genitem:
        # Wraps one source generator, holding its current value in .dt;
        # instances are kept sorted in a list and removed on exhaustion.
        def __init__(self, genlist, gen):
            try:
                self.dt = gen()
                genlist.append(self)
            except StopIteration:
                pass
            self.genlist = genlist
            self.gen = gen
        def next(self):
            try:
                self.dt = self.gen()
            except StopIteration:
                self.genlist.remove(self)
        def __cmp__(self, other):
            # Ordering by current datetime keeps the merge lists sorted.
            return cmp(self.dt, other.dt)
    def __init__(self, cache=False):
        rrulebase.__init__(self, cache)
        self._rrule = []
        self._rdate = []
        self._exrule = []
        self._exdate = []
    def rrule(self, rrule):
        """Add a recurrence rule contributing occurrences."""
        self._rrule.append(rrule)
    def rdate(self, rdate):
        """Add a single datetime contributing an occurrence."""
        self._rdate.append(rdate)
    def exrule(self, exrule):
        """Add a recurrence rule whose occurrences are excluded."""
        self._exrule.append(exrule)
    def exdate(self, exdate):
        """Add a single datetime to exclude."""
        self._exdate.append(exdate)
    def _iter(self):
        rlist = []
        self._rdate.sort()
        self._genitem(rlist, iter(self._rdate).next)
        for gen in [iter(x).next for x in self._rrule]:
            self._genitem(rlist, gen)
        rlist.sort()
        exlist = []
        self._exdate.sort()
        self._genitem(exlist, iter(self._exdate).next)
        for gen in [iter(x).next for x in self._exrule]:
            self._genitem(exlist, gen)
        exlist.sort()
        lastdt = None
        total = 0
        while rlist:
            ritem = rlist[0]
            # Skip duplicates (lastdt) and anything matching an exclusion.
            if not lastdt or lastdt != ritem.dt:
                while exlist and exlist[0] < ritem:
                    exlist[0].next()
                    exlist.sort()
                if not exlist or ritem != exlist[0]:
                    total += 1
                    yield ritem.dt
                lastdt = ritem.dt
            ritem.next()
            rlist.sort()
        self._len = total
class _rrulestr:
_freq_map = {"YEARLY": YEARLY,
"MONTHLY": MONTHLY,
"WEEKLY": WEEKLY,
"DAILY": DAILY,
"HOURLY": HOURLY,
"MINUTELY": MINUTELY,
"SECONDLY": SECONDLY}
_weekday_map = {"MO":0,"TU":1,"WE":2,"TH":3,"FR":4,"SA":5,"SU":6}
def _handle_int(self, rrkwargs, name, value, **kwargs):
rrkwargs[name.lower()] = int(value)
def _handle_int_list(self, rrkwargs, name, value, **kwargs):
rrkwargs[name.lower()] = [int(x) for x in value.split(',')]
_handle_INTERVAL = _handle_int
_handle_COUNT = _handle_int
_handle_BYSETPOS = _handle_int_list
_handle_BYMONTH = _handle_int_list
_handle_BYMONTHDAY = _handle_int_list
_handle_BYYEARDAY = _handle_int_list
_handle_BYEASTER = _handle_int_list
_handle_BYWEEKNO = _handle_int_list
_handle_BYHOUR = _handle_int_list
_handle_BYMINUTE = _handle_int_list
_handle_BYSECOND = _handle_int_list
def _handle_FREQ(self, rrkwargs, name, value, **kwargs):
rrkwargs["freq"] = self._freq_map[value]
def _handle_UNTIL(self, rrkwargs, name, value, **kwargs):
global parser
if not parser:
from dateutil import parser
try:
rrkwargs["until"] = parser.parse(value,
ignoretz=kwargs.get("ignoretz"),
tzinfos=kwargs.get("tzinfos"))
except ValueError:
raise ValueError, "invalid until date"
def _handle_WKST(self, rrkwargs, name, value, **kwargs):
rrkwargs["wkst"] = self._weekday_map[value]
def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwarsg):
l = []
for wday in value.split(','):
for i in range(len(wday)):
if wday[i] not in '+-0123456789':
break
n = wday[:i] or None
w = wday[i:]
if n: n = int(n)
l.append(weekdays[self._weekday_map[w]](n))
rrkwargs["byweekday"] = l
_handle_BYDAY = _handle_BYWEEKDAY
def _parse_rfc_rrule(self, line,
dtstart=None,
cache=False,
ignoretz=False,
tzinfos=None):
if line.find(':') != -1:
name, value = line.split(':')
if name != "RRULE":
raise ValueError, "unknown parameter name"
else:
value = line
rrkwargs = {}
for pair in value.split(';'):
name, value = pair.split('=')
name = name.upper()
value = value.upper()
try:
getattr(self, "_handle_"+name)(rrkwargs, name, value,
ignoretz=ignoretz,
tzinfos=tzinfos)
except AttributeError:
raise ValueError, "unknown parameter '%s'" % name
except (KeyError, ValueError):
raise ValueError, "invalid '%s': %s" % (name, value)
return rrule(dtstart=dtstart, cache=cache, **rrkwargs)
def _parse_rfc(self, s,
dtstart=None,
cache=False,
unfold=False,
forceset=False,
compatible=False,
ignoretz=False,
tzinfos=None):
global parser
if compatible:
forceset = True
unfold = True
s = s.upper()
if not s.strip():
raise ValueError, "empty string"
if unfold:
lines = s.splitlines()
i = 0
while i < len(lines):
line = lines[i].rstrip()
if not line:
del lines[i]
elif i > 0 and line[0] == " ":
lines[i-1] += line[1:]
del lines[i]
else:
i += 1
else:
lines = s.split()
if (not forceset and len(lines) == 1 and
(s.find(':') == -1 or s.startswith('RRULE:'))):
return self._parse_rfc_rrule(lines[0], cache=cache,
dtstart=dtstart, ignoretz=ignoretz,
tzinfos=tzinfos)
else:
rrulevals = []
rdatevals = []
exrulevals = []
exdatevals = []
for line in lines:
if not line:
continue
if line.find(':') == -1:
name = "RRULE"
value = line
else:
name, value = line.split(':', 1)
parms = name.split(';')
if not parms:
raise ValueError, "empty property name"
name = parms[0]
parms = parms[1:]
if name == "RRULE":
for parm in parms:
raise ValueError, "unsupported RRULE parm: "+parm
rrulevals.append(value)
elif name == "RDATE":
for parm in parms:
if parm != "VALUE=DATE-TIME":
raise ValueError, "unsupported RDATE parm: "+parm
rdatevals.append(value)
elif name == "EXRULE":
for parm in parms:
raise ValueError, "unsupported EXRULE parm: "+parm
exrulevals.append(value)
elif name == "EXDATE":
for parm in parms:
if parm != "VALUE=DATE-TIME":
raise ValueError, "unsupported RDATE parm: "+parm
exdatevals.append(value)
elif name == "DTSTART":
for parm in parms:
raise ValueError, "unsupported DTSTART parm: "+parm
if not parser:
from dateutil import parser
dtstart = parser.parse(value, ignoretz=ignoretz,
tzinfos=tzinfos)
else:
raise ValueError, "unsupported property: "+name
if (forceset or len(rrulevals) > 1 or
rdatevals or exrulevals or exdatevals):
if not parser and (rdatevals or exdatevals):
from dateutil import parser
set = rruleset(cache=cache)
for value in rrulevals:
set.rrule(self._parse_rfc_rrule(value, dtstart=dtstart,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in rdatevals:
for datestr in value.split(','):
set.rdate(parser.parse(datestr,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in exrulevals:
set.exrule(self._parse_rfc_rrule(value, dtstart=dtstart,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in exdatevals:
for datestr in value.split(','):
set.exdate(parser.parse(datestr,
ignoretz=ignoretz,
tzinfos=tzinfos))
if compatible and dtstart:
set.rdate(dtstart)
return set
else:
return self._parse_rfc_rrule(rrulevals[0],
dtstart=dtstart,
cache=cache,
ignoretz=ignoretz,
tzinfos=tzinfos)
    def __call__(self, s, **kwargs):
        """Make the parser instance callable; delegates to _parse_rfc."""
        return self._parse_rfc(s, **kwargs)
# Module-level singleton: the public entry point for parsing rrule strings.
rrulestr = _rrulestr()
# vim:ts=4:sw=4:et
| Python |
"""
Copyright (c) 2003-2007 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
import datetime
__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"]
# Supported calculation methods; see the easter() docstring.
EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3
def easter(year, method=EASTER_WESTERN):
    """
    This method was ported from the work done by GM Arts,
    on top of the algorithm by Claus Tondering, which was
    based in part on the algorithm of Ouding (1940), as
    quoted in "Explanatory Supplement to the Astronomical
    Almanac", P. Kenneth Seidelmann, editor.

    This algorithm implements three different easter
    calculation methods:

    1 - Original calculation in Julian calendar, valid in
        dates after 326 AD
    2 - Original method, with date converted to Gregorian
        calendar, valid in years 1583 to 4099
    3 - Revised method, in Gregorian calendar, valid in
        years 1583 to 4099 as well

    These methods are represented by the constants
    EASTER_JULIAN (1), EASTER_ORTHODOX (2) and EASTER_WESTERN (3);
    the default method is EASTER_WESTERN.

    Returns a datetime.date instance.  Raises ValueError for an
    unknown method.

    More about the algorithm may be found at:
    http://users.chariot.net.au/~gmarts/eastalg.htm
    and
    http://www.tondering.dk/claus/calendar.html
    """
    if not (1 <= method <= 3):
        # Function-call raise form works on both Python 2 and 3
        # (the old "raise ValueError, ..." statement form does not).
        raise ValueError("invalid method")
    # g - Golden year - 1
    # c - Century
    # h - (23 - Epact) mod 30
    # i - Number of days from March 21 to Paschal Full Moon
    # j - Weekday for PFM (0=Sunday, etc)
    # p - Number of days from March 21 to Sunday on or before PFM
    #     (-6 to 28 methods 1 & 3, to 56 for method 2)
    # e - Extra days to add for method 2 (converting Julian
    #     date to Gregorian date)
    y = year
    g = y % 19
    e = 0
    if method < 3:
        # Old method
        i = (19*g+15)%30
        j = (y+y//4+i)%7
        if method == 2:
            # Extra dates to convert Julian to Gregorian date
            e = 10
            if y > 1600:
                e = e+y//100-16-(y//100-16)//4
    else:
        # New method
        c = y//100
        h = (c-c//4-(8*c+13)//25+19*g+15)%30
        i = h-(h//28)*(1-(h//28)*(29//(h+1))*((21-g)//11))
        j = (y+y//4+i+2-c+c//4)%7
    # p can be from -6 to 56 corresponding to dates 22 March to 23 May
    # (later dates apply to method 2, although 23 May never actually occurs)
    p = i-j+e
    d = 1+(p+27+(p+6)//40)%31
    m = 3+(p+26)//30
    return datetime.date(int(y),int(m),int(d))
| Python |
"""
Copyright (c) 2003-2007 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
import datetime
import struct
import time
import sys
import os
relativedelta = None
parser = None
rrule = None
__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
"tzstr", "tzical", "tzwin", "tzwinlocal", "gettz"]
try:
from dateutil.tzwin import tzwin, tzwinlocal
except (ImportError, OSError):
tzwin, tzwinlocal = None, None
ZERO = datetime.timedelta(0)
EPOCHORDINAL = datetime.datetime.utcfromtimestamp(0).toordinal()
class tzutc(datetime.tzinfo):
    """tzinfo implementation for Coordinated Universal Time."""

    def utcoffset(self, dt):
        # UTC is, by definition, at zero offset.
        return datetime.timedelta(0)

    def dst(self, dt):
        # UTC never observes daylight saving time.
        return datetime.timedelta(0)

    def tzname(self, dt):
        return "UTC"

    def __eq__(self, other):
        if isinstance(other, tzutc):
            return True
        # A fixed-offset zone of exactly zero also compares equal to UTC.
        return (isinstance(other, tzoffset) and
                other._offset == datetime.timedelta(0))

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "%s()" % self.__class__.__name__

    __reduce__ = object.__reduce__
class tzoffset(datetime.tzinfo):
    """Fixed-offset timezone with a fixed name.

    offset is given in seconds east of UTC.
    """
    def __init__(self, name, offset):
        self._name = name
        self._offset = datetime.timedelta(seconds=offset)
    def utcoffset(self, dt):
        return self._offset
    def dst(self, dt):
        # A fixed-offset zone never observes daylight saving time.
        return datetime.timedelta(0)
    def tzname(self, dt):
        return self._name
    def __eq__(self, other):
        # Note: only the offset is compared; the name is ignored.
        return (isinstance(other, tzoffset) and
                self._offset == other._offset)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __repr__(self):
        # repr() instead of the deprecated backquote syntax.
        return "%s(%s, %s)" % (self.__class__.__name__,
                               repr(self._name),
                               self._offset.days*86400+self._offset.seconds)
    __reduce__ = object.__reduce__
class tzlocal(datetime.tzinfo):
    """tzinfo reflecting the system's local timezone settings (libc)."""

    # Offsets are captured once at class-definition time from the
    # time module's globals.
    _std_offset = datetime.timedelta(seconds=-time.timezone)
    if time.daylight:
        _dst_offset = datetime.timedelta(seconds=-time.altzone)
    else:
        _dst_offset = _std_offset

    def utcoffset(self, dt):
        if self._isdst(dt):
            return self._dst_offset
        else:
            return self._std_offset

    def dst(self, dt):
        if self._isdst(dt):
            return self._dst_offset-self._std_offset
        else:
            return datetime.timedelta(0)

    def tzname(self, dt):
        return time.tzname[self._isdst(dt)]

    def _isdst(self, dt):
        # We can't use mktime here. It is unstable when deciding if
        # the hour near to a change is DST or not: feeding the same
        # wall-clock time around a transition back through
        # localtime(mktime(...)) flips between the two zone names.
        # Instead, build a timestamp assuming standard time and ask
        # localtime() directly.
        timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
                     + dt.hour * 3600
                     + dt.minute * 60
                     + dt.second)
        return time.localtime(timestamp+time.timezone).tm_isdst

    def __eq__(self, other):
        if not isinstance(other, tzlocal):
            return False
        # NOTE: an unreachable "return True" that followed this return
        # was removed (dead code).
        return (self._std_offset == other._std_offset and
                self._dst_offset == other._dst_offset)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "%s()" % self.__class__.__name__

    __reduce__ = object.__reduce__
class _ttinfo(object):
__slots__ = ["offset", "delta", "isdst", "abbr", "isstd", "isgmt"]
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def __repr__(self):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, `value`))
return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
def __eq__(self, other):
if not isinstance(other, _ttinfo):
return False
return (self.offset == other.offset and
self.delta == other.delta and
self.isdst == other.isdst and
self.abbr == other.abbr and
self.isstd == other.isstd and
self.isgmt == other.isgmt)
def __ne__(self, other):
return not self.__eq__(other)
def __getstate__(self):
state = {}
for name in self.__slots__:
state[name] = getattr(self, name, None)
return state
def __setstate__(self, state):
for name in self.__slots__:
if name in state:
setattr(self, name, state[name])
class tzfile(datetime.tzinfo):
    """tzinfo deserialized from a binary tz database file.

    The layout parsed here is the classic tzfile(5) format used under
    /usr/share/zoneinfo; see the references below.
    """
    # http://www.twinsun.com/tz/tz-link.htm
    # ftp://elsie.nci.nih.gov/pub/tz*.tar.gz
    def __init__(self, fileobj):
        # Accept either a path or an open file object; keep a name
        # around for __repr__ and pickling.
        if isinstance(fileobj, basestring):
            self._filename = fileobj
            fileobj = open(fileobj)
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            self._filename = `fileobj`
        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order byte
        # of the value is written first).
        if fileobj.read(4) != "TZif":
            raise ValueError, "magic not found"
        fileobj.read(16)
        (
         # The number of UTC/local indicators stored in the file.
         ttisgmtcnt,
         # The number of standard/wall indicators stored in the file.
         ttisstdcnt,
         # The number of leap seconds for which data is
         # stored in the file.
         leapcnt,
         # The number of "transition times" for which data
         # is stored in the file.
         timecnt,
         # The number of "local time types" for which data
         # is stored in the file (must not be zero).
         typecnt,
         # The number of characters of "time zone
         # abbreviation strings" stored in the file.
         charcnt,
        ) = struct.unpack(">6l", fileobj.read(24))
        # The above header is followed by tzh_timecnt four-byte
        # values of type long, sorted in ascending order.
        # These values are written in ``standard'' byte order.
        # Each is used as a transition time (as returned by
        # time(2)) at which the rules for computing local time
        # change.
        if timecnt:
            self._trans_list = struct.unpack(">%dl" % timecnt,
                                             fileobj.read(timecnt*4))
        else:
            self._trans_list = []
        # Next come tzh_timecnt one-byte values of type unsigned
        # char; each one tells which of the different types of
        # ``local time'' types described in the file is associated
        # with the same-indexed transition time. These values
        # serve as indices into an array of ttinfo structures that
        # appears next in the file.
        if timecnt:
            self._trans_idx = struct.unpack(">%dB" % timecnt,
                                            fileobj.read(timecnt))
        else:
            self._trans_idx = []
        # Each ttinfo structure is written as a four-byte value
        # for tt_gmtoff of type long, in a standard byte
        # order, followed by a one-byte value for tt_isdst
        # and a one-byte value for tt_abbrind. In each
        # structure, tt_gmtoff gives the number of
        # seconds to be added to UTC, tt_isdst tells whether
        # tm_isdst should be set by localtime(3), and
        # tt_abbrind serves as an index into the array of
        # time zone abbreviation characters that follow the
        # ttinfo structure(s) in the file.
        ttinfo = []
        for i in range(typecnt):
            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))
        abbr = fileobj.read(charcnt)
        # Then there are tzh_leapcnt pairs of four-byte
        # values, written in standard byte order; the
        # first value of each pair gives the time (as
        # returned by time(2)) at which a leap second
        # occurs; the second gives the total number of
        # leap seconds to be applied after the given time.
        # The pairs of values are sorted in ascending order
        # by time.
        # Not used, for now
        if leapcnt:
            leap = struct.unpack(">%dl" % (leapcnt*2),
                                 fileobj.read(leapcnt*8))
        # Then there are tzh_ttisstdcnt standard/wall
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as standard
        # time or wall clock time, and are used when
        # a time zone file is used in handling POSIX-style
        # time zone environment variables.
        if ttisstdcnt:
            isstd = struct.unpack(">%db" % ttisstdcnt,
                                  fileobj.read(ttisstdcnt))
        # Finally, there are tzh_ttisgmtcnt UTC/local
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as UTC or
        # local time, and are used when a time zone file
        # is used in handling POSIX-style time zone envi-
        # ronment variables.
        if ttisgmtcnt:
            isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                  fileobj.read(ttisgmtcnt))
        # ** Everything has been read **
        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind = ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)
        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)
        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                # NOTE(review): _ttinfo_first is assigned here but never
                # read elsewhere in this class — possibly meant to be
                # _ttinfo_before; confirm before changing.
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                for i in range(timecnt-1,-1,-1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst
            for tti in self._ttinfo_list:
                if not tti.isdst:
                    self._ttinfo_before = tti
                    break
            else:
                self._ttinfo_before = self._ttinfo_list[0]
        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)
    def _find_ttinfo(self, dt, laststd=0):
        """Return the _ttinfo in effect at naive datetime dt.

        With laststd true, return instead the last standard-time info
        at or before dt (used by dst() below).
        """
        timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
                     + dt.hour * 3600
                     + dt.minute * 60
                     + dt.second)
        idx = 0
        for trans in self._trans_list:
            if timestamp < trans:
                break
            idx += 1
        else:
            # dt is after the last transition.
            return self._ttinfo_std
        if idx == 0:
            # dt is before the first transition.
            return self._ttinfo_before
        if laststd:
            while idx > 0:
                tti = self._trans_idx[idx-1]
                if not tti.isdst:
                    return tti
                idx -= 1
            else:
                return self._ttinfo_std
        else:
            return self._trans_idx[idx-1]
    def utcoffset(self, dt):
        if not self._ttinfo_std:
            return ZERO
        return self._find_ttinfo(dt).delta
    def dst(self, dt):
        if not self._ttinfo_dst:
            return ZERO
        tti = self._find_ttinfo(dt)
        if not tti.isdst:
            return ZERO
        # The documentation says that utcoffset()-dst() must
        # be constant for every dt.
        return tti.delta-self._find_ttinfo(dt, laststd=1).delta
        # An alternative for that would be:
        #
        # return self._ttinfo_dst.offset-self._ttinfo_std.offset
        #
        # However, this class stores historical changes in the
        # dst offset, so I belive that this wouldn't be the right
        # way to implement this.
    def tzname(self, dt):
        if not self._ttinfo_std:
            return None
        return self._find_ttinfo(dt).abbr
    def __eq__(self, other):
        if not isinstance(other, tzfile):
            return False
        return (self._trans_list == other._trans_list and
                self._trans_idx == other._trans_idx and
                self._ttinfo_list == other._ttinfo_list)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, `self._filename`)
    def __reduce__(self):
        # Only picklable when backed by a real file path.
        if not os.path.isfile(self._filename):
            raise ValueError, "Unpickable %s class" % self.__class__.__name__
        return (self.__class__, (self._filename,))
class tzrange(datetime.tzinfo):
    """tzinfo built from explicit std/dst abbreviations, offsets in
    seconds, and relativedelta start/end rules for the DST period."""
    def __init__(self, stdabbr, stdoffset=None,
                 dstabbr=None, dstoffset=None,
                 start=None, end=None):
        global relativedelta
        # Imported lazily so dateutil.relativedelta is only required
        # when a tzrange is actually constructed.
        if not relativedelta:
            from dateutil import relativedelta
        self._std_abbr = stdabbr
        self._dst_abbr = dstabbr
        if stdoffset is not None:
            self._std_offset = datetime.timedelta(seconds=stdoffset)
        else:
            self._std_offset = ZERO
        if dstoffset is not None:
            self._dst_offset = datetime.timedelta(seconds=dstoffset)
        elif dstabbr and stdoffset is not None:
            # Default DST offset: one hour ahead of standard time.
            self._dst_offset = self._std_offset+datetime.timedelta(hours=+1)
        else:
            self._dst_offset = ZERO
        if dstabbr and start is None:
            # Default (pre-2007 US) rule: DST starts first Sunday of
            # April at 02:00 ...
            self._start_delta = relativedelta.relativedelta(
                hours=+2, month=4, day=1, weekday=relativedelta.SU(+1))
        else:
            self._start_delta = start
        if dstabbr and end is None:
            # ... and ends last Sunday of October at 02:00 DST
            # (01:00 standard time).
            self._end_delta = relativedelta.relativedelta(
                hours=+1, month=10, day=31, weekday=relativedelta.SU(-1))
        else:
            self._end_delta = end
    def utcoffset(self, dt):
        if self._isdst(dt):
            return self._dst_offset
        else:
            return self._std_offset
    def dst(self, dt):
        if self._isdst(dt):
            return self._dst_offset-self._std_offset
        else:
            return ZERO
    def tzname(self, dt):
        if self._isdst(dt):
            return self._dst_abbr
        else:
            return self._std_abbr
    def _isdst(self, dt):
        # No start rule means DST is never in effect.
        if not self._start_delta:
            return False
        year = datetime.datetime(dt.year,1,1)
        start = year+self._start_delta
        end = year+self._end_delta
        dt = dt.replace(tzinfo=None)
        # When start > end the DST period wraps the year boundary
        # (southern hemisphere).
        if start < end:
            return dt >= start and dt < end
        else:
            return dt >= start or dt < end
    def __eq__(self, other):
        if not isinstance(other, tzrange):
            return False
        return (self._std_abbr == other._std_abbr and
                self._dst_abbr == other._dst_abbr and
                self._std_offset == other._std_offset and
                self._dst_offset == other._dst_offset and
                self._start_delta == other._start_delta and
                self._end_delta == other._end_delta)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __repr__(self):
        return "%s(...)" % self.__class__.__name__
    __reduce__ = object.__reduce__
class tzstr(tzrange):
    """tzinfo built from a TZ environment-variable style string."""
    def __init__(self, s):
        global parser
        # dateutil.parser is imported lazily; its _parsetz helper does
        # the actual string parsing.
        if not parser:
            from dateutil import parser
        self._s = s
        res = parser._parsetz(s)
        if res is None:
            raise ValueError, "unknown string format"
        # Here we break the compatibility with the TZ variable handling.
        # GMT-3 actually *means* the timezone -3.
        if res.stdabbr in ("GMT", "UTC"):
            res.stdoffset *= -1
        # We must initialize it first, since _delta() needs
        # _std_offset and _dst_offset set. Use False in start/end
        # to avoid building it two times.
        tzrange.__init__(self, res.stdabbr, res.stdoffset,
                         res.dstabbr, res.dstoffset,
                         start=False, end=False)
        if not res.dstabbr:
            self._start_delta = None
            self._end_delta = None
        else:
            self._start_delta = self._delta(res.start)
            if self._start_delta:
                self._end_delta = self._delta(res.end, isend=1)
    def _delta(self, x, isend=0):
        """Build the relativedelta for a parsed DST start/end rule."""
        kwargs = {}
        if x.month is not None:
            kwargs["month"] = x.month
            if x.weekday is not None:
                kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week)
                if x.week > 0:
                    kwargs["day"] = 1
                else:
                    kwargs["day"] = 31
            elif x.day:
                kwargs["day"] = x.day
        elif x.yday is not None:
            kwargs["yearday"] = x.yday
        elif x.jyday is not None:
            kwargs["nlyearday"] = x.jyday
        if not kwargs:
            # Default is to start on first sunday of april, and end
            # on last sunday of october.
            if not isend:
                kwargs["month"] = 4
                kwargs["day"] = 1
                kwargs["weekday"] = relativedelta.SU(+1)
            else:
                kwargs["month"] = 10
                kwargs["day"] = 31
                kwargs["weekday"] = relativedelta.SU(-1)
        if x.time is not None:
            kwargs["seconds"] = x.time
        else:
            # Default is 2AM.
            kwargs["seconds"] = 7200
        if isend:
            # Convert to standard time, to follow the documented way
            # of working with the extra hour. See the documentation
            # of the tzinfo class.
            delta = self._dst_offset-self._std_offset
            kwargs["seconds"] -= delta.seconds+delta.days*86400
        return relativedelta.relativedelta(**kwargs)
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, `self._s`)
class _tzicalvtzcomp:
def __init__(self, tzoffsetfrom, tzoffsetto, isdst,
tzname=None, rrule=None):
self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom)
self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto)
self.tzoffsetdiff = self.tzoffsetto-self.tzoffsetfrom
self.isdst = isdst
self.tzname = tzname
self.rrule = rrule
class _tzicalvtz(datetime.tzinfo):
def __init__(self, tzid, comps=[]):
self._tzid = tzid
self._comps = comps
self._cachedate = []
self._cachecomp = []
def _find_comp(self, dt):
if len(self._comps) == 1:
return self._comps[0]
dt = dt.replace(tzinfo=None)
try:
return self._cachecomp[self._cachedate.index(dt)]
except ValueError:
pass
lastcomp = None
lastcompdt = None
for comp in self._comps:
if not comp.isdst:
# Handle the extra hour in DST -> STD
compdt = comp.rrule.before(dt-comp.tzoffsetdiff, inc=True)
else:
compdt = comp.rrule.before(dt, inc=True)
if compdt and (not lastcompdt or lastcompdt < compdt):
lastcompdt = compdt
lastcomp = comp
if not lastcomp:
# RFC says nothing about what to do when a given
# time is before the first onset date. We'll look for the
# first standard component, or the first component, if
# none is found.
for comp in self._comps:
if not comp.isdst:
lastcomp = comp
break
else:
lastcomp = comp[0]
self._cachedate.insert(0, dt)
self._cachecomp.insert(0, lastcomp)
if len(self._cachedate) > 10:
self._cachedate.pop()
self._cachecomp.pop()
return lastcomp
def utcoffset(self, dt):
return self._find_comp(dt).tzoffsetto
def dst(self, dt):
comp = self._find_comp(dt)
if comp.isdst:
return comp.tzoffsetdiff
else:
return ZERO
def tzname(self, dt):
return self._find_comp(dt).tzname
def __repr__(self):
return "<tzicalvtz %s>" % `self._tzid`
__reduce__ = object.__reduce__
class tzical:
def __init__(self, fileobj):
global rrule
if not rrule:
from dateutil import rrule
if isinstance(fileobj, basestring):
self._s = fileobj
fileobj = open(fileobj)
elif hasattr(fileobj, "name"):
self._s = fileobj.name
else:
self._s = `fileobj`
self._vtz = {}
self._parse_rfc(fileobj.read())
def keys(self):
return self._vtz.keys()
def get(self, tzid=None):
if tzid is None:
keys = self._vtz.keys()
if len(keys) == 0:
raise ValueError, "no timezones defined"
elif len(keys) > 1:
raise ValueError, "more than one timezone available"
tzid = keys[0]
return self._vtz.get(tzid)
def _parse_offset(self, s):
s = s.strip()
if not s:
raise ValueError, "empty offset"
if s[0] in ('+', '-'):
signal = (-1,+1)[s[0]=='+']
s = s[1:]
else:
signal = +1
if len(s) == 4:
return (int(s[:2])*3600+int(s[2:])*60)*signal
elif len(s) == 6:
return (int(s[:2])*3600+int(s[2:4])*60+int(s[4:]))*signal
else:
raise ValueError, "invalid offset: "+s
def _parse_rfc(self, s):
lines = s.splitlines()
if not lines:
raise ValueError, "empty string"
# Unfold
i = 0
while i < len(lines):
line = lines[i].rstrip()
if not line:
del lines[i]
elif i > 0 and line[0] == " ":
lines[i-1] += line[1:]
del lines[i]
else:
i += 1
tzid = None
comps = []
invtz = False
comptype = None
for line in lines:
if not line:
continue
name, value = line.split(':', 1)
parms = name.split(';')
if not parms:
raise ValueError, "empty property name"
name = parms[0].upper()
parms = parms[1:]
if invtz:
if name == "BEGIN":
if value in ("STANDARD", "DAYLIGHT"):
# Process component
pass
else:
raise ValueError, "unknown component: "+value
comptype = value
founddtstart = False
tzoffsetfrom = None
tzoffsetto = None
rrulelines = []
tzname = None
elif name == "END":
if value == "VTIMEZONE":
if comptype:
raise ValueError, \
"component not closed: "+comptype
if not tzid:
raise ValueError, \
"mandatory TZID not found"
if not comps:
raise ValueError, \
"at least one component is needed"
# Process vtimezone
self._vtz[tzid] = _tzicalvtz(tzid, comps)
invtz = False
elif value == comptype:
if not founddtstart:
raise ValueError, \
"mandatory DTSTART not found"
if tzoffsetfrom is None:
raise ValueError, \
"mandatory TZOFFSETFROM not found"
if tzoffsetto is None:
raise ValueError, \
"mandatory TZOFFSETFROM not found"
# Process component
rr = None
if rrulelines:
rr = rrule.rrulestr("\n".join(rrulelines),
compatible=True,
ignoretz=True,
cache=True)
comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto,
(comptype == "DAYLIGHT"),
tzname, rr)
comps.append(comp)
comptype = None
else:
raise ValueError, \
"invalid component end: "+value
elif comptype:
if name == "DTSTART":
rrulelines.append(line)
founddtstart = True
elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"):
rrulelines.append(line)
elif name == "TZOFFSETFROM":
if parms:
raise ValueError, \
"unsupported %s parm: %s "%(name, parms[0])
tzoffsetfrom = self._parse_offset(value)
elif name == "TZOFFSETTO":
if parms:
raise ValueError, \
"unsupported TZOFFSETTO parm: "+parms[0]
tzoffsetto = self._parse_offset(value)
elif name == "TZNAME":
if parms:
raise ValueError, \
"unsupported TZNAME parm: "+parms[0]
tzname = value
elif name == "COMMENT":
pass
else:
raise ValueError, "unsupported property: "+name
else:
if name == "TZID":
if parms:
raise ValueError, \
"unsupported TZID parm: "+parms[0]
tzid = value
elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"):
pass
else:
raise ValueError, "unsupported property: "+name
elif name == "BEGIN" and value == "VTIMEZONE":
tzid = None
comps = []
invtz = True
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, `self._s`)
# Default search locations for binary timezone files on POSIX systems.
if sys.platform != "win32":
    TZFILES = ["/etc/localtime", "localtime"]
    TZPATHS = ["/usr/share/zoneinfo", "/usr/lib/zoneinfo", "/etc/zoneinfo"]
else:
    # Windows has no zoneinfo database; tzwin reads the registry instead.
    TZFILES = []
    TZPATHS = []
def gettz(name=None):
    """Return a tzinfo for *name*, trying several sources in order.

    With no name (and TZ unset or ":"), the system local timezone is
    used.  Otherwise the zoneinfo file tree, the bundled zoneinfo
    tarball, the Windows registry, TZ-style strings and a few literal
    names ("GMT"/"UTC") are tried.  Returns None when nothing matches.
    """
    tz = None
    if not name:
        try:
            name = os.environ["TZ"]
        except KeyError:
            pass
    if name is None or name == ":":
        for filepath in TZFILES:
            if not os.path.isabs(filepath):
                # Relative entries are resolved against TZPATHS.
                filename = filepath
                for path in TZPATHS:
                    filepath = os.path.join(path, filename)
                    if os.path.isfile(filepath):
                        break
                else:
                    continue
            if os.path.isfile(filepath):
                try:
                    tz = tzfile(filepath)
                    break
                except (IOError, OSError, ValueError):
                    pass
        else:
            # No usable timezone file found; fall back to libc's view.
            tz = tzlocal()
    else:
        if name.startswith(":"):
            # POSIX TZ syntax: a leading ":" introduces a pathname.
            # Bug fix: strip the first character, not the last
            # (the old name[:-1] dropped the final character instead).
            name = name[1:]
        if os.path.isabs(name):
            if os.path.isfile(name):
                tz = tzfile(name)
            else:
                tz = None
        else:
            for path in TZPATHS:
                filepath = os.path.join(path, name)
                if not os.path.isfile(filepath):
                    # Zone names may use underscores on disk.
                    filepath = filepath.replace(' ','_')
                    if not os.path.isfile(filepath):
                        continue
                try:
                    tz = tzfile(filepath)
                    break
                except (IOError, OSError, ValueError):
                    pass
            else:
                tz = None
                if tzwin:
                    try:
                        tz = tzwin(name)
                    except OSError:
                        pass
                if not tz:
                    from dateutil.zoneinfo import gettz
                    tz = gettz(name)
                if not tz:
                    for c in name:
                        # name must have at least one offset to be a tzstr
                        if c in "0123456789":
                            try:
                                tz = tzstr(name)
                            except ValueError:
                                pass
                            break
                    else:
                        if name in ("GMT", "UTC"):
                            tz = tzutc()
                        elif name in time.tzname:
                            tz = tzlocal()
    return tz
# vim:ts=4:sw=4:et
| Python |
"""
Copyright (c) 2003-2005 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
from dateutil.tz import tzfile
from tarfile import TarFile
import os
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
__all__ = ["setcachesize", "gettz", "rebuild"]
CACHE = []
CACHESIZE = 10
class tzfile(tzfile):
    # Subclass dateutil.tz's tzfile so that unpickling goes through
    # this module's gettz(), keeping the cache and tarball in the loop.
    def __reduce__(self):
        return (gettz, (self._filename,))
def getzoneinfofile():
    """Return the path of the bundled zoneinfo tarball, or None.

    When several "zoneinfo*.tar.*" files sit next to this module, the
    lexicographically greatest name wins (newest tag, by convention).
    """
    moduledir = os.path.dirname(__file__)
    candidates = [entry for entry in os.listdir(os.path.join(moduledir))
                  if entry.startswith("zoneinfo") and ".tar." in entry]
    if candidates:
        # max() picks the same entry the old sort+reverse+first-match did.
        return os.path.join(moduledir, max(candidates))
    return None
# Resolve the tarball path once at import time, then drop the helper.
ZONEINFOFILE = getzoneinfofile()
del getzoneinfofile
def setcachesize(size):
    """Limit the timezone cache to *size* entries, trimming at once."""
    global CACHESIZE
    CACHESIZE = size
    # In-place mutation of CACHE needs no global declaration.
    del CACHE[size:]
def gettz(name):
    """Look up *name* in the bundled zoneinfo tarball, with caching.

    Returns None when no tarball is bundled or the name is unknown;
    failed lookups are cached as None too.
    """
    tzinfo = None
    if ZONEINFOFILE:
        for cachedname, tzinfo in CACHE:
            if cachedname == name:
                break
        else:
            tf = TarFile.open(ZONEINFOFILE)
            try:
                zonefile = tf.extractfile(name)
            except KeyError:
                tzinfo = None
            else:
                tzinfo = tzfile(zonefile)
            tf.close()
            # Most-recently-used entry first; trim to CACHESIZE.
            CACHE.insert(0, (name, tzinfo))
            del CACHE[CACHESIZE:]
    return tzinfo
def rebuild(filename, tag=None, format="gz"):
    """Rebuild the bundled zoneinfo tarball from a tzdata archive.

    filename names a tzdata source tarball; its rule files are compiled
    with zic(8) into a fresh "zoneinfo<tag>.tar.<format>" next to this
    module, replacing any previously bundled tarball.
    """
    import tempfile, shutil, subprocess
    tmpdir = tempfile.mkdtemp()
    zonedir = os.path.join(tmpdir, "zoneinfo")
    moduledir = os.path.dirname(__file__)
    # Bug fix: with tag=None the old code interpolated the value into
    # the name unchanged, producing "zoneinfoNone.tar.gz".
    tag = "-"+tag if tag else ""
    targetname = "zoneinfo%s.tar.%s" % (tag, format)
    try:
        tf = TarFile.open(filename)
        for name in tf.getnames():
            if not (name.endswith(".sh") or
                    name.endswith(".tab") or
                    name == "leapseconds"):
                tf.extract(name, tmpdir)
                filepath = os.path.join(tmpdir, name)
                # Argument-list invocation (no shell) avoids quoting and
                # injection problems with unusual paths; failures are
                # ignored, matching the old os.system() behaviour.
                subprocess.call(["zic", "-d", zonedir, filepath])
        tf.close()
        target = os.path.join(moduledir, targetname)
        # Remove any previously bundled tarball before writing the new one.
        for entry in os.listdir(moduledir):
            if entry.startswith("zoneinfo") and ".tar." in entry:
                os.unlink(os.path.join(moduledir, entry))
        tf = TarFile.open(target, "w:%s" % format)
        for entry in os.listdir(zonedir):
            entrypath = os.path.join(zonedir, entry)
            tf.add(entrypath, entry)
        tf.close()
    finally:
        shutil.rmtree(tmpdir)
| Python |
# This code was originally contributed by Jeffrey Harris.
import datetime
import struct
import _winreg
__author__ = "Jeffrey Harris & Gustavo Niemeyer <gustavo@niemeyer.net>"
__all__ = ["tzwin", "tzwinlocal"]
ONEWEEK = datetime.timedelta(7)
TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones"
TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
def _settzkeyname():
    """Pick the NT or 9x registry path for timezone data, whichever exists."""
    global TZKEYNAME
    handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
    try:
        # Probe for the NT-style key; fall back to the 9x location.
        _winreg.OpenKey(handle, TZKEYNAMENT).Close()
        TZKEYNAME = TZKEYNAMENT
    except WindowsError:
        TZKEYNAME = TZKEYNAME9X
    handle.Close()
# Resolve TZKEYNAME once at import time.
_settzkeyname()
class tzwinbase(datetime.tzinfo):
    """tzinfo class based on win32's timezones available in the registry.

    Subclasses fill in the _std*/_dst* attributes (offsets in minutes
    and month/weekday/week-number transition rules) from the registry.
    """
    def utcoffset(self, dt):
        if self._isdst(dt):
            return datetime.timedelta(minutes=self._dstoffset)
        else:
            return datetime.timedelta(minutes=self._stdoffset)
    def dst(self, dt):
        if self._isdst(dt):
            minutes = self._dstoffset - self._stdoffset
            return datetime.timedelta(minutes=minutes)
        else:
            return datetime.timedelta(0)
    def tzname(self, dt):
        if self._isdst(dt):
            return self._dstname
        else:
            return self._stdname
    def list():
        """Return a list of all time zones known to the system."""
        handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
        tzkey = _winreg.OpenKey(handle, TZKEYNAME)
        result = [_winreg.EnumKey(tzkey, i)
                  for i in range(_winreg.QueryInfoKey(tzkey)[0])]
        tzkey.Close()
        handle.Close()
        return result
    list = staticmethod(list)
    def display(self):
        # Human-readable zone description from the registry.
        return self._display
    def _isdst(self, dt):
        # Transition datetimes for dt's calendar year, computed from
        # the registry's month/weekday/week-number rules.
        dston = picknthweekday(dt.year, self._dstmonth, self._dstdayofweek,
                               self._dsthour, self._dstminute,
                               self._dstweeknumber)
        dstoff = picknthweekday(dt.year, self._stdmonth, self._stddayofweek,
                                self._stdhour, self._stdminute,
                                self._stdweeknumber)
        if dston < dstoff:
            return dston <= dt.replace(tzinfo=None) < dstoff
        else:
            # Southern hemisphere: the DST period wraps the year boundary.
            return not dstoff <= dt.replace(tzinfo=None) < dston
class tzwin(tzwinbase):
    """Timezone named after an entry in the registry's Time Zones list."""
    def __init__(self, name):
        self._name = name
        handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
        tzkey = _winreg.OpenKey(handle, "%s\%s" % (TZKEYNAME, name))
        keydict = valuestodict(tzkey)
        tzkey.Close()
        handle.Close()
        self._stdname = keydict["Std"].encode("iso-8859-1")
        self._dstname = keydict["Dlt"].encode("iso-8859-1")
        self._display = keydict["Display"]
        # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm
        # "TZI" packs the TIME_ZONE_INFORMATION biases and the two
        # SYSTEMTIME transition rules.
        tup = struct.unpack("=3l16h", keydict["TZI"])
        self._stdoffset = -tup[0]-tup[1]         # Bias + StandardBias * -1
        self._dstoffset = self._stdoffset-tup[2] # + DaylightBias * -1
        (self._stdmonth,
         self._stddayofweek,  # Sunday = 0
         self._stdweeknumber, # Last = 5
         self._stdhour,
         self._stdminute) = tup[4:9]
        (self._dstmonth,
         self._dstdayofweek,  # Sunday = 0
         self._dstweeknumber, # Last = 5
         self._dsthour,
         self._dstminute) = tup[12:17]
    def __repr__(self):
        return "tzwin(%s)" % repr(self._name)
    def __reduce__(self):
        return (self.__class__, (self._name,))
class tzwinlocal(tzwinbase):
    """The machine's current timezone, read from the registry's
    TimeZoneInformation key."""
    def __init__(self):
        handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
        tzlocalkey = _winreg.OpenKey(handle, TZLOCALKEYNAME)
        keydict = valuestodict(tzlocalkey)
        tzlocalkey.Close()
        self._stdname = keydict["StandardName"].encode("iso-8859-1")
        self._dstname = keydict["DaylightName"].encode("iso-8859-1")
        try:
            # The display string lives under the zone's own key, looked
            # up by its standard name; it may be missing.
            tzkey = _winreg.OpenKey(handle, "%s\%s"%(TZKEYNAME, self._stdname))
            _keydict = valuestodict(tzkey)
            self._display = _keydict["Display"]
            tzkey.Close()
        except OSError:
            self._display = None
        handle.Close()
        self._stdoffset = -keydict["Bias"]-keydict["StandardBias"]
        self._dstoffset = self._stdoffset-keydict["DaylightBias"]
        # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm
        tup = struct.unpack("=8h", keydict["StandardStart"])
        (self._stdmonth,
         self._stddayofweek,  # Sunday = 0
         self._stdweeknumber, # Last = 5
         self._stdhour,
         self._stdminute) = tup[1:6]
        tup = struct.unpack("=8h", keydict["DaylightStart"])
        (self._dstmonth,
         self._dstdayofweek,  # Sunday = 0
         self._dstweeknumber, # Last = 5
         self._dsthour,
         self._dstminute) = tup[1:6]
    def __reduce__(self):
        return (self.__class__, ())
def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
    """Return the nth occurrence of a weekday within the given month.

    dayofweek == 0 means Sunday (Windows convention); whichweek 5 means
    "last instance".  The previous candidate loop started one week past the
    first occurrence, so for whichweek < 5 it returned the (n+1)th match.
    """
    oneweek = datetime.timedelta(weeks=1)
    first = datetime.datetime(year, month, 1, hour, minute)
    # Day-of-month of the first occurrence of the requested weekday.
    # isoweekday() is Mon=1..Sun=7, which is congruent mod 7 with Sunday=0.
    weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7 + 1))
    wd = weekdayone + (whichweek - 1) * oneweek
    if wd.month != month:
        # The requested occurrence doesn't exist (no 5th week): use the last.
        wd -= oneweek
    return wd
def valuestodict(key):
    """Convert a registry key's values to a {name: data} dictionary."""
    # local renamed from 'dict' to stop shadowing the builtin
    values = {}
    size = _winreg.QueryInfoKey(key)[1]   # number of values under the key
    for i in range(size):
        data = _winreg.EnumValue(key, i)  # (name, data, type) triple
        values[data[0]] = data[1]
    return values
| Python |
"""
Copyright (c) 2003-2007 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
import datetime
import calendar
__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
class weekday(object):
    """Calendar weekday marker, optionally tagged with an occurrence index n
    (e.g. MO(+2) is "second Monday", MO(-1) is "last Monday")."""

    __slots__ = ["weekday", "n"]

    def __init__(self, weekday, n=None):
        self.weekday = weekday
        self.n = n

    def __call__(self, n):
        # Calling an instance produces a tagged copy; reuse self when the
        # tag is unchanged.
        if n == self.n:
            return self
        return self.__class__(self.weekday, n)

    def __eq__(self, other):
        try:
            return self.weekday == other.weekday and self.n == other.n
        except AttributeError:
            # other is not weekday-like
            return False

    def __repr__(self):
        label = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
        if self.n:
            return "%s(%+d)" % (label, self.n)
        return label
# Singleton markers for the seven weekdays (0=MO .. 6=SU).
weekdays = tuple(weekday(num) for num in range(7))
MO, TU, WE, TH, FR, SA, SU = weekdays
class relativedelta:
    """
    The relativedelta type is based on the specification of the excellent
    work done by M.-A. Lemburg in his mx.DateTime extension. However,
    notice that this type does *NOT* implement the same algorithm as
    his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.

    There are two different ways to build a relativedelta instance. The
    first one is passing it two date/datetime classes:

        relativedelta(datetime1, datetime2)

    And the other way is to use the following keyword arguments:

        year, month, day, hour, minute, second, microsecond:
            Absolute information.

        years, months, weeks, days, hours, minutes, seconds, microseconds:
            Relative information, may be negative.

        weekday:
            One of the weekday instances (MO, TU, etc). These instances may
            receive a parameter N, specifying the Nth weekday, which could
            be positive or negative (like MO(+1) or MO(-2)). Not specifying
            it is the same as specifying +1. You can also use an integer,
            where 0=MO.

        leapdays:
            Will add given days to the date found, if year is a leap
            year, and the date found is post 28 of february.

        yearday, nlyearday:
            Set the yearday or the non-leap year day (jump leap days).
            These are converted to day/month/leapdays information.

    Here is the behavior of operations with relativedelta:

    1) Calculate the absolute year, using the 'year' argument, or the
       original datetime year, if the argument is not present.
    2) Add the relative 'years' argument to the absolute year.
    3) Do steps 1 and 2 for month/months.
    4) Calculate the absolute day, using the 'day' argument, or the
       original datetime day, if the argument is not present. Then,
       subtract from the day until it fits in the year and month
       found after their operations.
    5) Add the relative 'days' argument to the absolute day. Notice
       that the 'weeks' argument is multiplied by 7 and added to
       'days'.
    6) Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds,
       microsecond/microseconds.
    7) If the 'weekday' argument is present, calculate the weekday,
       with the given (wday, nth) tuple. wday is the index of the
       weekday (0-6, 0=Mon), and nth is the number of weeks to add
       forward or backward, depending on its signal. Notice that if
       the calculated date is already Monday, for example, using
       (0, 1) or (0, -1) won't change the day.
    """

    def __init__(self, dt1=None, dt2=None,
                 years=0, months=0, days=0, leapdays=0, weeks=0,
                 hours=0, minutes=0, seconds=0, microseconds=0,
                 year=None, month=None, day=None, weekday=None,
                 yearday=None, nlyearday=None,
                 hour=None, minute=None, second=None, microsecond=None):
        if dt1 and dt2:
            # difference mode: compute the delta between two dates/datetimes
            if not isinstance(dt1, datetime.date) or \
               not isinstance(dt2, datetime.date):
                raise TypeError("relativedelta only diffs datetime/date")
            if type(dt1) is not type(dt2):
                # promote the plain date so both operands compare alike
                if not isinstance(dt1, datetime.datetime):
                    dt1 = datetime.datetime.fromordinal(dt1.toordinal())
                elif not isinstance(dt2, datetime.datetime):
                    dt2 = datetime.datetime.fromordinal(dt2.toordinal())
            self.years = 0
            self.months = 0
            self.days = 0
            self.leapdays = 0
            self.hours = 0
            self.minutes = 0
            self.seconds = 0
            self.microseconds = 0
            self.year = None
            self.month = None
            self.day = None
            self.weekday = None
            self.hour = None
            self.minute = None
            self.second = None
            self.microsecond = None
            self._has_time = 0

            # Start with a whole-month approximation, then step it until
            # dt2 + delta does not overshoot dt1; the remainder goes into
            # seconds/microseconds.
            months = (dt1.year*12+dt1.month)-(dt2.year*12+dt2.month)
            self._set_months(months)
            dtm = self.__radd__(dt2)
            if dt1 < dt2:
                while dt1 > dtm:
                    months += 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            else:
                while dt1 < dtm:
                    months -= 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            delta = dt1 - dtm
            self.seconds = delta.seconds+delta.days*86400
            self.microseconds = delta.microseconds
        else:
            # keyword mode: store relative and absolute components directly
            self.years = years
            self.months = months
            self.days = days+weeks*7
            self.leapdays = leapdays
            self.hours = hours
            self.minutes = minutes
            self.seconds = seconds
            self.microseconds = microseconds
            self.year = year
            self.month = month
            self.day = day
            self.hour = hour
            self.minute = minute
            self.second = second
            self.microsecond = microsecond

            if isinstance(weekday, int):
                self.weekday = weekdays[weekday]
            else:
                self.weekday = weekday

            yday = 0
            if nlyearday:
                yday = nlyearday
            elif yearday:
                yday = yearday
                if yearday > 59:
                    self.leapdays = -1
            if yday:
                # convert year-day into month/day
                ydayidx = [31, 59, 90, 120, 151, 181, 212, 243, 273,
                           304, 334, 366]
                for idx, ydays in enumerate(ydayidx):
                    if yday <= ydays:
                        self.month = idx+1
                        if idx == 0:
                            # was "self.day = ydays" (always 31) -- bug fix
                            self.day = yday
                        else:
                            self.day = yday-ydayidx[idx-1]
                        break
                else:
                    raise ValueError("invalid year day (%d)" % yday)

        self._fix()

    def _fix(self):
        """Normalize components into their canonical ranges (carry up)."""
        if abs(self.microseconds) > 999999:
            s = self.microseconds//abs(self.microseconds)
            div, mod = divmod(self.microseconds*s, 1000000)
            self.microseconds = mod*s
            self.seconds += div*s
        if abs(self.seconds) > 59:
            s = self.seconds//abs(self.seconds)
            div, mod = divmod(self.seconds*s, 60)
            self.seconds = mod*s
            self.minutes += div*s
        if abs(self.minutes) > 59:
            s = self.minutes//abs(self.minutes)
            div, mod = divmod(self.minutes*s, 60)
            self.minutes = mod*s
            self.hours += div*s
        if abs(self.hours) > 23:
            s = self.hours//abs(self.hours)
            div, mod = divmod(self.hours*s, 24)
            self.hours = mod*s
            self.days += div*s
        if abs(self.months) > 11:
            s = self.months//abs(self.months)
            div, mod = divmod(self.months*s, 12)
            self.months = mod*s
            self.years += div*s
        # remember whether any time-of-day component is in play, so that
        # __radd__ knows when a date must be promoted to a datetime
        if (self.hours or self.minutes or self.seconds or self.microseconds or
            self.hour is not None or self.minute is not None or
            self.second is not None or self.microsecond is not None):
            self._has_time = 1
        else:
            self._has_time = 0

    def _set_months(self, months):
        """Set months, normalizing overflow into years."""
        self.months = months
        if abs(self.months) > 11:
            s = self.months//abs(self.months)
            div, mod = divmod(self.months*s, 12)
            self.months = mod*s
            self.years = div*s
        else:
            self.years = 0

    def __radd__(self, other):
        """Apply this delta to a date/datetime (implements date + rd)."""
        if not isinstance(other, datetime.date):
            raise TypeError("unsupported type for add operation")
        elif self._has_time and not isinstance(other, datetime.datetime):
            other = datetime.datetime.fromordinal(other.toordinal())
        year = (self.year or other.year)+self.years
        month = self.month or other.month
        if self.months:
            assert 1 <= abs(self.months) <= 12
            month += self.months
            if month > 12:
                year += 1
                month -= 12
            elif month < 1:
                year -= 1
                month += 12
        # clamp the day to the length of the target month
        day = min(calendar.monthrange(year, month)[1],
                  self.day or other.day)
        repl = {"year": year, "month": month, "day": day}
        for attr in ["hour", "minute", "second", "microsecond"]:
            value = getattr(self, attr)
            if value is not None:
                repl[attr] = value
        days = self.days
        if self.leapdays and month > 2 and calendar.isleap(year):
            days += self.leapdays
        ret = (other.replace(**repl)
               + datetime.timedelta(days=days,
                                    hours=self.hours,
                                    minutes=self.minutes,
                                    seconds=self.seconds,
                                    microseconds=self.microseconds))
        if self.weekday:
            weekday, nth = self.weekday.weekday, self.weekday.n or 1
            jumpdays = (abs(nth)-1)*7
            if nth > 0:
                jumpdays += (7-ret.weekday()+weekday) % 7
            else:
                jumpdays += (ret.weekday()-weekday) % 7
                jumpdays *= -1
            ret += datetime.timedelta(days=jumpdays)
        return ret

    def __rsub__(self, other):
        return self.__neg__().__radd__(other)

    def __add__(self, other):
        """Combine two relativedeltas; the right-hand absolute fields win."""
        if not isinstance(other, relativedelta):
            raise TypeError("unsupported type for add operation")
        return relativedelta(years=other.years+self.years,
                             months=other.months+self.months,
                             days=other.days+self.days,
                             hours=other.hours+self.hours,
                             minutes=other.minutes+self.minutes,
                             seconds=other.seconds+self.seconds,
                             microseconds=other.microseconds+self.microseconds,
                             leapdays=other.leapdays or self.leapdays,
                             year=other.year or self.year,
                             month=other.month or self.month,
                             day=other.day or self.day,
                             weekday=other.weekday or self.weekday,
                             hour=other.hour or self.hour,
                             minute=other.minute or self.minute,
                             second=other.second or self.second,
                             # was "other.second or ..." -- typo fix
                             microsecond=other.microsecond or self.microsecond)

    def __sub__(self, other):
        """Return self - other (operands were reversed before -- bug fix)."""
        if not isinstance(other, relativedelta):
            raise TypeError("unsupported type for sub operation")
        return relativedelta(years=self.years-other.years,
                             months=self.months-other.months,
                             days=self.days-other.days,
                             hours=self.hours-other.hours,
                             minutes=self.minutes-other.minutes,
                             seconds=self.seconds-other.seconds,
                             microseconds=self.microseconds-other.microseconds,
                             leapdays=self.leapdays or other.leapdays,
                             year=self.year or other.year,
                             month=self.month or other.month,
                             day=self.day or other.day,
                             weekday=self.weekday or other.weekday,
                             hour=self.hour or other.hour,
                             minute=self.minute or other.minute,
                             second=self.second or other.second,
                             microsecond=self.microsecond or other.microsecond)

    def __neg__(self):
        return relativedelta(years=-self.years,
                             months=-self.months,
                             days=-self.days,
                             hours=-self.hours,
                             minutes=-self.minutes,
                             seconds=-self.seconds,
                             microseconds=-self.microseconds,
                             leapdays=self.leapdays,
                             year=self.year,
                             month=self.month,
                             day=self.day,
                             weekday=self.weekday,
                             hour=self.hour,
                             minute=self.minute,
                             second=self.second,
                             microsecond=self.microsecond)

    def __nonzero__(self):
        return not (not self.years and
                    not self.months and
                    not self.days and
                    not self.hours and
                    not self.minutes and
                    not self.seconds and
                    not self.microseconds and
                    not self.leapdays and
                    self.year is None and
                    self.month is None and
                    self.day is None and
                    self.weekday is None and
                    self.hour is None and
                    self.minute is None and
                    self.second is None and
                    self.microsecond is None)
    # Python 3 name for truth testing (harmless under Python 2)
    __bool__ = __nonzero__

    def __mul__(self, other):
        f = float(other)
        return relativedelta(years=self.years*f,
                             months=self.months*f,
                             days=self.days*f,
                             hours=self.hours*f,
                             minutes=self.minutes*f,
                             seconds=self.seconds*f,
                             microseconds=self.microseconds*f,
                             leapdays=self.leapdays,
                             year=self.year,
                             month=self.month,
                             day=self.day,
                             weekday=self.weekday,
                             hour=self.hour,
                             minute=self.minute,
                             second=self.second,
                             microsecond=self.microsecond)

    def __eq__(self, other):
        if not isinstance(other, relativedelta):
            return False
        if self.weekday or other.weekday:
            if not self.weekday or not other.weekday:
                return False
            if self.weekday.weekday != other.weekday.weekday:
                return False
            # n=None and n=1 both mean "first occurrence"
            n1, n2 = self.weekday.n, other.weekday.n
            if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
                return False
        return (self.years == other.years and
                self.months == other.months and
                self.days == other.days and
                self.hours == other.hours and
                self.minutes == other.minutes and
                self.seconds == other.seconds and
                # microseconds were previously not compared -- bug fix
                self.microseconds == other.microseconds and
                self.leapdays == other.leapdays and
                self.year == other.year and
                self.month == other.month and
                self.day == other.day and
                self.hour == other.hour and
                self.minute == other.minute and
                self.second == other.second and
                self.microsecond == other.microsecond)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __div__(self, other):
        return self.__mul__(1/float(other))
    # Python 3 name for the / operator (harmless under Python 2)
    __truediv__ = __div__

    def __repr__(self):
        l = []
        for attr in ["years", "months", "days", "leapdays",
                     "hours", "minutes", "seconds", "microseconds"]:
            value = getattr(self, attr)
            if value:
                l.append("%s=%+d" % (attr, value))
        for attr in ["year", "month", "day", "weekday",
                     "hour", "minute", "second", "microsecond"]:
            value = getattr(self, attr)
            if value is not None:
                # backticks replaced with repr() -- same output, valid syntax
                # in both Python 2 and 3
                l.append("%s=%s" % (attr, repr(value)))
        return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
# vim:ts=4:sw=4:et
| Python |
#!/usr/bin/env python
# Reformat selected tab-separated columns from stdin into one numbered,
# multi-line record per input row.
import sys
import re

lineno = 0
for line in sys.stdin:
    lineno += 1
    line = re.sub(r'[\r\n]+$', "", line)
    fields = line.split("\t")
    record = "%d\t%s\turl:%s\n%s\n%s\t(%s,%s, %s)" % (
        lineno, fields[7], fields[30], fields[13],
        fields[45], fields[52], fields[53], fields[54])
    print(record)
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
parser for volunteermatch
"""
import xml_helpers as xmlh
from datetime import datetime
import dateutil.parser
# pylint: disable-msg=R0915
def parse(s, maxrecs, progress):
  """return FPXML given volunteermatch data.

  Args:
    s: raw VolunteerMatch feed XML string.
    maxrecs: max number of listings to convert; -1 means all.
    progress: passed through to the XML parser's progress output.
  Returns:
    (fpxml_string, numorgs, numopps) on success, or None when the feed is
    malformed (listing without an organization, bad durations/locations).
  """
  # TODO: progress
  known_elnames = ['feed', 'title', 'subtitle', 'div', 'span', 'updated', 'id', 'link', 'icon', 'logo', 'author', 'name', 'uri', 'email', 'rights', 'entry', 'published', 'g:publish_date', 'g:expiration_date', 'g:event_date_range', 'g:start', 'g:end', 'updated', 'category', 'summary', 'content', 'awb:city', 'awb:country', 'awb:state', 'awb:postalcode', 'g:location', 'g:age_range', 'g:employer', 'g:job_type', 'g:job_industry', 'awb:paid', ]
  xmldoc = xmlh.simple_parser(s, known_elnames, progress)

  # feed-level publication date, normalized to FPXML's timestamp format
  pubdate = xmlh.get_tag_val(xmldoc, "created")
  ts = dateutil.parser.parse(pubdate)
  pubdate = ts.strftime("%Y-%m-%dT%H:%M:%S")

  # convert to footprint format
  s = '<?xml version="1.0" ?>'
  s += '<FootprintFeed schemaVersion="0.1">'
  s += '<FeedInfo>'
  # TODO: assign provider IDs?
  s += '<providerID>104</providerID>'
  s += '<providerName>volunteermatch.org</providerName>'
  s += '<feedID>1</feedID>'
  s += '<providerURL>http://www.volunteermatch.org/</providerURL>'
  s += '<createdDateTime>%s</createdDateTime>' % (pubdate)
  s += '<description></description>'
  s += '</FeedInfo>'
  numorgs = numopps = 0

  # hardcoded: Organization
  s += '<Organizations>'
  items = xmldoc.getElementsByTagName("listing")
  if (maxrecs > items.length or maxrecs == -1):
    maxrecs = items.length
  for item in items[0:maxrecs]:
    orgs = item.getElementsByTagName("parent")
    if (orgs.length == 1):
      org = orgs[0]
      s += '<Organization>'
      s += '<organizationID>%s</organizationID>' % (xmlh.get_tag_val(org, "key"))
      s += '<nationalEIN></nationalEIN>'
      s += '<name>%s</name>' % (xmlh.get_tag_val(org, "name"))
      s += '<missionStatement></missionStatement>'
      s += '<description></description>'
      s += '<location><city></city><region></region><postalCode></postalCode></location>'
      s += '<organizationURL>%s</organizationURL>' % (xmlh.get_tag_val(org, "URL"))
      s += '<donateURL></donateURL>'
      s += '<logoURL></logoURL>'
      s += '<detailURL>%s</detailURL>' % (xmlh.get_tag_val(org, "detailURL"))
      s += '</Organization>'
      numorgs += 1
    else:
      print("%s parse_volunteermatch: listing does not have an organization" %
            (datetime.now(),))
      return None
  s += '</Organizations>'

  s += '<VolunteerOpportunities>'
  items = xmldoc.getElementsByTagName("listing")
  for item in items[0:maxrecs]:
    s += '<VolunteerOpportunity>'
    s += '<volunteerOpportunityID>%s</volunteerOpportunityID>' % (xmlh.get_tag_val(item, "key"))
    orgs = item.getElementsByTagName("parent")
    if (orgs.length == 1):
      org = orgs[0]
      s += '<sponsoringOrganizationIDs><sponsoringOrganizationID>%s</sponsoringOrganizationID></sponsoringOrganizationIDs>' % (xmlh.get_tag_val(org, "key"))
    else:
      # keep going with a placeholder sponsor ID, but log the anomaly
      s += '<sponsoringOrganizationIDs><sponsoringOrganizationID>0</sponsoringOrganizationID></sponsoringOrganizationIDs>'
      print("%s parse_volunteermatch: listing does not have an organization" %
            (datetime.now(),))
    s += '<title>%s</title>' % (xmlh.get_tag_val(item, "title"))
    s += '<volunteersNeeded>-8888</volunteersNeeded>'

    s += '<dateTimeDurations><dateTimeDuration>'
    durations = xmlh.get_children_by_tagname(item, "duration")
    if (len(durations) == 1):
      duration = durations[0]
      ongoing = duration.getAttribute("ongoing")
      if (ongoing == 'true'):
        s += '<openEnded>Yes</openEnded>'
      else:
        s += '<openEnded>No</openEnded>'
      listingTimes = duration.getElementsByTagName("listingTime")
      if (listingTimes.length == 1):
        listingTime = listingTimes[0]
        s += '<startTime>%s</startTime>' % (xmlh.get_tag_val(listingTime, "startTime"))
        s += '<endTime>%s</endTime>' % (xmlh.get_tag_val(listingTime, "endTime"))
    else:
      print("%s parse_volunteermatch: number of durations in item != 1" %
            (datetime.now(),))
      return None

    # map the <commitment> units onto commitmentHoursPerWeek where possible
    commitments = item.getElementsByTagName("commitment")
    l_period = l_duration = ""
    if (commitments.length == 1):
      commitment = commitments[0]
      l_num = xmlh.get_tag_val(commitment, "num")
      l_duration = xmlh.get_tag_val(commitment, "duration")
      l_period = xmlh.get_tag_val(commitment, "period")
      if ((l_duration == "hours") and (l_period == "week")):
        s += '<commitmentHoursPerWeek>' + l_num + '</commitmentHoursPerWeek>'
      elif ((l_duration == "hours") and (l_period == "day")):
        # note: weekdays only
        s += '<commitmentHoursPerWeek>' + str(int(l_num)*5) + '</commitmentHoursPerWeek>'
      elif ((l_duration == "hours") and (l_period == "month")):
        hrs = int(float(l_num)/4.0)
        if hrs < 1:
          hrs = 1
        s += '<commitmentHoursPerWeek>' + str(hrs) + '</commitmentHoursPerWeek>'
      elif ((l_duration == "hours") and (l_period == "event")):
        # TODO: ignore for now, later compute the endTime if not already provided
        pass
      else:
        print("%s parse_volunteermatch: commitment given in units != hours/week:  %s per %s" %
              (datetime.now(), l_duration, l_period))
    s += '</dateTimeDuration></dateTimeDurations>'

    dbaddresses = item.getElementsByTagName("location")
    if (dbaddresses.length != 1):
      print("%s parse_volunteermatch: only 1 location supported." %
            (datetime.now(),))
      return None
    dbaddress = dbaddresses[0]
    s += '<locations><location>'
    s += '<streetAddress1>%s</streetAddress1>' % (xmlh.get_tag_val(dbaddress, "street1"))
    s += '<city>%s</city>' % (xmlh.get_tag_val(dbaddress, "city"))
    s += '<region>%s</region>' % (xmlh.get_tag_val(dbaddress, "region"))
    s += '<postalCode>%s</postalCode>' % (xmlh.get_tag_val(dbaddress, "postalCode"))
    geolocs = item.getElementsByTagName("geolocation")
    if (geolocs.length == 1):
      geoloc = geolocs[0]
      s += '<latitude>%s</latitude>' % (xmlh.get_tag_val(geoloc, "latitude"))
      s += '<longitude>%s</longitude>' % (xmlh.get_tag_val(geoloc, "longitude"))
    s += '</location></locations>'

    s += '<audienceTags>'
    audiences = item.getElementsByTagName("audience")
    for audience in audiences:
      # local renamed from 'type' to stop shadowing the builtin
      tag_text = xmlh.node_data(audience)
      s += '<audienceTag>%s</audienceTag>' % (tag_text)
    s += '</audienceTags>'

    s += '<categoryTags>'
    categories = item.getElementsByTagName("category")
    for category in categories:
      tag_text = xmlh.node_data(category)
      s += '<categoryTag>%s</categoryTag>' % (tag_text)
    s += '</categoryTags>'

    s += '<skills>%s</skills>' % (xmlh.get_tag_val(item, "skill"))
    s += '<detailURL>%s</detailURL>' % (xmlh.get_tag_val(item, "detailURL"))
    s += '<description>%s</description>' % (xmlh.get_tag_val(item, "description"))

    expires = xmlh.get_tag_val(item, "expires")
    ts = dateutil.parser.parse(expires)
    expires = ts.strftime("%Y-%m-%dT%H:%M:%S")
    s += '<expires>%s</expires>' % (expires)

    s += '</VolunteerOpportunity>'
    numopps += 1
  s += '</VolunteerOpportunities>'
  s += '</FootprintFeed>'

  #s = re.sub(r'><([^/])', r'>\n<\1', s)
  #print(s)
  return s, numorgs, numopps
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
parser for footprint itself (identity parse)
"""
import xml_helpers as xmlh
from datetime import datetime
import re
# default record lifetime when a feed omits <expires>: 90 days, in seconds
DEFAULT_EXPIRATION = (90 * 86400)

# default event duration: 10 years, in seconds
DEFAULT_DURATION = (10 * 365 * 86400)

# element names accepted by the FPXML validator (passed to simple_parser)
KNOWN_ELNAMES = [
  'FeedInfo', 'FootprintFeed', 'Organization', 'Organizations',
  'VolunteerOpportunities', 'VolunteerOpportunity', 'abstract', 'audienceTag',
  'audienceTags', 'categoryTag', 'categoryTags', 'city',
  'commitmentHoursPerWeek', 'contactEmail', 'contactName', 'contactPhone',
  'country', 'createdDateTime', 'dateTimeDuration', 'dateTimeDurationType',
  'dateTimeDurations', 'description', 'detailURL', 'directions', 'donateURL',
  'duration', 'email', 'endDate', 'endTime', 'expires', 'fax', 'feedID',
  'guidestarID', 'iCalRecurrence', 'language', 'latitude', 'lastUpdated',
  'location', 'locationType', 'locations', 'logoURL', 'longitude', 'minimumAge',
  'missionStatement', 'name', 'nationalEIN', 'openEnded', 'organizationID',
  'organizationURL', 'paid', 'phone', 'postalCode', 'providerID',
  'providerName', 'providerURL', 'region', 'schemaVersion', 'sexRestrictedEnum',
  'sexRestrictedTo', 'skills', 'sponsoringOrganizationID', 'startDate',
  'startTime', 'streetAddress1', 'streetAddress2', 'streetAddress3', 'title',
  'tzOlsonPath', 'virtual', 'volunteerHubOrganizationID',
  'volunteerOpportunityID', 'volunteersFilled', 'volunteersSlots',
  'volunteersNeeded', 'yesNoEnum'
]
def set_default_time_elem(doc, entity, tagname, timest=None):
  """footprint macro: default a timestamp element and its olsonTZ attribute.

  The old default (timest=xmlh.current_ts()) was evaluated once at module
  import, so every record in a long-running process got the same stale
  timestamp; None now means "current time at call time".
  """
  if timest is None:
    timest = xmlh.current_ts()
  cdt = xmlh.set_default_value(doc, entity, tagname, timest)
  xmlh.set_default_attr(doc, cdt, "olsonTZ", "America/Los_Angeles")
def parse_fast(instr, maxrecs, progress):
  """fast parser but doesn't check correctness,
  i.e. must be pre-checked by caller.

  Extracts <FeedInfo>, <Organization> and <VolunteerOpportunity> chunks
  with regexps, fills in default values, and returns
  (fpxml_string, numorgs, numopps)."""
  numorgs = numopps = 0
  outstr = '<?xml version="1.0" ?>'
  outstr += '<FootprintFeed schemaVersion="0.1">'

  # note: processes Organizations first, so ID lookups work
  feedchunks = re.findall(
    re.compile('<FeedInfo>.+?</FeedInfo>', re.DOTALL), instr)
  for feedchunk in feedchunks:
    node = xmlh.simple_parser(feedchunk, KNOWN_ELNAMES, False)
    xmlh.set_default_value(node, node.firstChild, "feedID", "0")
    set_default_time_elem(node, node.firstChild, "createdDateTime")
    outstr += xmlh.prettyxml(node, True)

  orgchunks = re.findall(
    re.compile('<Organization>.+?</Organization>', re.DOTALL), instr)
  outstr += '<Organizations>'
  for orgchunk in orgchunks:
    node = xmlh.simple_parser(orgchunk, KNOWN_ELNAMES, False)
    numorgs += 1
    outstr += xmlh.prettyxml(node, True)
  outstr += '</Organizations>'

  oppchunks = re.findall(
    re.compile('<VolunteerOpportunity>.+?</VolunteerOpportunity>',
               re.DOTALL), instr)
  outstr += '<VolunteerOpportunities>'
  for oppchunk in oppchunks:
    # check the cap before parsing, so the returned count matches the
    # number of opportunities actually emitted (was off by one)
    if maxrecs > 0 and numopps >= maxrecs:
      break
    node = xmlh.simple_parser(oppchunk, KNOWN_ELNAMES, False)
    numopps += 1
    if progress and numopps % 250 == 0:
      print("%s :  %d  records generated." % (datetime.now(), numopps))
    for opp in node.firstChild.childNodes:
      if opp.nodeType == node.ELEMENT_NODE:
        xmlh.set_default_value(node, opp, "volunteersNeeded", -8888)
        xmlh.set_default_value(node, opp, "paid", "No")
        xmlh.set_default_value(node, opp, "sexRestrictedTo", "Neither")
        xmlh.set_default_value(node, opp, "language", "English")
        set_default_time_elem(node, opp, "lastUpdated")
        set_default_time_elem(node, opp, "expires",
                              xmlh.current_ts(DEFAULT_EXPIRATION))
        for loc in opp.getElementsByTagName("location"):
          xmlh.set_default_value(node, loc, "virtual", "No")
          xmlh.set_default_value(node, loc, "country", "US")
        for dttm in opp.getElementsByTagName("dateTimeDurations"):
          # (the openEnded default used to be applied twice; once suffices)
          xmlh.set_default_value(node, dttm, "openEnded", "No")
          xmlh.set_default_value(node, dttm, "iCalRecurrence", "")
          # getElementsByTagName returns a (possibly empty) NodeList, never
          # None -- the original '== None' tests could never fire, so
          # timeFlexible was always defaulted to "No"; test emptiness.
          if (not dttm.getElementsByTagName("startTime") and
              not dttm.getElementsByTagName("endTime")):
            set_default_time_elem(node, dttm, "timeFlexible", "Yes")
          else:
            set_default_time_elem(node, dttm, "timeFlexible", "No")
        time_elems = opp.getElementsByTagName("startTime")
        time_elems += opp.getElementsByTagName("endTime")
        for el in time_elems:
          xmlh.set_default_attr(node, el, "olsonTZ", "America/Los_Angeles")
    outstr += xmlh.prettyxml(node, True)
  outstr += '</VolunteerOpportunities>'
  outstr += '</FootprintFeed>'
  return outstr, numorgs, numopps
def parse(instr, maxrecs, progress):
  """return python DOM object given FPXML.

  parsing footprint format is the identity operation.  The progress
  messages use the print-function form (output-identical in Python 2)
  instead of py2-only print statements."""
  # TODO: maxrecs
  # TODO: progress
  if progress:
    print("%s parse_footprint: parsing  %d  bytes." %
          (datetime.now(), len(instr)))
  xmldoc = xmlh.simple_parser(instr, KNOWN_ELNAMES, progress)
  if progress:
    print("%s parse_footprint: done parsing." % (datetime.now(),))
  return xmldoc
def parser(providerID, providerName, feedID, providerURL, feedDescription):
  """create an FPXML-compatible parser.

  Returns a parse function that runs parse_fast() and swaps in a
  <FeedInfo> header describing this provider."""
  # Pre-render the replacement header once, at closure-creation time
  # (so createdDateTime reflects when the parser was built).
  parts = ["<FeedInfo>",
           xmlh.output_val('providerID', providerID),
           xmlh.output_val('providerName', providerName),
           xmlh.output_val('feedID', feedID),
           xmlh.output_val('createdDateTime', xmlh.current_ts()),
           xmlh.output_val('providerURL', providerURL),
           xmlh.output_val('description', feedDescription)]
  feedinfo = "".join(parts) + "</FeedInfo>"
  def parse_func(instr, maxrecs, progress):
    outstr, numorgs, numopps = parse_fast(instr, maxrecs, progress)
    patched = re.sub(re.compile(r'<FeedInfo>.+?</FeedInfo>', re.DOTALL),
                     feedinfo, outstr)
    return patched, numorgs, numopps
  return parse_func
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
dumping ground for functions common across all parsers.
"""
from xml.dom import minidom
from datetime import datetime
import xml.sax.saxutils
import xml.parsers.expat
import re
import sys
import time
# asah: I give up, allowing UTF-8 is just too hard without incurring
# crazy performance penalties
# Whitelist of "simple" printable-ASCII characters, codes 32..125.
# NOTE(review): range(32, 126) excludes '~' (126) -- possibly off by one.
SIMPLE_CHARS = ''.join(map(chr, range(32, 126)))
# Character class matching anything NOT whitelisted (newline is kept).
SIMPLE_CHARS_CLASS = '[^\\n%s]' % re.escape(SIMPLE_CHARS)
SIMPLE_CHARS_RE = re.compile(SIMPLE_CHARS_CLASS)
# Module-load time; print_rps_progress() uses it to compute records/sec.
PROGRESS_START_TS = datetime.now()
def clean_string(instr):
  """return a string that's safe wrt. utf-8 encoding.

  Decodes as ASCII (replacing bad bytes), then drops every character
  outside the printable-ASCII whitelist."""
  ascii_text = instr.decode('ascii', 'replace')
  return SIMPLE_CHARS_RE.sub('', ascii_text).encode('UTF-8')
def node_data(entity):
  """get the data buried in the given node and escape it.

  Returns "" for empty nodes; newlines become the literal two characters
  backslash-n."""
  child = entity.firstChild
  if child is None or child.data is None:
    return ""
  escaped = xml.sax.saxutils.escape(child.data).encode('UTF-8')
  return re.sub(r'\n', r'\\n', escaped)
def get_children_by_tagname(elem, name):
  """get all the direct children with a given element name."""
  return [child for child in elem.childNodes
          if child.nodeType == child.ELEMENT_NODE and child.nodeName == name]
def print_progress(msg, filename="", progress=True):
  """print progress indicator: "<timestamp>:<filename> <msg>".

  Uses the single-argument print-function form, which emits identical
  output under Python 2 (py2-only print statement removed)."""
  if progress:
    print("%s:%s %s" % (datetime.now(), filename, msg))
def print_status(msg, filename="", progress=True):
  """print status indicator, for stats collection."""
  # same as print_progress, with the filename tagged for log scraping
  tagged = "STATUS:" + filename
  print_progress(msg, tagged, progress)
def print_rps_progress(noun, progress, recno, maxrecs):
  """print a records-per-second progress line every 250 records.

  Args:
    noun: label for the records, e.g. "opps".
    progress: disables output entirely when false.
    recno: number of records processed so far.
    maxrecs: expected total; <= 0 means unknown.
  """
  if not (progress and recno > 0 and recno % 250 == 0):
    return
  maxrecs_str = ""
  if maxrecs > 0:
    maxrecs_str = " of " + str(maxrecs)
  now = datetime.now()
  secs_since_start = now - PROGRESS_START_TS  # PROGRESS_START_TS: module load
  secs_elapsed = 3600*24.0*secs_since_start.days + \
                 1.0*secs_since_start.seconds + \
                 secs_since_start.microseconds / 1000000.0
  rps = recno / secs_elapsed
  # output-identical to the old py2 print statement, valid in both dialects
  print("%s:  %d %s processed%s (%d recs/sec)" %
        (now, recno, noun, maxrecs_str, int(rps)))
def get_tag_val(entity, tag):
  """walk the DOM of entity looking for the first child named (tag).

  Returns the concatenated text/CDATA content of the first match --
  stripped, XML-escaped, UTF-8 encoded, newlines turned into literal
  '\\n' -- or "" when the tag is absent or empty."""
  #print "----------------------------------------"
  nodes = entity.getElementsByTagName(tag)
  #print "nodes:", nodes
  if (nodes.length == 0):
    return ""
  #print nodes[0]
  if (nodes[0] == None):
    return ""
  if (nodes[0].firstChild == None):
    return ""
  # NOTE(review): this guard only inspects firstChild.data, while the value
  # below is rebuilt from ALL text/CDATA children -- the checks overlap but
  # are kept as-is.
  if (nodes[0].firstChild.data == None):
    return ""
  outstr = "".join([node.data for node in nodes[0].childNodes if node.nodeType in [node.TEXT_NODE, node.CDATA_SECTION_NODE]])
  outstr = outstr.strip()
  #outstr = nodes[0].firstChild.data
  outstr = xml.sax.saxutils.escape(outstr).encode('UTF-8')
  outstr = re.sub(r'\n', r'\\n', outstr)
  return outstr
def get_tag_attr(entity, tag, attribute):
  """Finds the first element named (tag) and returns the named
  attribute, escaped and UTF-8 encoded ("" when absent)."""
  nodes = entity.getElementsByTagName(tag)
  if nodes.length == 0 or nodes[0] is None:
    return ""
  attrval = nodes[0].getAttribute(attribute)
  attrval = xml.sax.saxutils.escape(attrval).encode('UTF-8')
  return re.sub(r'\n', r'\\n', attrval)
def set_default_value(doc, entity, tagname, default_value):
  """add the element if not already present in the DOM tree.

  Returns the existing element when found, else the newly created one."""
  existing = entity.getElementsByTagName(tagname)
  if existing:
    return existing[0]
  newnode = doc.createElement(tagname)
  newnode.appendChild(doc.createTextNode(str(default_value)))
  entity.appendChild(newnode)
  return newnode
def set_default_attr(doc, entity, attrname, default_value):
  """create and set the attribute if not already set.

  The doc argument is unused but kept for interface symmetry with
  set_default_value."""
  # 'is None': getAttributeNode returns None (not a falsy node) when absent
  if entity.getAttributeNode(attrname) is None:
    entity.setAttribute(attrname, default_value)
def validate_xml(xmldoc, known_elnames):
  """a simple XML validator, given known tagnames.

  Recursively walks the DOM; unknown element names are currently noted
  silently (reporting is disabled)."""
  for child in xmldoc.childNodes:
    if (child.nodeType == child.ELEMENT_NODE and
        child.tagName not in known_elnames):
      #print "unknown tagName '"+child.tagName+"'"
      pass
    # TODO: spellchecking...
    validate_xml(child, known_elnames)
def simple_parser(instr, known_elnames_list, progress):
  """a simple wrapper for parsing XML which attempts to handle errors.

  On a parse failure: prints diagnostics plus three lines of source
  context on either side, dumps the input to xmlerror.out, and exits
  the process."""
  try:
    if known_elnames_list:
      # index the element names for O(1) membership tests in validate_xml
      known_elnames_dict = {}
      for item in known_elnames_list:
        known_elnames_dict[item] = True
    if progress:
      print datetime.now(), "parsing XML"
    xmldoc = minidom.parseString(instr)
    # this stuff is in a try-block to avoid use-before-def on xmldoc
    if progress:
      print datetime.now(), "validating XML..."
    if known_elnames_list:
      validate_xml(xmldoc, known_elnames_dict)
    if progress:
      print datetime.now(), "done."
    return xmldoc
  except xml.parsers.expat.ExpatError, err:
    # trailing commas: continue the message on the same output line
    print datetime.now(), "XML parsing error on line ", err.lineno,
    print ":", xml.parsers.expat.ErrorString(err.code),
    print " (column ", err.offset, ")"
    lines = instr.split("\n")
    # show the failing region of the input
    for i in range(err.lineno - 3, err.lineno + 3):
      if i >= 0 and i < len(lines):
        print "%6d %s" % (i+1, lines[i])
    print "writing string to xmlerror.out..."
    outfh = open("xmlerror.out", "w+")
    outfh.write(instr)
    outfh.close()
    sys.exit(0)
def prettyxml(doc, strip_header = False):
  """return pretty-printed XML for doc.

  (toprettyxml wasn't that pretty -- this just breaks between tags.)"""
  rendered = doc.toxml("UTF-8")
  if strip_header:
    rendered = re.sub(r'<\?xml version="1.0" encoding="UTF-8"\?>', r'',
                      rendered)
  return re.sub(r'><', r'>\n<', rendered)
def output_val(name, val):
  """return <name>val</name>."""
  open_tag = "<" + name + ">"
  close_tag = "</" + name + ">"
  return open_tag + str(val) + close_tag
def output_node(name, node, nodename):
  """return <name>get_tag_val(node)</name>."""
  text = get_tag_val(node, nodename)
  return output_val(name, text)
def output_plural(name, val):
  """return <names><name>val</name></names>."""
  # inlined output_val(): wrap the singular element in a plural wrapper
  return "<%ss><%s>%s</%s></%ss>" % (name, name, str(val), name, name)
def output_plural_node(name, node, nodename):
  """return <names><name>get_tag_val(node)</name></names>."""
  inner = output_node(name, node, nodename)
  return "<" + name + "s>" + inner + "</" + name + "s>"
def current_ts(delta_secs=0):
  """Return a formatted datetime string for the current time, e.g.
  2008-12-30T14:30:10.5"""
  # NOTE(review): mktime() expects a localtime tuple but is handed
  # gmtime() -- kept as-is to preserve existing timestamps
  when = time.gmtime(time.mktime(time.gmtime()) + delta_secs)
  return time.strftime("%Y-%m-%dT%H:%M:%S", when)
def current_time(delta_secs=0):
  """Return a formatted time string for the current time, e.g. 14:30:10.5"""
  when = time.gmtime(time.mktime(time.gmtime()) + delta_secs)
  return time.strftime("%H:%M:%S", when)
def current_date(delta_secs=0):
  """Return a formatted date string for the current time, e.g. 2008-12-30"""
  when = time.gmtime(time.mktime(time.gmtime()) + delta_secs)
  return time.strftime("%Y-%m-%d", when)
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
parser for craigslist custom crawl-- not FPXML
"""
# note: this is designed to consume the output from the craigslist crawler
# example record
#http://limaohio.craigslist.org/vol/1048151556.html-Q-<!DOCTYPE html PUBLIC
# "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose
#.dtd"> <html> <head> <title>Foster Parents Needed</title> <meta name="ro
#bots" content="NOARCHIVE"> <link rel="stylesheet" title="craigslist" href=
#"http://www.craigslist.org/styles/craigslist.css" type="text/css" media="al
#l"> </head> <body onload="initFlag(1048151556)" class="posting"> <div cl
#ass="bchead"> <a id="ef" href="/email.friend?postingID=1048151556">email th
#is posting to a friend</a> <a href="http://limaohio.craigslist.org">lima /
#findlay craigslist</a> > <a href="/vol/">volunteers</a> </div>
# <div id="flags"> <div id="flagMsg"> please <a href="http://www.craig
#slist.org/about/help/flags_and_community_moderation">flag</a> with care:
#</div> <div id="flagChooser"> <br> <a class="fl" id="flag16" href="
#/flag/?flagCode=16&postingID=1048151556" title="Wrong category, wro
#ng site, discusses another post, or otherwise misplaced"> miscategorize
#d</a> <br> <a class="fl" id="flag28" href="/flag/?flagCode=28&po
#stingID=1048151556" title="Violates craigslist Terms Of Use or other po
#sted guidelines"> prohibited</a> <br> <a class="fl" id="flag15"
#href="/flag/?flagCode=15&postingID=1048151556" title="Posted too fr
#equently, in multiple cities/categories, or is too commercial"> spam/ov
#erpost</a> <br> <a class="fl" id="flag9" href="/flag/?flagCode=9&
#;postingID=1048151556" title="Should be considered for inclusion in the
# Best-Of-Craigslist"> best of craigslist</a> <br> </div> </div>
# <h2>Foster Parents Needed (Northwest Ohio)</h2> <hr> Reply to: <a href="
#mailto:comm-10481515
#56@craigslist&#
#46;org?subject=Foster%20Parents%20Needed%20(Northwest%20Ohio
#)">comm-104815155
#;6@craigslist.&
##111;rg</a> <sup>[<a href="http://www.craigslist.org/about/help/r
#eplying_to_posts" target="_blank">Errors when replying to ads?</a>]</sup><b
#r> Date: 2009-02-24, 8:37AM EST<br> <br> <br> <div id="userbody"> Diversio
#n Adolescent Foster Care of Ohio is accepting applications for foster paren
#ts in our Findlay office. There are many children in Ohio in need of a tem
#porary place to call home. Foster parent training is currently being offere
#d. Please call Stacy for more information 800-824-3007. We look forward t
#o meeting with you. www.diversionfostercare.org <br> <table> <tr>
# <td></td> <td></td> </tr> <tr> <td></td> <td></td> </
#tr> </table> <br><br><ul> <li> Location: Northwest Ohio <li>it's NOT o
#k to contact this poster with services or other commercial interests</ul>
#</div> PostingID: 1048151556<br> <br> <hr> <br> <div class="clfooter">
# Copyright © 2009 craigslist, inc. <a hre
#f="http://www.craigslist.org/about/terms.of.use.html">terms of use</a> 
#; <a href="http://www.craigslist.org/about/privacy_policy"
#>privacy policy</a> <a href="/forums/?forumID=8">fee
#dback forum</a> </div> <script type="text/javascript" src="http://www.craig
#slist.org/js/jquery.js"></script> <script type="text/javascript" src="http:
#//www.craigslist.org/js/postings.js"></script> </body> </html>
import sys
import re
import xml.sax.saxutils
import xml_helpers as xmlh
import crawl_craigslist
from datetime import datetime
import dateutil.parser
CL_LATLONGS = None
def load_craigslist_latlongs():
  """map of craigslist sub-metros to their latlongs.

  Populates the module-level CL_LATLONGS dict from the data file.
  File format: one "url|lat|lng" record per line; '#' starts a comment.
  """
  global CL_LATLONGS
  CL_LATLONGS = {}
  latlongs_fh = open('craigslist-metro-latlongs.txt')
  for line in latlongs_fh:
    # strip trailing comments and surrounding whitespace
    line = re.sub(r'\s*#.*$', '', line).strip()
    if line == "":
      continue
    try:
      url, lat, lng = line.strip().split("|")
    except:
      # malformed line: report and abort rather than continue silently
      print "error parsing line", line
      sys.exit(1)
    # store as a single "lat,lng" string keyed by the sub-metro URL
    CL_LATLONGS[url] = lat + "," + lng
  latlongs_fh.close()
def extract(instr, rx):
  """find the first instance of rx in instr and strip it of whitespace."""
  matches = re.findall(rx, instr, re.DOTALL)
  if not matches:
    return ""
  return matches[0].strip()
# pylint: disable-msg=R0915
def parse(instr, maxrecs, progress):
  """return FPXML given craigslist data

  Args:
    instr: filename of the craigslist crawler's cache file.
    maxrecs: stop after this many records (<= 0 means no limit).
    progress: if true, print periodic progress/skip statistics.

  Returns:
    (fpxml_string, number_of_orgs, number_of_opportunities)
  """
  # lazily load the metro -> "lat,lng" table on first use
  if CL_LATLONGS == None:
    load_craigslist_latlongs()
  xmlh.print_progress("loading craigslist crawler output...")
  crawl_craigslist.parse_cache_file(instr, listings_only=True)
  xmlh.print_progress("loaded "+str(len(crawl_craigslist.pages))+" craigslist pages.")
  # convert to footprint format
  outstr = '<?xml version="1.0" ?>'
  outstr += '<FootprintFeed schemaVersion="0.1">'
  outstr += '<FeedInfo>'
  outstr += xmlh.output_val('providerID', "105")
  outstr += xmlh.output_val('providerName', "craigslist")
  outstr += xmlh.output_val('feedID', "craigslist")
  outstr += xmlh.output_val('createdDateTime', xmlh.current_ts())
  outstr += xmlh.output_val('providerURL', "http://www.craigslist.org/")
  outstr += '</FeedInfo>'
  numorgs = numopps = 0
  # no "organization" in craigslist postings -- emit a single empty
  # placeholder org (ID 0) that every opportunity points at
  outstr += '<Organizations>'
  outstr += '<Organization>'
  outstr += '<organizationID>0</organizationID>'
  outstr += '<nationalEIN></nationalEIN>'
  outstr += '<name></name>'
  outstr += '<missionStatement></missionStatement>'
  outstr += '<description></description>'
  outstr += '<location>'
  outstr += xmlh.output_val("city", "")
  outstr += xmlh.output_val("region", "")
  outstr += xmlh.output_val("postalCode", "")
  outstr += '</location>'
  outstr += '<organizationURL></organizationURL>'
  outstr += '<donateURL></donateURL>'
  outstr += '<logoURL></logoURL>'
  outstr += '<detailURL></detailURL>'
  outstr += '</Organization>'
  numorgs += 1
  outstr += '</Organizations>'
  # tally of rejected listings, keyed by the rejection reason
  skipped_listings = {}
  skipped_listings["body"] = skipped_listings["title"] = \
      skipped_listings["not-ok"] = 0
  outstr += '<VolunteerOpportunities>'
  for i, url in enumerate(crawl_craigslist.pages):
    page = crawl_craigslist.pages[url]
    # only keep listings that explicitly allow redistribution
    ok = extract(page, "it's OK to distribute this "+
                 "charitable volunteerism opportunity")
    if ok == "":
      skipped_listings["not-ok"] += 1
      continue
    title = extract(page, "<title>(.+?)</title>")
    if title == "":
      skipped_listings["title"] += 1
      continue
    body = extract(page, '<div id="userbody">(.+?)<')
    # reject trivially short bodies
    if len(body) < 25:
      skipped_listings["body"] += 1
      continue
    item_id = extract(url, "/vol/(.+?)[.]html$")
    locstr = extract(page, "Location: (.+?)<")
    datestr = extract(page, "Date: (.+?)<")
    ts = dateutil.parser.parse(datestr)
    datetimestr = ts.strftime("%Y-%m-%dT%H:%M:%S")
    datestr = ts.strftime("%Y-%m-%d")
    if (maxrecs>0 and i>maxrecs):
      break
    xmlh.print_rps_progress("opps", progress, i, maxrecs)
    # periodically report the skip statistics
    if progress and i > 0 and i % 250 == 0:
      msg = "skipped " + str(skipped_listings["title"]+skipped_listings["body"])
      msg += " listings ("+str(skipped_listings["title"]) + " for no-title and "
      msg += str(skipped_listings["body"]) + " for short body and "
      msg += str(skipped_listings["not-ok"]) + " for no-redistrib)"
      xmlh.print_progress(msg)
    #print "---"
    #print "title:",title
    #print "loc:",locstr
    #print "date:",datestr
    #print "body:",body[0:100]
    outstr += '<VolunteerOpportunity>'
    outstr += '<volunteerOpportunityID>%s</volunteerOpportunityID>' % (item_id)
    outstr += '<sponsoringOrganizationIDs><sponsoringOrganizationID>0</sponsoringOrganizationID></sponsoringOrganizationIDs>'
    outstr += '<volunteerHubOrganizationIDs><volunteerHubOrganizationID>0</volunteerHubOrganizationID></volunteerHubOrganizationIDs>'
    outstr += '<title>%s</title>' % (title)
    outstr += '<detailURL>%s</detailURL>' % (url)
    # avoid CDATA in body...
    esc_body = xml.sax.saxutils.escape(body)
    esc_body100 = xml.sax.saxutils.escape(body[0:100])
    outstr += '<description>%s</description>' % (esc_body)
    outstr += '<abstract>%s</abstract>' % (esc_body100 + "...")
    outstr += '<lastUpdated>%s</lastUpdated>' % (datetimestr)
    # TODO: expires
    # TODO: synthesize location from metro...
    outstr += '<locations><location>'
    outstr += '<name>%s</name>' % (xml.sax.saxutils.escape(locstr))
    # what about the few that do geocode?
    lat, lng = "", ""
    try:
      # the URL prefix before "vol/" identifies the sub-metro, which is
      # the key into CL_LATLONGS
      domain, unused = url.split("vol/")
      lat, lng = CL_LATLONGS[domain].split(",")
    except:
      # ignore for now
      #print url
      #continue
      pass
    outstr += '<latitude>%s</latitude>' % (lat)
    outstr += '<longitude>%s</longitude>' % (lng)
    outstr += '</location></locations>'
    #outstr += '<locations><location>'
    #outstr += '<city>%s</city>' % (
    #outstr += '<region>%s</region>' % (
    #outstr += '</location></locations>'
    outstr += '<dateTimeDurations><dateTimeDuration>'
    outstr += '<openEnded>No</openEnded>'
    outstr += '<startDate>%s</startDate>' % (datestr)
    # TODO: endDate = startDate + N=14 days?
    # TODO: timezone???
    #outstr += '<endDate>%s</endDate>' % (
    outstr += '</dateTimeDuration></dateTimeDurations>'
    # TODO: categories???
    #outstr += '<categoryTags>'
    outstr += '</VolunteerOpportunity>'
    numopps += 1
  outstr += '</VolunteerOpportunities>'
  outstr += '</FootprintFeed>'
  #outstr = re.sub(r'><([^/])', r'>\n<\1', outstr)
  return outstr, numorgs, numopps
| Python |
#!/usr/bin/python
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#http://usaservice.org/page/event/search_results?orderby=day&state=CA&country=US&event_type%5b%5d=&limit=1000&radius_unit=miles&format=commons_rss&wrap=no
from xml.dom import minidom
import sys
import os
import urllib
import re
import thread
import time
from datetime import datetime
import socket
# give up on any socket operation after 30 seconds
DEFAULT_TIMEOUT = 30
socket.setdefaulttimeout(DEFAULT_TIMEOUT)
# US states/territories and Canadian provinces, plus 'na' for no-state
STATES = ['AA','AE','AK','AL','AP','AR','AS','AZ','CA','CO','CT','DC','DE','FL','FM','GA','GU','HI','IA','ID','IL','IN','KS','KY','LA','MA','MD','ME','MH','MI','MN','MO','MP','MS','MT','NC','ND','NE','NH','NJ','NM','NV','NY','OH','OK','OR','PA','PR','PW','RI','SC','SD','TN','TX','UT','VA','VI','VT','WA','WI','WV','WY','AB','BC','MB','NB','NL','NT','NS','NU','ON','PE','QC','SK','YT','na']
OUTPUT_FN = "usaservice.txt"
# serializes appends to OUTPUT_FN across crawler threads
file_lock = thread.allocate_lock()
# count of active crawler threads, guarded by crawlers_lock
crawlers = 0
crawlers_lock = thread.allocate_lock()
def get_url(state):
  """Build the usaservice.org RSS search URL for one state code."""
  base = "http://usaservice.org/page/event/search_results?orderby=day&state="
  params = ("&country=US&event_type%5b%5d=&limit=1000"
            "&radius_unit=miles&format=commons_rss&wrap=no")
  return base + state + params
def crawl_state(state, ignore):
  """Fetch one state's RSS results and append the items to OUTPUT_FN.

  Runs in its own thread (started from __main__ below); the second
  argument is unused.  Maintains the global 'crawlers' counter so the
  main thread can tell when all crawls have finished.
  """
  global crawlers, crawlers_lock, OUTPUT_FN, file_lock
  crawlers_lock.acquire()
  crawlers = crawlers + 1
  crawlers_lock.release()
  # crude throttle: busy-wait while more than 10 crawlers are active
  while crawlers > 10:
    time.sleep(1)
  try:
    url = get_url(state)
    fh = urllib.urlopen(url)
    rss = fh.read()
    fh.close()
    items = re.findall(r'<item>.+?</item>', rss, re.DOTALL)
    if len(items) > 0:
      print datetime.now(), "found", len(items), "items for state", state
    outstr = ""
    for item in items:
      # flatten each item onto a single output line
      item = re.sub(r'(?:\r?\n|\r)',' ', item)
      # drop a known spammy listing
      if re.search(r'Find Money For Next 12 Months', item):
        continue
      outstr += item + "\n"
    # append under the file lock -- all threads share OUTPUT_FN
    file_lock.acquire()
    outfh = open(OUTPUT_FN, "a")
    outfh.write(outstr)
    outfh.close()
    file_lock.release()
  except:
    # NOTE(review): swallows all errors (incl. network timeouts) so one
    # bad state can't kill the crawl, but nothing is logged
    pass
  crawlers_lock.acquire()
  crawlers = crawlers - 1
  crawlers_lock.release()
from optparse import OptionParser
if __name__ == "__main__":
  # start fresh: remove any previous output file (ignore if absent)
  try:
    os.unlink(OUTPUT_FN)
  except:
    pass
  # launch one crawler thread per state/province code
  for state in STATES:
    thread.start_new_thread(crawl_state, (state, "foo"))
  # give them a chance to start
  time.sleep(1)
  # block until every crawler thread has decremented the counter
  while (crawlers > 0):
    print datetime.now(), "waiting for", crawlers, "crawlers to finish."
    time.sleep(1)
  sys.exit(0)
| Python |
#!/usr/bin/python
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Geocoder and address functions for backend, using Google Maps API.
"""
import re
import time
import urllib
import xml_helpers as xmlh
from datetime import datetime
# Show status messages (also applies to xml parsing)
SHOW_PROGRESS = False
# Show detailed debug messages (just for the geocoder)
GEOCODE_DEBUG = False
def print_debug(msg):
  """print debug message."""
  # gated on the module-level GEOCODE_DEBUG flag; timestamps every line
  if GEOCODE_DEBUG:
    print datetime.now(), msg
def normalize_cache_key(query):
  """Simplifies the query for better matching in the cache."""
  # NOTE(review): this matches a literal backslash + tnrfv pair in the
  # text (e.g. the two characters "\n"), not real control characters --
  # presumably deliberate for cache-file round-tripping; confirm.
  cleaned = re.sub(r'\\[tnrfv]', r' ', query)
  # collapse runs of whitespace to a single space
  cleaned = re.sub(r'\s\s+', r' ', cleaned)
  return cleaned.strip().lower()
def filter_cache_delimiters(s):
  """Replace cache-file delimiter characters (newline, pipe, semicolon)
  with spaces so values round-trip through the cache file."""
  # single pass with a character class, equivalent to three re.sub calls
  return re.sub(r'[\n|;]', ' ', s)
GEOCODE_CACHE = None
GEOCODE_CACHE_FN = "geocode_cache.txt"
def geocode(query):
  """Looks up a location query using GMaps API with a local cache and
  returns: address, latitude, longitude, accuracy (as strings). On
  failure, returns False.
  Accuracy levels:
  7-9 = street address, 6 = road, 5 = zip code
  4 = city, 3 = county, 2 = state, 1 = country"""
  global GEOCODE_CACHE
  # delimiters would corrupt the cache-file format, so remove them first
  query = filter_cache_delimiters(query)
  # load the cache
  if GEOCODE_CACHE == None:
    GEOCODE_CACHE = {}
    geocode_fh = open(GEOCODE_CACHE_FN, "r")
    try:
      for line in geocode_fh:
        # Cache line format is:
        # query|address;latitude;longitude;accuracy
        # For example:
        # ca|California;36.7782610;-119.4179324;2
        # Or, if the location can't be found:
        # Any city anywhere|
        if "|" in line:
          key, result = line.strip().split("|")
          key = normalize_cache_key(key)
          if ";" in result:
            result = tuple(result.split(";"))
          else:
            # empty payload after '|' means a cached negative result
            result = False
          GEOCODE_CACHE[key] = result
          if len(GEOCODE_CACHE) % 250 == 0:
            print_debug("read " + str(len(GEOCODE_CACHE)) +
                        " geocode cache entries.")
    finally:
      geocode_fh.close()
  # try the cache
  key = normalize_cache_key(query)
  if key in GEOCODE_CACHE:
    return GEOCODE_CACHE[key]
  # call Google Maps API
  result = geocode_call(query)
  print_debug("geocode result: " + str(result))
  if result == False:
    # transient error (see geocode_call): report failure but don't
    # poison the cache with it
    return False # do not cache
  # cache the result
  if result == None:
    # permanently bad query: cache the negative result
    result = False
    cacheline = query + "|"
  else:
    # strip delimiters from each returned field before writing
    # (note: Python 2 map() returns a list here)
    result = map(filter_cache_delimiters, result)
    cacheline = query + "|" + ";".join(result)
  geocode_fh = open(GEOCODE_CACHE_FN, "a")
  xmlh.print_progress("storing cacheline: "+cacheline, "", SHOW_PROGRESS)
  geocode_fh.write(cacheline + "\n")
  geocode_fh.close()
  GEOCODE_CACHE[key] = result
  return result
def geocode_call(query, retries=4):
  """Queries the Google Maps geocoder and returns: address, latitude,
  longitude, accuracy (as strings). Returns None if the query is
  invalid (result can be cached). Returns False on error (do not
  cache)."""
  if retries < 0:
    print_debug("geocoder retry limit exceeded")
    return False
  print_debug("geocoding '" + query + "'...")
  params = urllib.urlencode(
    {'q':query, 'output':'xml',
     'oe':'utf8', 'sensor':'false',
     'key':'ABQIAAAAxq97AW0x5_CNgn6-nLxSrxQuOQhskTx7t90ovP5xOuY' + \
     '_YrlyqBQajVan2ia99rD9JgAcFrdQnTD4JQ'})
  try:
    maps_fh = urllib.urlopen("http://maps.google.com/maps/geo?%s" % params)
    res = maps_fh.read()
    maps_fh.close()
  except IOError, err:
    # transient network problem: back off briefly, then retry
    print_debug("Error contacting Maps API. Sleeping. " + str(err))
    time.sleep(1)
    return geocode_call(query, retries - 1)
  print_debug("response length: "+str(len(res)))
  node = xmlh.simple_parser(res, [], SHOW_PROGRESS)
  respcode = xmlh.get_tag_val(node, "code")
  if respcode == "":
    print_debug("unparseable response: "+res)
    return False
  respcode = int(respcode)
  if respcode in (400, 601, 602, 603): # problem with the query
    return None
  if respcode in (500, 620): # problem with the server
    print_debug("Connection problem or quota exceeded. Sleeping...")
    time.sleep(1)
    return geocode_call(query, retries - 1)
  if respcode != 200:
    # any other code: treat as a non-cacheable failure
    return False
  # TODO(danyq): if the query is a lat/lng, find the city-level
  # address, not just the first one.
  addr = xmlh.get_tag_val(node, "address")
  # TODO(danyq): Return street/city/country fields separately so that
  # the frontend can decide what to display. For now, this hack just
  # removes "USA" from all addresses.
  addr = re.sub(r', USA$', r'', addr)
  coords = xmlh.get_tag_val(node, "coordinates")
  if coords == "":
    # no coordinates in the response: use a harmless default
    coords = "0.0,0.0,0"
  # Note the order: maps API returns "longitude,latitude,altitude"
  lng, lat = coords.split(",")[:2]
  accuracy = xmlh.get_tag_attr(node, "AddressDetails", "Accuracy")
  if accuracy == "":
    accuracy = "0"
  return (addr, lat, lng, accuracy)
| Python |
#!/usr/bin/env python
import sys
import re
# Reads tab-separated records on stdin and prints a human-readable
# summary of selected columns, prefixed with a running line number.
lineno = 0
for line in sys.stdin:
  # strip the trailing newline (and any \r from DOS line endings)
  line = re.sub(r'[\r\n]+$', "", line)
  lineno += 1
  fields = line.split("\t")
  # NOTE(review): field indices (7, 30, 13, 45, 52-54) are positional
  # offsets into the upstream TSV dump -- verify against its schema
  outstr = str(lineno) + "\t"
  outstr += fields[7] + "\t"
  outstr += "url:"+fields[30] + "\n"
  outstr += fields[13] + "\n"
  outstr += fields[45] + "\t"
  outstr += "(" + fields[52] + "," + fields[53] + ", " + fields[54] + ")"
  print outstr
| Python |
#!/usr/bin/python
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
main() for the crawling/parsing/loading pipeline
"""
#from xml.dom.ext import PrettyPrint
import gzip
import hashlib
import urllib
import re
from datetime import datetime
import geocoder
import parse_footprint
import parse_gspreadsheet as pgs
import parse_usaservice
import parse_networkforgood
import parse_idealist
import parse_craigslist
import parse_volunteermatch
import subprocess
import sys
import time
import xml_helpers as xmlh
from optparse import OptionParser
import dateutil
import dateutil.tz
import dateutil.parser
# output record format: tab-separated fields, newline-separated records
FIELDSEP = "\t"
RECORDSEP = "\n"
# hard cap on abstract length, in characters (see get_abstract)
MAX_ABSTRACT_LEN = 300
# runtime flags; defaults here, overridden from command-line options
DEBUG = False
PROGRESS = False
PRINTHEAD = False
ABRIDGED = False
OUTPUTFMT = "fpxml"
# set a nice long timeout
import socket
socket.setdefaulttimeout(600.0)
# pick a latlng that'll never match real queries
UNKNOWN_LAT = UNKNOWN_LNG = "-10"
UNKNOWN_LATLNG = UNKNOWN_LAT + "," + UNKNOWN_LNG
# pick a latlng that'll never match real queries
LOCATIONLESS_LAT = LOCATIONLESS_LNG = "0"
LOCATIONLESS_LATLNG = LOCATIONLESS_LAT + "," + LOCATIONLESS_LNG
HEADER_ALREADY_OUTPUT = False
#BASE_PUB_URL = "http://change.gov/"
BASE_PUB_URL = "http://adamsah.net/"
# the subset of output fields the search frontend needs, with datatypes
SEARCHFIELDS = {
  # required
  "description":"builtin",
  "event_date_range":"builtin",
  "link":"builtin",
  "location":"builtin",
  "title":"builtin",
  # needed for search restricts
  "latitude":"float",
  "longitude":"float",
  # needed for query by time-of-day
  "startTime":"integer",
  "endTime":"integer",
  # needed for basic search results
  "id":"builtin",
  "detailURL":"URL",
  "abstract":"string",
  "location_string":"string",
  "feed_providerName":"string",
  }
FIELDTYPES = {
"title":"builtin",
"description":"builtin",
"link":"builtin",
"event_type":"builtin",
"quantity":"builtin",
"image_link":"builtin",
"event_date_range":"builtin",
"id":"builtin",
"location":"builtin",
"paid":"boolean",
"openended":"boolean",
"volunteersSlots":"integer",
"volunteersFilled":"integer",
"volunteersNeeded":"integer",
"minimumAge":"integer",
"startTime":"integer",
"endTime":"integer",
"latitude":"float",
"longitude":"float",
"providerURL":"URL",
"detailURL":"URL",
"org_organizationURL":"URL",
"org_logoURL":"URL",
"org_providerURL":"URL",
"feed_providerURL":"URL",
"lastUpdated":"dateTime",
"expires":"dateTime",
"feed_createdDateTime":"dateTime",
# note: type "location" isn"t safe because the Base geocoder can fail,
# causing the record to be rejected
"duration":"string",
"abstract":"string",
"sexRestrictedTo":"string",
"skills":"string",
"contactName":"string",
"contactPhone":"string",
"contactEmail":"string",
"language":"string",
"org_name":"string",
"org_missionStatement":"string",
"org_description":"string",
"org_phone":"string",
"org_fax":"string",
"org_email":"string",
"categories":"string",
"audiences":"string",
"commitmentHoursPerWeek":"string",
"employer":"string",
"feed_providerName":"string",
"feed_description":"string",
"providerID":"string",
"feed_providerID":"string",
"feedID":"string",
"opportunityID":"string",
"organizationID":"string",
"sponsoringOrganizationID":"strng",
"volunteerHubOrganizationID":"string",
"org_nationalEIN":"string",
"org_guidestarID":"string",
"venue_name":"string",
"location_string":"string",
"orgLocation":"string",
}
def print_progress(msg, filename="", progress=None):
  """print progress indicator."""
  # module-level PROGRESS can't be a default arg (bound at def time)
  effective = PROGRESS if progress == None else progress
  xmlh.print_progress(msg, filename, progress=effective)
def print_status(msg, filename="", progress=None):
  """print status indicator, for stats collection."""
  # fall back to the module-level PROGRESS flag when unspecified
  effective = PROGRESS if progress == None else progress
  xmlh.print_status(msg, filename, progress=effective)
def print_debug(msg):
  """print debug message."""
  # gated on the module-level DEBUG flag; timestamps every line
  if DEBUG:
    print datetime.now(), msg
# Google Base uses ISO8601... in PST -- I kid you not:
# http://base.google.com/support/bin/answer.py?
# answer=78170&hl=en#Events%20and%20Activities
# and worse, you have to change an env var in python...
def convert_dt_to_gbase(datestr, timestr, timezone):
  """converts dates like YYYY-MM-DD, times like HH:MM:SS and
  timezones like America/New_York, into Google Base format.

  Returns "" if the date/time can't be parsed.
  """
  try:
    tzinfo = dateutil.tz.tzstr(timezone)
  except:
    # unrecognized timezone string: fall back to UTC
    tzinfo = dateutil.tz.tzutc()
  try:
    timestr = dateutil.parser.parse(datestr + " " + timestr)
  except:
    print "error parsing datetime: "+datestr+" "+timestr
    return ""
  timestr = timestr.replace(tzinfo=tzinfo)
  # Base wants pacific time (see note above)
  pst = dateutil.tz.tzstr("PST8PDT")
  timestr = timestr.astimezone(pst)
  if timestr.year < 1900:
    # strftime can't format pre-1900 dates; bump into a valid range
    timestr = timestr.replace(year=timestr.year+1900)
  res = timestr.strftime("%Y-%m-%dT%H:%M:%S")
  res = re.sub(r'Z$', '', res)
  return res
# FPXML fields whose repeated children get flattened to CSV strings
CSV_REPEATED_FIELDS = ['categories', 'audiences']
# opportunity fields copied through to the output unchanged
DIRECT_MAP_FIELDS = [
  'opportunityID', 'organizationID', 'volunteersSlots', 'volunteersFilled',
  'volunteersNeeded', 'minimumAge', 'sexRestrictedTo', 'skills', 'contactName',
  'contactPhone', 'contactEmail', 'providerURL', 'language', 'lastUpdated',
  'expires', 'detailURL']
# organization fields emitted with an "org_" prefix
ORGANIZATION_FIELDS = [
  'nationalEIN', 'guidestarID', 'name', 'missionStatement', 'description',
  'phone', 'fax', 'email', 'organizationURL', 'logoURL', 'providerURL']
def flattener_value(node):
  """return a DOM node's first child, sans commas"""
  child = node.firstChild
  if child is None:
    return ""
  # strip commas so the value is safe in a CSV cell
  return child.data.replace(",", "")
def flatten_to_csv(domnode):
  """prints the children of a DOM node as CSV separated strings"""
  vals = [flattener_value(child) for child in domnode.childNodes]
  # drop empty values so we don't emit consecutive commas
  return ",".join(v for v in vals if v != "")
def output_field(name, value):
"""print a field value, handling long strings, header lines and
custom datatypes."""
#global PRINTHEAD, DEBUG
if PRINTHEAD:
if name not in FIELDTYPES:
print datetime.now(), "no type for field: " + name + FIELDTYPES[name]
sys.exit(1)
elif FIELDTYPES[name] == "builtin":
return name
elif OUTPUTFMT == "basetsv":
return "c:"+name+":"+FIELDTYPES[name]
else:
return name+":"+FIELDTYPES[name]
if OUTPUTFMT == "basetsv":
# grr: Base tries to treat commas in custom fields as being lists ?!
# http://groups.google.com/group/base-help-basics/browse_thread/thread/
# c4f51447191a6741
# TODO: note that this may cause fields to expand beyond their maxlen
# (e.g. abstract)
value = re.sub(r',', ';;', value)
if DEBUG:
if (len(value) > 70):
value = value[0:67] + "... (" + str(len(value)) + " bytes)"
return name.rjust(22) + " : " + value
if (FIELDTYPES[name] == "dateTime"):
return convert_dt_to_gbase(value, "", "UTC")
return value
def get_addr_field(node, field):
  """assuming a node is named (field), return it with optional trailing spc."""
  val = xmlh.get_tag_val(node, field)
  # trailing space only when non-empty, so concatenations stay clean
  return val + " " if val != "" else val
def get_city_addr_str(node):
  """synthesize a city-region-postal-country string."""
  # note: avoid commas, so it works with CSV
  parts = [get_addr_field(node, f)
           for f in ("city", "region", "postalCode", "country")]
  return "".join(parts)
def get_street_addr_str(node):
  """concatenate street address fields"""
  street_fields = ("streetAddress1", "streetAddress2", "streetAddress3")
  return "".join(get_addr_field(node, f) for f in street_fields)
def get_full_addr_str(node):
  """concatenate street address and city/region/postal/country fields"""
  street = get_street_addr_str(node)
  city = get_city_addr_str(node)
  return street + city
def find_geocoded_location(node):
  """Try a multitude of field combinations to get a geocode. Returns:
  raw_location, address, latitude, longitude, accuracy (as strings)."""
  # Combinations of fields to try geocoding.
  field_combinations = \
    ["streetAddress1,streetAddress2,streetAddress3,"
     + "city,region,postalCode,country",
     "streetAddress2,streetAddress3,"
     + "city,region,postalCode,country",
     "streetAddress3,city,region,postalCode,country",
     "city,region,postalCode,country",
     "postalCode,country",
     "city,region,country",
     "region,country",
     "latitude,longitude"]
  # Upper bound on the accuracy provided by a given field. This
  # prevents false positives like matching the city field to a street
  # name.
  field_accuracy = { "streetAddress1": 9,
                     "streetAddress2": 9,
                     "streetAddress3": 9,
                     "city": 5,
                     "region": 5,
                     "postalCode": 5,
                     "country": 1,
                     "latitude": 9,
                     "longitude": 9 }
  # try most-specific combinations first, falling back to coarser ones
  for fields in field_combinations:
    field_list = fields.split(",")
    # Compose the query and find the max accuracy.
    query = []
    max_accuracy = 0
    for field in field_list:
      field_val = xmlh.get_tag_val(node, field)
      if field_val != "":
        query += [field_val]
        max_accuracy = max(max_accuracy, field_accuracy[field])
    query = ",".join(query)
    print_debug("trying: " + query + " (" + str(max_accuracy) + ")")
    result = geocoder.geocode(query)
    if result:
      addr, lat, lng, acc = result
      # accept only results no more precise than the fields justify
      if int(acc) <= max_accuracy:
        print_debug("success: " + str(result))
        return result
      print_debug("incorrect accuracy: " + str(result))
  # every combination failed: return the raw address, unlocated
  result = (get_full_addr_str(node), "0.0", "0.0", "0")
  print_debug("failed: " + str(result))
  return result
def output_loc_field(node, mapped_name):
  """macro for output_field( convert node to loc field )"""
  full_loc = get_street_addr_str(node) + get_city_addr_str(node)
  return output_field(mapped_name, full_loc)
def output_tag_value(node, fieldname):
  """macro for output_field( get node value )"""
  tag_val = xmlh.get_tag_val(node, fieldname)
  return output_field(fieldname, tag_val)
def output_tag_value_renamed(node, xmlname, newname):
  """macro for output_field( get node value ) then emitted as newname"""
  tag_val = xmlh.get_tag_val(node, xmlname)
  return output_field(newname, tag_val)
def compute_stable_id(opp, org, locstr, openended, duration,
                      hrs_per_week, startend):
  """core algorithm for computing an opportunity's unique id.

  Hashes the org identity, location, time info, title, abstract and
  detail URL together so the same listing maps to the same id across
  runs (and, ideally, across providers).
  """
  if DEBUG:
    print "opp=" + str(opp) # shuts up pylint
  eid = xmlh.get_tag_val(org, "nationalEIN")
  if (eid == ""):
    # support informal "organizations" that lack EINs
    eid = xmlh.get_tag_val(org, "organizationURL")
  # TODO: if two providers have same listing, good odds the
  # locations will be slightly different...
  loc = locstr
  # TODO: if two providers have same listing, the time info
  # is unlikely to be exactly the same, incl. missing fields
  timestr = openended + duration + hrs_per_week + startend
  title = get_title(opp)
  abstract = get_abstract(opp)
  detailURL = xmlh.get_tag_val(opp, 'detailURL')
  # tab-join keeps fields unambiguous before hashing
  hashstr = "\t".join([eid, loc, timestr, title, abstract, detailURL])
  return hashlib.md5(hashstr).hexdigest()
def get_abstract(opp):
  """process abstract-- shorten, strip newlines and formatting.
  TODO: cache/memoize this."""
  raw = xmlh.get_tag_val(opp, "abstract")
  if not raw:
    # no explicit abstract: fall back to the description
    raw = xmlh.get_tag_val(opp, "description")
  return cleanse_snippet(raw)[:MAX_ABSTRACT_LEN]
def get_direct_mapped_fields(opp, org):
  """map a field directly from FPXML to Google Base.

  Emits the abstract, the paid flag, the DIRECT_MAP_FIELDS from the
  opportunity, the org_* fields from the organization, CSV-flattened
  repeated fields, and the orgLocation.
  """
  outstr = output_field("abstract", get_abstract(opp))
  if ABRIDGED:
    return outstr
  # normalize "paid" to a strict y/n flag
  paid = xmlh.get_tag_val(opp, "paid")
  if (paid == "" or paid.lower()[0] != "y"):
    paid = "n"
  else:
    paid = "y"
  outstr += FIELDSEP + output_field("paid", paid)
  for field in DIRECT_MAP_FIELDS:
    outstr += FIELDSEP + output_tag_value(opp, field)
  for field in ORGANIZATION_FIELDS:
    outstr += FIELDSEP + output_field("org_"+field,
                                      xmlh.get_tag_val(org, field))
  # repeated fields are flattened into comma-separated strings
  for field in CSV_REPEATED_FIELDS:
    outstr += FIELDSEP
    fieldval = opp.getElementsByTagName(field)
    val = ""
    if (fieldval.length > 0):
      val = flatten_to_csv(fieldval[0])
    outstr += output_field(field, val)
  # orgLocation
  outstr += FIELDSEP
  fieldval = opp.getElementsByTagName("orgLocation")
  if (fieldval.length > 0):
    outstr += output_loc_field(fieldval[0], "orgLocation")
  else:
    # no location given: still emit the (empty) column
    outstr += output_field("orgLocation", "")
  return outstr
def get_base_other_fields(opp, org):
  """These are fields that exist in other Base schemas-- for the sake of
  possible syndication, we try to make ourselves look like other Base
  feeds. Since we're talking about a small overlap, these fields are
  populated *as well as* direct mapping of the footprint XML fields."""
  outstr = output_field("employer", xmlh.get_tag_val(org, "name"))
  if ABRIDGED:
    return outstr
  extra_fields = [
      output_field("quantity", xmlh.get_tag_val(opp, "volunteersNeeded")),
      output_field("image_link", xmlh.get_tag_val(org, "logoURL")),
  ]
  # don't map expiration_date -- Base has strict limits (e.g. 2 weeks)
  return outstr + FIELDSEP + FIELDSEP.join(extra_fields)
sent_start_rx = re.compile(r'((^\s*|[.]\s+)[A-Z])([A-Z0-9 ,;-]{13,})')
# tags stripped even when they carry attributes, e.g. <font size="3">;
# precompiled so it can be case-insensitive on Python < 2.7 too
html_tag_rx = re.compile(r'<?(font|a|p|img)[^&]*/?>', re.IGNORECASE)
def cleanse_snippet(instr):
  """strip HTML markup, XML escapes and ALL-CAPS shouting from a snippet.

  Used on titles and abstracts before they are written to the feed.
  """
  # convert known XML/XHTML chars
  instr = re.sub(r'&nbsp;', ' ', instr)
  instr = re.sub(r'&quot;', '"', instr)
  instr = re.sub(r'&(uml|middot|ndash|bull|mdash|hellip);', '-', instr)
  # strip \n and \b
  instr = re.sub(r'(\\[bn])+', ' ', instr)
  # doubly-escaped HTML
  instr = re.sub(r'&lt;.+?&gt;', '', instr)
  instr = re.sub(r'&(amp;)+([a-z]+);', r'&\2;', instr)
  instr = re.sub(r'&#\d+;', '', instr)
  # singly-escaped HTML
  # </p>, <br/>
  instr = re.sub(r'</?[a-zA-Z]+?/?>', '', instr)
  # <a href=...>, <font ...>
  # bugfix: re.IGNORECASE was previously passed as re.sub()'s *count*
  # argument (silently limiting to 2 replacements, case-sensitive);
  # the compiled case-insensitive pattern above fixes both problems
  instr = html_tag_rx.sub('', instr)
  # strip leftover XML escaped chars
  instr = re.sub(r'&([a-z]+|#[0-9]+);', '', instr)
  # strip repeated spaces, so maxlen works
  instr = re.sub(r'\s+', ' ', instr)
  # fix obnoxious all caps titles and snippets
  # (loop var renamed: the original shadowed the builtin 'str')
  for match in re.finditer(sent_start_rx, instr):
    instr = re.sub(sent_start_rx, match.group(1)+match.group(3).lower(),
                   instr, 1)
  return instr
def get_title(opp):
  """Compute a cleansed, word-capitalized opportunity title.
  TODO: do this once and cache/memoize it."""
  title = cleanse_snippet(output_tag_value(opp, "title"))
  # uppercase the first letter of each whitespace-preceded word,
  # one substitution per match found in the original title
  for word in re.finditer(lcword_rx, title):
    replacement = word.group(1) + word.group(2).upper()
    title = re.sub(lcword_rx, replacement, title, 1)
  return title
# a lowercase letter immediately following whitespace
lcword_rx = re.compile(r'(\s)([a-z])')
def get_event_reqd_fields(opp):
  """Emit the fields Google Base requires (title, description, link);
  note that they aren't necessarily used by the FP app."""
  required = [get_title(opp),
              output_tag_value(opp, "description"),
              output_field("link", BASE_PUB_URL)]
  return FIELDSEP.join(required)
def get_feed_fields(feedinfo):
  """Emit fields from the <Feed> portion of FPXML."""
  outstr = output_tag_value_renamed(feedinfo,
                                    "providerName", "feed_providerName")
  if ABRIDGED:
    return outstr
  pieces = [outstr, output_tag_value(feedinfo, "feedID")]
  # remaining feed-level tags, each renamed with a feed_ prefix
  for tag, renamed in (("providerID", "feed_providerID"),
                       ("providerURL", "feed_providerURL"),
                       ("description", "feed_description"),
                       ("createdDateTime", "feed_createdDateTime")):
    pieces.append(output_tag_value_renamed(feedinfo, tag, renamed))
  return FIELDSEP.join(pieces)
def output_opportunity(opp, feedinfo, known_orgs, totrecs):
  """main function for outputting a complete opportunity.

  Emits one output record per (time, location) combination of the
  opportunity.  Returns (updated record count, output string); returns
  an empty string for records missing an ID or with an unknown org.
  """
  outstr = ""
  opp_id = xmlh.get_tag_val(opp, "volunteerOpportunityID")
  if (opp_id == ""):
    print_progress("no opportunityID")
    return totrecs, ""
  org_id = xmlh.get_tag_val(opp, "sponsoringOrganizationID")
  if (org_id not in known_orgs):
    print_progress("unknown sponsoringOrganizationID: " +
                   org_id + ".  skipping opportunity " + opp_id)
    return totrecs, ""
  org = known_orgs[org_id]
  opp_locations = opp.getElementsByTagName("location")
  opp_times = opp.getElementsByTagName("dateTimeDuration")
  # fields identical for every emitted (time, location) record
  repeated_fields = get_repeated_fields(feedinfo, opp, org)
  # no explicit times: emit one open-ended record
  if len(opp_times) == 0:
    opp_times = [ None ]
  for opptime in opp_times:
    if opptime == None:
      # sentinel start for open-ended events
      startend = convert_dt_to_gbase("1971-01-01", "00:00:00-00:00", "UTC")
      # NOTE(review): starttime/endtime are assigned but never used below
      starttime = "0000"
      endtime = "2359"
      openended = "Yes"
    else:
      # event_date_range
      # e.g. 2006-12-20T23:00:00/2006-12-21T08:30:00, in PST (GMT-8)
      start_date = xmlh.get_tag_val(opptime, "startDate")
      start_time = xmlh.get_tag_val(opptime, "startTime")
      end_date = xmlh.get_tag_val(opptime, "endDate")
      end_time = xmlh.get_tag_val(opptime, "endTime")
      openended = xmlh.get_tag_val(opptime, "openEnded")
      # e.g. 2006-12-20T23:00:00/2006-12-21T08:30:00, in PST (GMT-8)
      if (start_date == ""):
        start_date = "1971-01-01"
        start_time = "00:00:00-00:00"
      startend = convert_dt_to_gbase(start_date, start_time, "UTC")
      # only append the end when it is actually after the start
      if (end_date != "" and end_date + end_time > start_date + start_time):
        endstr = convert_dt_to_gbase(end_date, end_time, "UTC")
        startend += "/" + endstr
    # NOTE(review): on the opptime==None path this passes None to
    # xmlh.get_tag_val -- presumably that helper tolerates None; confirm.
    duration = xmlh.get_tag_val(opptime, "duration")
    hrs_per_week = xmlh.get_tag_val(opptime, "commitmentHoursPerWeek")
    time_fields = get_time_fields(openended, duration, hrs_per_week, startend)
    # no explicit locations: emit one location-less record
    if len(opp_locations) == 0:
      opp_locations = [ None ]
    for opploc in opp_locations:
      totrecs = totrecs + 1
      if PROGRESS and totrecs % 250 == 0:
        print_progress(str(totrecs)+" records generated.")
      if opploc == None:
        # NOTE(review): latlng and geocoded_loc are never read afterwards
        locstr, latlng, geocoded_loc = ("", "", "")
        loc_fields = get_loc_fields("0.0", "0.0", "0.0", "", "")
      else:
        locstr = get_full_addr_str(opploc)
        addr, lat, lng, acc = find_geocoded_location(opploc)
        # NOTE(review): lat/lng are offset by +1000.0 -- presumably an
        # encoding expected by the downstream consumer; confirm before
        # changing.
        loc_fields = get_loc_fields("", str(float(lat)+1000.0),
                                    str(float(lng)+1000.0), addr,
                                    xmlh.get_tag_val(opploc, "name"))
      # id must be stable across reruns for the same (opp, time, place)
      opp_id = compute_stable_id(opp, org, locstr, openended, duration,
                                 hrs_per_week, startend)
      outstr += output_field("id", opp_id)
      outstr += repeated_fields
      outstr += time_fields
      outstr += loc_fields
      outstr += RECORDSEP
  return totrecs, outstr
def get_time_fields(openended, duration, hrs_per_week, event_date_range):
  """Emit time-related fields, e.g. for multiple times per event."""
  startstr = endstr = ""
  # expected form: 2010-02-26T16:00:00/2010-02-26T16:00:00
  match = re.search(r'T(\d\d):(\d\d):\d\d(\s*/\s*.+?T(\d\d):(\d\d):\d\d)?',
                    event_date_range)
  if match:
    if match.group(2):
      # HHMM of the start time
      startstr = match.group(1) + match.group(2)
    else:
      # TODO: exception (but need a way to throw exceptions in general)
      # e.g. ignore this record, stop this feed, etc.
      pass
    if match.group(3):
      # HHMM of the end time, when the /end half is present
      endstr = match.group(4) + match.group(5)
  pieces = [output_field("event_date_range", event_date_range),
            output_field("startTime", startstr),
            output_field("endTime", endstr)]
  if not ABRIDGED:
    pieces.append(output_field("openended", openended))
    pieces.append(output_field("duration", duration))
    pieces.append(output_field("commitmentHoursPerWeek", hrs_per_week))
  return "".join([FIELDSEP + piece for piece in pieces])
def get_loc_fields(location, latitude, longitude, location_string,
                   venue_name):
  """Emit location-related fields, e.g. for multiple locations per event."""
  named_vals = [("location", location),
                ("latitude", latitude),
                ("longitude", longitude),
                ("location_string", location_string)]
  if not ABRIDGED:
    named_vals.append(("venue_name", venue_name))
  out = ""
  for name, val in named_vals:
    out += FIELDSEP + output_field(name, val)
  return out
def get_repeated_fields(feedinfo, opp, org):
  """Emit all fields that are repeated for each time and location."""
  sections = (get_feed_fields(feedinfo),
              get_event_reqd_fields(opp),
              get_base_other_fields(opp, org),
              get_direct_mapped_fields(opp, org))
  out = ""
  for section in sections:
    out += FIELDSEP + section
  return out
def output_header(feedinfo, opp, org):
  """Fake opportunity printer, which prints the header line instead."""
  global PRINTHEAD, HEADER_ALREADY_OUTPUT
  # no matter what, only print the header once!
  if HEADER_ALREADY_OUTPUT:
    return ""
  HEADER_ALREADY_OUTPUT = True
  # while PRINTHEAD is set, the field generators presumably emit column
  # names rather than values (output_field is defined elsewhere)
  PRINTHEAD = True
  pieces = [output_field("id", ""),
            get_repeated_fields(feedinfo, opp, org),
            get_time_fields("", "", "", ""),
            get_loc_fields("", "", "", "", "")]
  PRINTHEAD = False
  return "".join(pieces) + RECORDSEP
def convert_to_footprint_xml(instr, do_fastparse, maxrecs, progress):
  """Parse an FPXML string, then re-emit it as formatted FPXML.

  (An earlier pulldom-based streaming implementation was removed here --
  per its own comment it caused a RAM explosion even with pulldom.)
  """
  if do_fastparse:
    res, numorgs, numopps = parse_footprint.parse_fast(instr, maxrecs, progress)
    return res
  # slow parse
  xmldoc = parse_footprint.parse(instr, maxrecs, progress)
  # TODO: maxrecs
  return xmlh.prettyxml(xmldoc)
def convert_to_gbase_events_type(instr, origname, fastparse, maxrecs, progress):
  """non-trivial logic for converting FPXML to google base formatting.

  Args:
    instr: the whole FPXML document as a string.
    origname: short feed name (not used in this function's body).
    fastparse: if True, chunk the input with regexes and parse each chunk
      individually instead of DOM-parsing the entire document.
    maxrecs: stop after this many opportunities (<= 0 means unlimited).
    progress: whether to print progress messages.
  Returns:
    (output string, number of known orgs, number of opps processed).
  """
  # todo: maxrecs
  outstr = ""
  print_progress("convert_to_gbase_events_type...", "", progress)
  example_org = None
  known_orgs = {}
  if fastparse:
    # element whitelist handed to the lightweight chunk parser
    known_elnames = [
      'FeedInfo', 'FootprintFeed', 'Organization', 'Organizations',
      'VolunteerOpportunities', 'VolunteerOpportunity', 'abstract',
      'audienceTag', 'audienceTags', 'categoryTag', 'categoryTags',
      'city', 'commitmentHoursPerWeek', 'contactEmail', 'contactName',
      'contactPhone', 'country', 'createdDateTime', 'dateTimeDuration',
      'dateTimeDurationType', 'dateTimeDurations', 'description',
      'detailURL', 'directions', 'donateURL', 'duration', 'email',
      'endDate', 'endTime', 'expires', 'fax', 'feedID', 'guidestarID',
      'iCalRecurrence', 'language', 'latitude', 'lastUpdated', 'location',
      'locationType', 'locations', 'logoURL', 'longitude', 'minimumAge',
      'missionStatement', 'name', 'nationalEIN', 'openEnded',
      'organizationID', 'organizationURL', 'paid', 'phone', 'postalCode',
      'providerID', 'providerName', 'providerURL', 'region', 'schemaVersion',
      'sexRestrictedEnum', 'sexRestrictedTo', 'skills',
      'sponsoringOrganizationID', 'startDate', 'startTime', 'streetAddress1',
      'streetAddress2', 'streetAddress3', 'title', 'tzOlsonPath', 'virtual',
      'volunteerHubOrganizationID', 'volunteerOpportunityID',
      'volunteersFilled', 'volunteersSlots', 'volunteersNeeded', 'yesNoEnum'
      ]
    numopps = 0
    # NOTE(review): if the input has several FeedInfo sections, only the
    # last parsed one is kept -- confirm single-FeedInfo inputs.
    feedchunks = re.findall(
      re.compile('<FeedInfo>.+?</FeedInfo>', re.DOTALL), instr)
    for feedchunk in feedchunks:
      print_progress("found FeedInfo.", progress=progress)
      feedinfo = xmlh.simple_parser(feedchunk, known_elnames, False)
    orgchunks = re.findall(
      re.compile('<Organization>.+?</Organization>', re.DOTALL), instr)
    for orgchunk in orgchunks:
      if progress and len(known_orgs) % 250 == 0:
        print_progress(str(len(known_orgs))+" organizations seen.")
      org = xmlh.simple_parser(orgchunk, known_elnames, False)
      org_id = xmlh.get_tag_val(org, "organizationID")
      if (org_id != ""):
        known_orgs[org_id] = org
      # remember the first org seen, for header generation below
      if example_org == None:
        example_org = org
    oppchunks = re.findall(
      re.compile('<VolunteerOpportunity>.+?</VolunteerOpportunity>',
                 re.DOTALL), instr)
    for oppchunk in oppchunks:
      opp = xmlh.simple_parser(oppchunk, None, False)
      # plain assignment is safe here: the header is only emitted before
      # any opportunity output has accumulated in outstr
      if not HEADER_ALREADY_OUTPUT:
        outstr = output_header(feedinfo, opp, example_org)
      numopps, spiece = output_opportunity(opp, feedinfo, known_orgs, numopps)
      outstr += spiece
      if (maxrecs > 0 and numopps > maxrecs):
        break
    # --- commented-out alternative implementations kept for reference ---
    ## note: preserves order, so diff works (vs. one sweep per element type)
    #chunks = re.findall(
    #  re.compile('<(?:Organization|VolunteerOpportunity|FeedInfo)>.+?'+
    #             '</(?:Organization|VolunteerOpportunity|FeedInfo)>',
    #             re.DOTALL), instr)
    #for chunk in chunks:
    #  node = xmlh.simple_parser(chunk, known_elnames, False)
    #  if re.search("<FeedInfo>", chunk):
    #    print_progress("found FeedInfo.", progress=progress)
    #    feedinfo = xmlh.simple_parser(chunk, known_elnames, False)
    #    continue
    #  if re.search("<Organization>", chunk):
    #    if progress and len(known_orgs) % 250 == 0:
    #      print_progress(str(len(known_orgs))+" organizations seen.")
    #    org = xmlh.simple_parser(chunk, known_elnames, False)
    #    org_id = xmlh.get_tag_val(org, "organizationID")
    #    if (org_id != ""):
    #      known_orgs[org_id] = org
    #    if example_org == None:
    #      example_org = org
    #    continue
    #  if re.search("<VolunteerOpportunity>", chunk):
    #    global HEADER_ALREADY_OUTPUT
    #    opp = xmlh.simple_parser(chunk, None, False)
    #    if numopps == 0:
    #      # reinitialize
    #      outstr = output_header(feedinfo, node, example_org)
    #    numopps, spiece = output_opportunity(opp, feedinfo, known_orgs, numopps)
    #    outstr += spiece
    #    if (maxrecs > 0 and numopps > maxrecs):
    #      break
    #numopps = 0
    #nodes = xml.dom.pulldom.parseString(instr)
    #example_org = None
    #for type,node in nodes:
    #  if type == 'START_ELEMENT':
    #    if node.nodeName == 'FeedInfo':
    #      nodes.expandNode(node)
    #      feedinfo = node
    #    elif node.nodeName == 'Organization':
    #      nodes.expandNode(node)
    #      id = xmlh.get_tag_val(node, "organizationID")
    #      if (id != ""):
    #        known_orgs[id] = node
    #      if example_org == None:
    #        example_org = node
    #    elif node.nodeName == 'VolunteerOpportunity':
    #      nodes.expandNode(node)
    #      if numopps == 0:
    #        outstr += output_header(feedinfo, node, example_org)
    #      numopps, spiece = output_opportunity(node, feedinfo,
    #                                           known_orgs, numopps)
    #      outstr += spiece
  else:
    # not fastparse: full DOM parse of the document
    footprint_xml = parse_footprint.parse(instr, maxrecs, progress)
    feedinfos = footprint_xml.getElementsByTagName("FeedInfo")
    if (feedinfos.length != 1):
      print datetime.now(), "bad FeedInfo: should only be one section"
      # TODO: throw error
      sys.exit(1)
    feedinfo = feedinfos[0]
    organizations = footprint_xml.getElementsByTagName("Organization")
    for org in organizations:
      org_id = xmlh.get_tag_val(org, "organizationID")
      if (org_id != ""):
        known_orgs[org_id] = org
    opportunities = footprint_xml.getElementsByTagName("VolunteerOpportunity")
    numopps = 0
    for opp in opportunities:
      # emit the header before the first opportunity
      if numopps == 0:
        outstr += output_header(feedinfo, opp, organizations[0])
      numopps, spiece = output_opportunity(opp, feedinfo, known_orgs, numopps)
      outstr += spiece
  return outstr, len(known_orgs), numopps
def guess_shortname(filename):
  """From the input filename, guess which feed this is.

  Patterns are checked strictly in order -- e.g. the seniorcorps hash
  must be tested before volunteermatch.  Returns "" when nothing matches.
  """
  feed_patterns = (
      ("usa-?service", "usaservice"),
      (r'meetup', "meetup"),
      (r'barackobama', "mybarackobama"),
      (r'united.*way', "unitedway"),
      (r'americanredcross', "americanredcross"),
      (r'citizencorps', "citizencorps"),
      (r'ymca', "ymca"),
      ("habitat", "habitat"),
      ("americansolutions", "americansolutions"),
      ("spreadsheets[.]google[.]com", "gspreadsheet"),
      ("(handson|hot.footprint)", "handsonnetwork"),
      ("(volunteer[.]?gov)", "volunteergov"),
      ("(whichoneis.com|beextra|extraordinari)", "extraordinaries"),
      ("idealist", "idealist"),
      ("(userpostings|/export/Posting)", "footprint_userpostings"),
      ("craigslist", "craigslist"),
      ("americorps", "americorps"),
      ("givingdupage", "givingdupage"),
      ("mlk(_|day)", "mlk_day"),
      ("servenet", "servenet"),
      # note: has to come before volunteermatch
      (r'(seniorcorps|985148b9e3c5b9523ed96c33de482e3d)', "seniorcorps"),
      (r'(volunteermatch|cfef12bf527d2ec1acccba6c4c159687)', "volunteermatch"),
      ("christianvol", "christianvolunteering"),
      ("volunteer(two|2)", "volunteertwo"),
      ("mentorpro", "mentorpro"),
      (r'(mpsg_feed|myproj_servegov)', "myproj_servegov"),
  )
  for pattern, shortname in feed_patterns:
    if re.search(pattern, filename):
      return shortname
  return ""
def ftp_to_base(filename, ftpinfo, instr):
  """ftp the string to base, guessing the feed name from the orig filename.

  Args:
    filename: original input filename, used to derive the upload name.
    ftpinfo: "user:password" credentials for uploads.google.com.
    instr: the content to upload; gzipped first when the guessed
      destination name ends in .gz.
  """
  ftplib = __import__('ftplib')
  stringio = __import__('StringIO')
  dest_fn = guess_shortname(filename)
  if dest_fn == "":
    dest_fn = "footprint1.txt"
  else:
    dest_fn = dest_fn + "1.gz"
  if re.search(r'[.]gz$', dest_fn):
    print_progress("compressing data from "+str(len(instr))+" bytes", filename)
    gzip_fh = gzip.open(dest_fn, 'wb', 9)
    gzip_fh.write(instr)
    gzip_fh.close()
    data_fh = open(dest_fn, 'rb')
  else:
    data_fh = stringio.StringIO(instr)
  host = 'uploads.google.com'
  (user, passwd) = ftpinfo.split(":")
  print_progress("connecting to " + host + " as user " + user + "...", filename)
  ftp = ftplib.FTP(host)
  welcomestr = re.sub(r'\n', '\\n', ftp.getwelcome())
  print_progress("FTP server says: "+welcomestr, filename)
  ftp.login(user, passwd)
  print_progress("uploading filename "+dest_fn, filename)
  # BUGFIX: the retry loop used to spin forever (`while not success`),
  # which made the "giving up." branch below unreachable, and its bare
  # `except:` also swallowed KeyboardInterrupt/SystemExit.  Bound the
  # retries and catch only Exception.
  success = False
  max_attempts = 60
  for attempt in range(max_attempts):
    try:
      ftp.storbinary("STOR " + dest_fn, data_fh, 8192)
      success = True
      break
    except Exception:
      # probably ftplib.error_perm: 553: Permission denied on server. (Overwrite)
      print_progress("upload failed-- sleeping and retrying...")
      time.sleep(1)
  if success:
    print_progress("done uploading.")
  else:
    print_progress("giving up.")
  ftp.quit()
  data_fh.close()
def guess_parse_func(inputfmt, filename):
  """From the filename and the --inputfmt, guess the input type and the
  parse function.

  Returns (format name, callable(instr, maxrecs, progress)).  Calls
  sys.exit(1) when no format can be guessed.  Note the parser objects are
  constructed eagerly, one per matching branch.
  """
  # for development
  if inputfmt == "fpxml":
    return "fpxml", parse_footprint.parse_fast
  shortname = guess_shortname(filename)
  # FPXML providers
  fp = parse_footprint
  if shortname == "handsonnetwork":
    return "fpxml", fp.parser(
      '102', 'handsonnetwork', 'handsonnetwork', 'http://handsonnetwork.org/',
      'HandsOn Network')
  if shortname == "idealist":
    return "fpxml", fp.parser(
      '103', 'idealist', 'idealist', 'http://www.idealist.org/',
      'Idealist')
  if shortname == "volunteermatch":
    return "fpxml", fp.parser(
      '104', 'volunteermatch', 'volunteermatch',
      'http://www.volunteermatch.org/', 'Volunteer Match')
  if shortname == "volunteergov":
    return "fpxml", fp.parser(
      '107', 'volunteergov', 'volunteergov', 'http://www.volunteer.gov/',
      'volunteer.gov')
  if shortname == "extraordinaries":
    return "fpxml", fp.parser(
      '110', 'extraordinaries', 'extraordinaries', 'http://www.beextra.org/',
      'The Extraordinaries')
  if shortname == "meetup":
    return "fpxml", fp.parser(
      '112', 'meetup', 'meetup', 'http://www.meetup.com/',
      'Meetup')
  # NOTE(review): provider ID '115' is used both here (americansolutions)
  # and below for mlk_day -- looks like an accidental duplicate; confirm
  # against the provider registry.
  if shortname == "americansolutions":
    return "fpxml", fp.parser(
      '115', 'americansolutions', 'americansolutions',
      'http://www.americansolutions.com/',
      'American Solutions for Winning the Future')
  if shortname == "mybarackobama":
    return "fpxml", fp.parser(
      '116', 'mybarackobama', 'mybarackobama', 'http://my.barackobama.com/',
      'Organizing for America / DNC')
  if shortname == "unitedway":
    return "fpxml", fp.parser(
      '122', 'unitedway', 'unitedway', 'http://www.unitedway.org/',
      'United Way')
  if shortname == "americanredcross":
    return "fpxml", fp.parser(
      '123', 'americanredcross', 'americanredcross', 'http://www.givelife.org/',
      'American Red Cross')
  if shortname == "citizencorps":
    return "fpxml", fp.parser(
      '124', 'citizencorps', 'citizencorps', 'http://citizencorps.gov/',
      'Citizen Corps / FEMA')
  if shortname == "ymca":
    return "fpxml", fp.parser(
      '126', 'ymca', 'ymca', 'http://www.ymca.net/',
      'YMCA')
  if shortname == "habitat":
    parser = fp.parser(
      '111', 'habitat', 'habitat',
      'http://www.habitat.org/', 'Habitat for Humanity')
    def parse_habitat(instr, maxrecs, progress):
      """Wrapper around the habitat parser that repairs feed escaping."""
      # fixup bad escaping
      # NOTE(review): this substitution is a no-op as written
      # ('&code=' -> '&code=') -- the search pattern most likely lost an
      # '&amp;' to HTML-decoding of this source file; confirm against the
      # original habitat feed before relying on it.
      newstr = re.sub(r'&code=', '&code=', instr)
      return parser(newstr, maxrecs, progress)
    return "habitat", parse_habitat
  # networkforgood providers
  nfg = parse_networkforgood
  if shortname == "americorps":
    return "nfg", nfg.parser(
      '106', 'americorps', 'americorps', 'http://www.americorps.gov/',
      'AmeriCorps')
  if shortname == "servenet":
    return "nfg", nfg.parser(
      '114', 'servenet', 'servenet', 'http://www.servenet.org/',
      'servenet')
  if shortname == "mlk_day":
    return "nfg", nfg.parser(
      '115', 'mlk_day', 'mlk_day', 'http://my.mlkday.gov/',
      'Martin Luther King day')
  if shortname == "christianvolunteering":
    return "nfg", nfg.parser(
      '117', 'christianvolunteering', 'christianvolunteering',
      'http://www.christianvolunteering.org/', 'Christian Volunteering')
  if shortname == "volunteertwo":
    return "nfg", nfg.parser(
      '118', 'volunteer2', 'volunteer2',
      'http://www.volunteer2.com/', 'Volunteer2')
  if shortname == "mentorpro":
    return "nfg", nfg.parser(
      '119', 'mentor', 'mentor',
      'http://www.mentorpro.org/', 'MENTOR')
  if shortname == "myproj_servegov":
    return "nfg", nfg.parser(
      '120', 'myproj_servegov', 'myproj_servegov',
      'http://myproject.serve.gov/', 'MyprojectServeGov')
  if shortname == "seniorcorps":
    return "nfg", nfg.parser(
      '121', 'seniorcorps', 'seniorcorps',
      'http://www.seniorcorps.gov/', 'SeniorCorps')
  if shortname == "givingdupage":
    return "nfg", nfg.parser(
      '125', 'givingdupage', 'givingdupage', 'http://www.dupageco.org/',
      'Giving Dupage')
  # custom formats
  if shortname == "gspreadsheet":
    return "gspreadsheet", pgs.parse
  if shortname == "usaservice" or shortname == "usasvc":
    return "usaservice", parse_usaservice.parse
  if shortname == "craigslist" or shortname == "cl":
    return "craigslist", parse_craigslist.parse
  # legacy-- to be safe, remove after 9/1/2009
  #if shortname == "volunteermatch" or shortname == "vm":
  #  return "volunteermatch", parse_volunteermatch.parse
  #if shortname == "idealist":
  #  return "idealist", parse_idealist.parse
  print datetime.now(), "couldn't guess input format-- try --inputfmt"
  sys.exit(1)
def clean_input_string(instr):
  """run various cleanups for low-level encoding issues.

  Normalizes newlines and tabs, converts Windows-1252 "smart" quote and
  dash bytes to ASCII, and finally strips nonprintables via
  xmlh.clean_string.  Returns the cleaned string.
  """
  def cleaning_progress(msg):
    """macro: report progress with the current (post-step) length"""
    print_progress(msg+": "+str(len(instr))+" bytes.")
  cleaning_progress("read file")
  instr = re.sub(r'\r\n?', "\n", instr)
  cleaning_progress("filtered DOS newlines")
  # NOTE(review): the second alternative here is a literal TAB character,
  # redundant with \t -- it was most likely the entity '&#9;' before this
  # source was HTML-decoded; confirm against upstream.
  instr = re.sub(r'(?:\t|	)', " ", instr)
  cleaning_progress("filtered tabs")
  # \x93/\x94 and octal \222 are Windows-1252 curly-quote bytes (the
  # existing messages call them iso8859-1); optional \xc2 handles a
  # stray UTF-8 lead byte
  instr = re.sub(r'\xc2?[\x93\x94\222]', "'", instr)
  cleaning_progress("filtered iso8859-1 single quotes")
  instr = re.sub(r'\xc2?[\223\224]', '"', instr)
  cleaning_progress("filtered iso8859-1 double quotes")
  instr = re.sub(r'\xc2?[\225\226\227]', "-", instr)
  cleaning_progress("filtered iso8859-1 dashes")
  instr = xmlh.clean_string(instr)
  cleaning_progress("filtered nonprintables")
  return instr
def parse_options():
  """Parse cmdline options; returns (options, args) and updates the
  module-level formatting/progress globals as a side effect."""
  global DEBUG, PROGRESS, FIELDSEP, RECORDSEP, ABRIDGED, OUTPUTFMT
  parser = OptionParser("usage: %prog [options] sample_data.xml ...")
  parser.set_defaults(geocode_debug=False, debug=False, abridged=False,
                      progress=False, debug_input=False, outputfmt="basetsv",
                      output="", compress_output=False, test=False,
                      clean=True, maxrecs=-1)
  parser.add_option("-d", "--dbg", action="store_true", dest="debug")
  parser.add_option("--abridged", action="store_true", dest="abridged")
  parser.add_option("--noabridged", action="store_false", dest="abridged")
  parser.add_option("--clean", action="store_true", dest="clean")
  parser.add_option("--noclean", action="store_false", dest="clean")
  parser.add_option("--inputfmt", action="store", dest="inputfmt")
  parser.add_option("--test", action="store_true", dest="test")
  parser.add_option("--dbginput", action="store_true", dest="debug_input")
  parser.add_option("--progress", action="store_true", dest="progress")
  parser.add_option("--outputfmt", action="store", dest="outputfmt")
  parser.add_option("--output", action="store", dest="output")
  parser.add_option("--compress_output", action="store_true",
                    dest="compress_output")
  parser.add_option("--nocompress_output", action="store_false",
                    dest="compress_output")
  parser.add_option("-g", "--geodbg", action="store_true", dest="geocode_debug")
  parser.add_option("--ftpinfo", dest="ftpinfo")
  parser.add_option("--fs", "--fieldsep", action="store", dest="fs")
  parser.add_option("--rs", "--recordsep", action="store", dest="rs")
  parser.add_option("-n", "--maxrecords", action="store", dest="maxrecs")
  options, args = parser.parse_args(sys.argv[1:])
  # at least one input file/URL is required
  if not args:
    parser.print_help()
    sys.exit(0)
  if options.fs is not None:
    FIELDSEP = options.fs
  if options.rs is not None:
    RECORDSEP = options.rs
  if options.debug:
    # --dbg implies geocoder debugging, progress, and one-field-per-line
    DEBUG = True
    geocoder.GEOCODE_DEBUG = True
    PROGRESS = True
    geocoder.SHOW_PROGRESS = True
    FIELDSEP = "\n"
  if options.abridged:
    ABRIDGED = True
  if options.geocode_debug:
    geocoder.GEOCODE_DEBUG = True
  if options.test:
    options.progress = True
  if options.progress:
    PROGRESS = True
    geocoder.SHOW_PROGRESS = True
  # dead in practice: outputfmt defaults to "basetsv" and is never falsy
  if options.ftpinfo and not options.outputfmt:
    options.outputfmt = "basetsv"
  OUTPUTFMT = options.outputfmt
  return options, args
def open_input_filename(filename):
  """handle different file/URL opening methods.

  Returns a binary file-like object:
  - http(s) URLs are downloaded (and gunzipped when they end in .gz),
  - local .gz files are opened through gzip,
  - "-" means stdin,
  - anything else is opened as a plain binary file.
  """
  if re.search(r'^https?://', filename):
    print_progress("starting download of "+filename)
    outfh = urllib.urlopen(filename)
    if (re.search(r'[.]gz$', filename)):
      # is there a way to fetch and unzip an URL in one shot?
      print_progress("ah, gzip format.")
      content = outfh.read()
      outfh.close()
      print_progress("download done.")
      # BUGFIX: this used to call hashlib.md5().hexdigest() -- the md5 of
      # the *empty string* -- so every download shared one /tmp filename
      # and concurrent runs clobbered each other; hash the URL instead.
      # (NOTE: the temp file is still never deleted.)
      tmp_fn = "/tmp/tmp-"+hashlib.md5(filename.encode('utf-8')).hexdigest()
      tmpfh = open(tmp_fn, "wb+")
      tmpfh.write(content)
      tmpfh.close()
      outfh = gzip.open(tmp_fn, 'rb')
    return outfh
  elif re.search(r'[.]gz$', filename):
    return gzip.open(filename, 'rb')
  elif filename == "-":
    return sys.stdin
  return open(filename, 'rb')
def test_parse(footprint_xmlstr, maxrecs):
"""run the data through and then re-parse the output."""
print datetime.now(), "testing input: generating Footprint XML..."
fpxml = convert_to_footprint_xml(footprint_xmlstr, True, int(maxrecs), True)
# free some RAM
del footprint_xmlstr
print datetime.now(), "testing input: parsing and regenerating FPXML..."
fpxml2 = convert_to_footprint_xml(fpxml, True, int(maxrecs), True)
print datetime.now(), "testing input: comparing outputs..."
hash1 = hashlib.md5(fpxml).hexdigest()
hash2 = hashlib.md5(fpxml2).hexdigest()
fn1 = "/tmp/pydiff-"+hash1
fn2 = "/tmp/pydiff-"+hash2
if hash1 == hash2:
print datetime.now(), "success: getting head...\n"
outfh = open(fn1, "w+")
outfh.write(fpxml)
outfh.close()
subprocess.call(['head', fn1])
else:
print datetime.now(), "errors-- hash1=" + hash1 + " hash2=" + \
hash2 + " running diff", fn1, fn2
outfh = open(fn1, "w+")
outfh.write(fpxml)
outfh.close()
outfh = open(fn2, "w+")
outfh.write(fpxml2)
outfh.close()
subprocess.call(['diff', fn1, fn2])
# grr-- difflib performance sucks
#for line in difflib.unified_diff(fpxml, fpxml2, \
# fromfile='(first output)', tofile='(second output)'):
#print line
def process_file(filename, options, providerName="", providerID="",
providerURL=""):
shortname = guess_shortname(filename)
inputfmt, parsefunc = guess_parse_func(options.inputfmt, filename)
infh = open_input_filename(filename)
print_progress("reading data...")
# don't put this inside open_input_filename() because it could be large
instr = infh.read()
print_progress("done reading data.")
# remove bad encodings etc.
if options.clean:
instr = clean_input_string(instr)
# split nasty XML inputs, to help isolate problems
if options.debug_input:
instr = re.sub(r'><', r'>\n<', instr)
print_progress("inputfmt: "+inputfmt)
print_progress("outputfmt: "+options.outputfmt)
print_status("input data: "+str(len(instr))+" bytes", shortname)
print_progress("parsing...")
footprint_xmlstr, numorgs, numopps = \
parsefunc(instr, int(options.maxrecs), PROGRESS)
if (providerID != "" and
footprint_xmlstr.find('<providerID></providerID>')):
footprint_xmlstr = re.sub(
'<providerID></providerID>',
'<providerID>%s</providerID>' % providerID, footprint_xmlstr)
if (providerName != "" and
footprint_xmlstr.find('<providerName></providerName>')):
footprint_xmlstr = re.sub(
'<providerName></providerName>',
'<providerName>%s</providerName>' % providerName, footprint_xmlstr)
if (providerURL != "" and
footprint_xmlstr.find('<providerURL></providerURL>')):
footprint_xmlstr = re.sub(
'<providerURL></providerURL>',
'<providerURL>%s</providerURL>' % providerURL, footprint_xmlstr)
if options.test:
# free some RAM
del instr
test_parse(footprint_xmlstr, options.maxrecs)
sys.exit(0)
fastparse = not options.debug_input
if OUTPUTFMT == "fpxml":
# TODO: pretty printing option
print convert_to_footprint_xml(footprint_xmlstr, fastparse,
int(options.maxrecs), PROGRESS)
sys.exit(0)
if OUTPUTFMT != "basetsv":
print >> sys.stderr, datetime.now(), \
"--outputfmt not implemented: try 'basetsv','fpbasetsv' or 'fpxml'"
sys.exit(1)
outstr, numorgs, numopps = convert_to_gbase_events_type(
footprint_xmlstr, shortname, fastparse, int(options.maxrecs), PROGRESS)
return len(footprint_xmlstr), numorgs, numopps, outstr
def main():
  """main function for cmdline execution.

  Either processes one input file/URL, or -- for a Google-spreadsheet
  "master" URL -- iterates its rows, processing one provider spreadsheet
  per row.  Output goes to FTP, stdout, or --output per the options.
  """
  start_time = datetime.now()
  options, args = parse_options()
  filename = args[0]
  if re.search("spreadsheets[.]google[.]com", filename):
    if OUTPUTFMT == "fpxml":
      pgs.parser_error("FPXML format not supported for "+
                       "spreadsheet-of-spreadsheets")
      sys.exit(1)
    # NOTE(review): match is not checked for None -- a master URL
    # without key= would raise AttributeError here; confirm inputs.
    match = re.search(r'key=([^& ]+)', filename)
    url = "http://spreadsheets.google.com/feeds/cells/" + match.group(1)
    url += "/1/public/basic"
    # to avoid hitting 80 columns
    data = {}
    updated = {}
    if PROGRESS:
      print "processing master spreadsheet", url
    # NOTE(review): maxcol is never used afterwards
    maxrow, maxcol = pgs.read_gspreadsheet(url, data, updated, PROGRESS)
    header_row, header_startcol = pgs.find_header_row(data, 'provider name')
    # check to see if there's a header-description row
    header_desc = pgs.cellval(data, header_row+1, header_startcol)
    if not header_desc:
      pgs.parser_error("blank row not allowed below header row")
      sys.exit(1)
    header_desc = header_desc.lower()
    data_startrow = header_row + 1
    # a row containing "example" is documentation, not data -- skip it
    if header_desc.find("example") >= 0:
      data_startrow += 1
    # NOTE(review): `bytes` shadows the builtin of the same name
    bytes = numorgs = numopps = 0
    outstr = ""
    for row in range(data_startrow, int(maxrow)+1):
      # columns are: provider name, provider ID, provider URL;
      # the first blank cell ends the table
      providerName = pgs.cellval(data, row, header_startcol)
      if providerName is None or providerName == "":
        if PROGRESS:
          print "missing provider name from row "+str(row)
        break
      providerID = pgs.cellval(data, row, header_startcol+1)
      if providerID is None or providerID == "":
        if PROGRESS:
          print "missing provider ID from row "+str(row)
        break
      providerURL = pgs.cellval(data, row, header_startcol+2)
      if providerURL is None or providerURL == "":
        if PROGRESS:
          print "missing provider URL from row "+str(row)
        break
      # rewrite the provider's spreadsheet URL into its cells-feed form
      match = re.search(r'key=([^& ]+)', providerURL)
      providerURL = "http://spreadsheets.google.com/feeds/cells/"
      providerURL += match.group(1)
      providerURL += "/1/public/basic"
      if PROGRESS:
        print "processing spreadsheet", providerURL, "name="+providerName
      providerBytes, providerNumorgs, providerNumopps, tmpstr = process_file(
        providerURL, options, providerName, providerID, providerURL)
      if PROGRESS:
        print "done processing spreadsheet: name="+providerName, \
            "records="+str(providerNumopps), \
            "url="+providerURL
      bytes += providerBytes
      numorgs += providerNumorgs
      numopps += providerNumopps
      outstr += tmpstr
  else:
    bytes, numorgs, numopps, outstr = process_file(filename, options)
  #only need this if Base quoted fields it enabled
  #(NOTE(review): the pattern below looks like it lost an '&quot;' to
  # HTML-decoding of this source)
  #outstr = re.sub(r'"', r'"', outstr)
  if (options.ftpinfo):
    ftp_to_base(filename, options.ftpinfo, outstr)
  elif options.output == "":
    # trailing comma: print without appending a newline (py2)
    print outstr,
  elif options.compress_output:
    gzip_fh = gzip.open(options.output, 'wb', 9)
    gzip_fh.write(outstr)
    gzip_fh.close()
  else:
    outfh = open(options.output, "w")
    outfh.write(outstr)
    outfh.close()
  elapsed = datetime.now() - start_time
  # NOTE: if you change this, you also need to update datahub/load_gbase.py
  # and frontend/views.py to avoid breaking the dashboard-- other status
  # messages don't matter.
  shortname = guess_shortname(filename)
  xmlh.print_status("done parsing: output " + str(numorgs) + " organizations" +
                    " and " + str(numopps) + " opportunities" +
                    " (" + str(bytes) + " bytes): " +
                    str(int(elapsed.seconds/60)) + " minutes.",
                    shortname, PROGRESS)
| Python |
#!/usr/bin/env python
#
"""
script for loading into googlebase.
Usage: load_gbase.py username password
"""
import sys
import re
import gzip
import bz2
import logging
import subprocess
from datetime import datetime
import footprint_lib
# Google Base credentials, filled in from argv by main().
USERNAME = ""
PASSWORD = ""
# Directory where dashboard logs and stats files are written.
LOGPATH = "/home/footprint/public_html/datahub/dashboard/"
LOG_FN = "load_gbase.log"
LOG_FN_BZ2 = "load_gbase.log.bz2"
DETAILED_LOG_FN = "load_gbase_detail.log"
# this file needs to be copied over to frontend/autocomplete/
POPULAR_WORDS_FN = "popular_words.txt"
FIELD_STATS_FN = "field_stats.txt"
GEO_STATS_FN = "geo_stats.txt"
# Words excluded from the popular-words histogram built by
# process_popular_words(): common English stopwords plus
# footprint-specific boilerplate terms (at the end of the list).
STOPWORDS = set([
  'a', 'about', 'above', 'across', 'after', 'afterwards', 'again', 'against',
  'all', 'almost', 'alone', 'along', 'already', 'also', 'although', 'always',
  'am', 'among', 'amongst', 'amoungst', 'amount', 'an', 'and', 'another', 'any',
  'anyhow', 'anyone', 'anything', 'anyway', 'anywhere', 'are', 'around', 'as',
  'at', 'back', 'be', 'became', 'because', 'become', 'becomes', 'becoming',
  'been', 'before', 'beforehand', 'behind', 'being', 'below', 'beside',
  'besides', 'between', 'beyond', 'bill', 'both', 'bottom', 'but', 'by', 'call',
  'can', 'cannot', 'cant', 'co', 'computer', 'con', 'could', 'couldnt', 'cry',
  'de', 'describe', 'detail', 'do', 'done', 'down', 'due', 'during', 'each',
  'eg', 'eight', 'either', 'eleven', 'else', 'elsewhere', 'empty', 'enough',
  'etc', 'even', 'ever', 'every', 'everyone', 'everything', 'everywhere',
  'except', 'few', 'fifteen', 'fify', 'fill', 'find', 'fire', 'first', 'five',
  'for', 'former', 'formerly', 'forty', 'found', 'four', 'from', 'front','full',
  'further', 'get', 'give', 'go', 'had', 'has', 'hasnt', 'have', 'he', 'hence',
  'her', 'here', 'hereafter', 'hereby', 'herein', 'hereupon', 'hers', 'herself',
  'him', 'himself', 'his', 'how', 'however', 'hundred', 'i', 'ie', 'if', 'in',
  'inc', 'indeed', 'interest', 'into', 'is', 'it', 'its', 'itself', 'keep',
  'last', 'latter', 'latterly', 'least', 'less', 'ltd', 'made', 'many', 'may',
  'me', 'meanwhile', 'might', 'mill', 'mine', 'more', 'moreover', 'most',
  'mostly', 'move', 'much', 'must', 'my', 'myself', 'name', 'namely', 'neither',
  'never', 'nevertheless', 'next', 'nine', 'no', 'nobody', 'none', 'noone',
  'nor', 'not', 'nothing', 'now', 'nowhere', 'of', 'off', 'often', 'on', 'once',
  'one', 'only', 'onto', 'or', 'other', 'others', 'otherwise', 'our', 'ours',
  'ourselves', 'out', 'over', 'own', 'part', 'per', 'perhaps', 'please', 'put',
  'rather', 're', 'same', 'see', 'seem', 'seemed', 'seeming', 'seems',
  'serious', 'several', 'she', 'should', 'show', 'side', 'since', 'sincere',
  'six', 'sixty', 'so', 'some', 'somehow', 'someone', 'something', 'sometime',
  'sometimes', 'somewhere', 'still', 'such', 'system', 'take', 'ten', 'than',
  'that', 'the', 'their', 'them', 'themselves', 'then', 'thence', 'there',
  'thereafter', 'thereby', 'therefore', 'therein', 'thereupon', 'these', 'they',
  'thick', 'thin', 'third', 'this', 'those', 'though', 'three', 'through',
  'throughout', 'thru', 'thus', 'to', 'together', 'too', 'top', 'toward',
  'towards', 'twelve', 'twenty', 'two', 'un', 'under', 'until', 'up', 'upon',
  'us', 'very', 'via', 'was', 'we', 'well', 'were', 'what', 'whatever', 'when',
  'whence', 'whenever', 'where', 'whereafter', 'whereas', 'whereby', 'wherein',
  'whereupon', 'wherever', 'whether', 'which', 'while', 'whither', 'who',
  'whoever', 'whole', 'whom', 'whose', 'why', 'will', 'with', 'within',
  'without', 'would', 'yet', 'you', 'your', 'yours', 'yourself', 'yourselves',
  # custom stopwords for footprint
  'url', 'amp', 'quot', 'help', 'http', 'search', 'nbsp', 'need', 'cache',
  'vol', 'housingall', 'wantedall', 'personalsall', 'net', 'org', 'www',
  'gov', 'yes', 'no', '999',
  ])
def print_progress(msg):
"""print progress message-- shutup pylint"""
print str(datetime.now())+": "+msg
# Histogram of word -> occurrence count, accumulated across all feeds.
KNOWN_WORDS = {}
def process_popular_words(content):
  """update the dictionary of popular words.

  Strips markup from content, tokenizes on non-alphanumerics, and
  accumulates counts into the global KNOWN_WORDS histogram, dropping
  stopwords, short words and words seen fewer than twice.
  """
  # TODO: handle phrases (via whitelist, then later do something smart).
  print_progress("cleaning content: %d bytes" % len(content))
  # strip all tags/markup before tokenizing
  cleaner_regexp = re.compile('<[^>]*>', re.DOTALL)
  cleaned_content = re.sub(cleaner_regexp, '', content).lower()
  print_progress("splitting words, %d bytes" % len(cleaned_content))
  words = re.split(r'[^a-zA-Z0-9]+', cleaned_content)
  print_progress("loading words")
  for word in words:
    # ignore common english words
    if word in STOPWORDS:
      continue
    # ignore short words
    if len(word) <= 2:
      continue
    if word not in KNOWN_WORDS:
      KNOWN_WORDS[word] = 0
    KNOWN_WORDS[word] += 1
  print_progress("cleaning rare words from %d words" % len(KNOWN_WORDS))
  # BUG FIX: materialize the key list before deleting entries -- relying
  # on .keys() returning a copy is a python-2-only behavior
  for word in list(KNOWN_WORDS.keys()):
    if KNOWN_WORDS[word] < 2:
      del KNOWN_WORDS[word]
  print_progress("done: word dict size %d words" % len(KNOWN_WORDS))
def print_word_stats():
  """Dump the word-frequency histogram to LOGPATH+POPULAR_WORDS_FN.

  Drops words appearing fewer than 10 times, then writes one
  "<freq><TAB><word>" line per word, most frequent first.
  """
  print_progress("final cleanse: keeping only words appearing 10 times")
  for word in list(KNOWN_WORDS.keys()):
    if KNOWN_WORDS[word] < 10:
      del KNOWN_WORDS[word]
  sorted_words = list(KNOWN_WORDS.iteritems())
  sorted_words.sort(cmp=lambda a, b: cmp(b[1], a[1]))
  print_progress("writing "+POPULAR_WORDS_FN+"...")
  popfh = open(LOGPATH+POPULAR_WORDS_FN, "w")
  # BUG FIX: iterate the frequency-sorted list directly -- the original
  # wrapped it in sorted(), which re-sorted alphabetically by word and
  # silently defeated the frequency ordering computed above
  for word, freq in sorted_words:
    popfh.write(str(freq)+"\t"+word+"\n")
  popfh.close()
  print_progress("done writing "+LOGPATH+POPULAR_WORDS_FN)
# Histogram state accumulated across feeds by process_field_stats():
#   FIELD_NAMES -- column names from the first header row seen
#   FIELD_VALUES -- per-column {value: count} histograms (parallel list)
#   NUM_RECORDS_TOTAL -- total number of data rows processed
#   LATLNG_DENSITY -- {"lat,lng": count}, coords truncated to 2 decimals
FIELD_VALUES = None
FIELD_NAMES = None
NUM_RECORDS_TOTAL = 0
LATLNG_DENSITY = {}
def process_field_stats(content):
  """update the field-value histograms.

  Args:
    content: TSV text whose first line is the header row of field names.
  """
  global FIELD_NAMES, FIELD_VALUES, NUM_RECORDS_TOTAL
  for lineno, line in enumerate(content.splitlines()):
    fields = line.split("\t")
    if lineno == 0:
      # remember the header from the first feed only -- later feeds are
      # assumed to share the same column layout
      if FIELD_NAMES is None:
        FIELD_NAMES = fields
        FIELD_VALUES = [{} for i in range(len(fields))]
      continue
    NUM_RECORDS_TOTAL += 1
    lat_val = lng_val = None
    for i, val in enumerate(fields):
      if lat_val is None and FIELD_NAMES[i].find('latitude') >= 0:
        lat_val = val
      if lng_val is None and FIELD_NAMES[i].find('longitude') >= 0:
        lng_val = val
      # cap value length so the histograms stay bounded
      val = val[0:300]
      if val in FIELD_VALUES[i]:
        FIELD_VALUES[i][val] += 1
      else:
        FIELD_VALUES[i][val] = 1
    # BUG FIX: skip rows with missing or unparseable coordinates instead
    # of crashing on float(None) / float("")
    try:
      lat_fltval = float(lat_val)
      lng_fltval = float(lng_val)
    except (TypeError, ValueError):
      continue
    # presumably values > 500 encode negatives offset by 1000 -- undo it
    # (TODO confirm against the TSV producer)
    if lat_fltval > 500.0:
      lat_fltval -= 1000.0
    if lng_fltval > 500.0:
      lng_fltval -= 1000.0
    # truncate to 2 decimal places so nearby points share a bucket
    lat_val = re.sub(r'([.]\d\d)\d+', r'\1', str(lat_fltval))
    lng_val = re.sub(r'([.]\d\d)\d+', r'\1', str(lng_fltval))
    latlng = lat_val + ',' + lng_val
    if latlng in LATLNG_DENSITY:
      LATLNG_DENSITY[latlng] += 1
    else:
      LATLNG_DENSITY[latlng] = 1
def print_field_stats():
  """dump field-value stats."""
  print_progress("writing "+FIELD_STATS_FN+"...")
  statsfh = open(LOGPATH+FIELD_STATS_FN, "w")
  statsfh.write("number of records: %s\n" % str(NUM_RECORDS_TOTAL))
  for idx, name in enumerate(FIELD_NAMES):
    histogram = FIELD_VALUES[idx]
    statsfh.write("field "+name+":uniqvals="+str(len(histogram))+"\n")
    # most frequent values first; stop at the first value seen < 10 times
    ranked = list(histogram.iteritems())
    ranked.sort(cmp=lambda x, y: cmp(y[1], x[1]))
    for value, count in ranked[:1000]:
      if count < 10:
        break
      statsfh.write("  %5d %s\n" % (count, value))
  statsfh.close()
  print_progress("done writing "+FIELD_STATS_FN)
def print_geo_stats():
  """dump the lat/lng density histogram, one "lat,lng count" per line."""
  print_progress("writing "+GEO_STATS_FN+"...")
  geofh = open(LOGPATH+GEO_STATS_FN, "w")
  for bucket, count in LATLNG_DENSITY.iteritems():
    geofh.write("%s %d\n" % (bucket, count))
  geofh.close()
  print_progress("done writing "+GEO_STATS_FN)
def append_log(outstr):
  """append to the detailed and truncated log, for stats collection."""
  # everything goes to the detailed log
  logfh = open(LOGPATH+DETAILED_LOG_FN, "a")
  logfh.write(outstr)
  logfh.close()
  # only STATUS/ERROR lines go to the truncated log
  logfh = open(LOGPATH+LOG_FN, "a")
  for logline in outstr.split('\n'):
    if re.search(r'(STATUS|ERROR)', logline):
      logfh.write(logline+"\n")
  logfh.close()
  # create a bzip2 file from the truncated log
  plainfh = open(LOGPATH+LOG_FN, "r")
  logdata = plainfh.read()
  plainfh.close()
  bzfh = bz2.BZ2File(LOGPATH+LOG_FN_BZ2, "w")
  bzfh.write(logdata)
  bzfh.close()
def error_exit(msg):
  """Print an error message to stderr and exit."""
  sys.stderr.write(str(msg) + "\n")
  sys.exit(1)
# Use a shell for subcommands on Windows to get a PATH search.
USE_SHELL = sys.platform.startswith("win")
def run_shell_with_retcode(command, print_output=False,
                           universal_newlines=True):
  """Executes a command and returns stdout, stderr and the return code.

  Both stdout and stderr are also appended to the logs via append_log().

  Args:
    command: Command to execute (argument list; run through the shell on
      Windows so PATH is searched).
    print_output: If True, stdout is echoed line-by-line as it arrives
      and stderr is echoed after the process exits.
    universal_newlines: Use universal_newlines flag (default: True).

  Returns:
    Tuple (stdout output, stderr output, return code).
  """
  logging.info("Running %s", command)
  proc = subprocess.Popen(command, stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE,
                          shell=USE_SHELL,
                          universal_newlines=universal_newlines)
  if print_output:
    output_array = []
    while True:
      line = proc.stdout.readline()
      if not line:
        break
      print line.strip("\n")
      output_array.append(line)
    output = "".join(output_array)
  else:
    output = proc.stdout.read()
  proc.wait()
  # NOTE(review): stderr is only drained after stdout is exhausted; a
  # child that fills the stderr pipe first could deadlock here -- confirm
  # the subprocesses involved never emit that much stderr.
  errout = proc.stderr.read()
  if print_output and errout:
    print >> sys.stderr, errout
  proc.stdout.close()
  proc.stderr.close()
  append_log(output)
  append_log(errout)
  return output, errout, proc.returncode
def run_shell(command, silent_ok=False, universal_newlines=True,
              print_output=False):
  """run a shell command."""
  stdout, stderr, retcode = run_shell_with_retcode(command, print_output,
                                                   universal_newlines)
  # nonzero return codes are deliberately not treated as fatal here
  if not stdout and not silent_ok:
    error_exit("No output from %s" % command)
  return stdout, stderr, retcode
def load_gbase(name, url, do_processing=True, do_ftp=True):
  """Process one feed end-to-end: parse it, accumulate stats, FTP to Base.

  Args:
    name: feed short name; also determines the local TSV filename.
    url: feed URL or local filename, passed through to footprint_lib.
    do_processing: if True, (re)generate the gzipped TSV by running
      ./footprint_lib.py as a subprocess; if False, reuse the existing file.
    do_ftp: if True, upload the TSV data to Google Base via FTP.
  """
  print_progress("loading "+name+" from "+url)
  # run as a subprocess so we can ignore failures and keep going.
  # later, we'll run these concurrently, but for now we're RAM-limited.
  # ignore retcode
  # match the filenames to the feed filenames in Google Base, so we can
  # manually upload for testing.
  tsv_filename = name+"1.gz"
  if do_processing:
    stdout, stderr, retcode = run_shell(["./footprint_lib.py", "--progress",
                                         #"--ftpinfo", USERNAME+":"+PASSWORD,
                                         "--output", tsv_filename, url,
                                         "--compress_output" ],
                                        silent_ok=True, print_output=False)
    print stdout,
    if stderr and stderr != "":
      # prefix every stderr line with the feed name for readability
      print name+":STDERR: ", re.sub(r'\n', '\n'+name+':STDERR: ', stderr)
    if retcode and retcode != 0:
      print name+":RETCODE: "+str(retcode)
  print "reading TSV data..."
  gzip_fh = gzip.open(tsv_filename, 'r')
  tsv_data = gzip_fh.read()
  gzip_fh.close()
  print "processing field stats..."
  process_field_stats(tsv_data)
  print "processing popular words..."
  process_popular_words(tsv_data)
  if do_ftp:
    print_progress("ftp'ing to base")
    footprint_lib.PROGRESS = True
    footprint_lib.ftp_to_base(name, USERNAME+":"+PASSWORD, tsv_data)
  print_progress("load_gbase: done.")
def test_loaders():
  """for testing, read from local disk as much as possible."""
  local_feeds = [
    ("americanredcross", "americanredcross.xml"),
    ("mlk_day", "mlk_day.xml"),
    ("gspreadsheets",
     "https://spreadsheets.google.com/ccc?key=rOZvK6aIY7HgjO-hSFKrqMw"),
    ("craigslist", "craigslist-cache.txt"),
  ]
  # no re-processing, no FTP upload in test mode
  for feed_name, feed_url in local_feeds:
    load_gbase(feed_name, feed_url, False, False)
def loaders():
  """put all loaders in one function for easier testing."""
  # feeds whose XML file is named after the provider
  standard_feeds = [
    "americanredcross", "americansolutions", "americorps",
    "christianvolunteering", "citizencorps", "extraordinaries",
    "givingdupage", "habitat", "handsonnetwork", "idealist", "meetup",
    "mentorpro", "mlk_day", "mybarackobama", "myproj_servegov",
    "seniorcorps", "servenet", "unitedway", "volunteergov",
    "volunteermatch", "volunteertwo", "ymca",
  ]
  for feed_name in standard_feeds:
    load_gbase(feed_name, feed_name + ".xml")
  # requires special crawling
  load_gbase("gspreadsheets",
             "https://spreadsheets.google.com/ccc?key=rOZvK6aIY7HgjO-hSFKrqMw")
  # note: craiglist crawler is run asynchronously, hence the local file
  load_gbase("craigslist", "craigslist-cache.txt")
  # out for launch:
  # load_gbase("mybarackobama",
  #            "http://my.barackobama.com/page/event/search_results?"+
  #            "format=footprint")
  # old custom idealist feed; legacy -- to be safe, remove after 9/1/2009
  # load_gbase("idealist", "http://feeds.idealist.org/xml/feeds/"+
  #            "Idealist-VolunteerOpportunity-VOLUNTEER_OPPORTUNITY_TYPE."+
  #            "en.open.atom.gz")
def main():
"""shutup pylint."""
global USERNAME, PASSWORD
if len(sys.argv) < 3:
print "Usage:", sys.argv[0], "<gbase username> <password>"
sys.exit(1)
USERNAME = sys.argv[1]
PASSWORD = sys.argv[2]
if USERNAME == "test":
global LOGPATH
LOGPATH = "./"
test_loaders()
else:
loaders()
print_word_stats()
print_field_stats()
print_geo_stats()
if __name__ == "__main__":
main()
| Python |
#!/usr/bin/python
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Geocoder and address functions for backend, using Google Maps API.
"""
import re
import time
import urllib
import xml_helpers as xmlh
from datetime import datetime
# Show status messages (also applies to xml parsing)
SHOW_PROGRESS = False
# Show detailed debug messages (just for the geocoder)
GEOCODE_DEBUG = False
def print_debug(msg):
"""print debug message."""
if GEOCODE_DEBUG:
print datetime.now(), msg
def normalize_cache_key(query):
  """Simplifies the query for better matching in the cache."""
  # turn literal backslash-escapes (e.g. the two chars "\" "t") into spaces
  collapsed = re.sub(r'\\[tnrfv]', ' ', query)
  # squeeze runs of whitespace down to a single space
  collapsed = re.sub(r'\s\s+', ' ', collapsed)
  return collapsed.strip().lower()
def filter_cache_delimiters(s):
  """Replace cache-file delimiter chars (newline, '|', ';') with spaces."""
  return s.replace('\n', ' ').replace('|', ' ').replace(';', ' ')
# In-memory geocode cache, lazily loaded from GEOCODE_CACHE_FN by geocode().
# Maps normalized query -> (address, lat, lng, accuracy) string tuple, or
# False for queries that previously failed to resolve.
GEOCODE_CACHE = None
GEOCODE_CACHE_FN = "geocode_cache.txt"
def geocode(query):
  """Looks up a location query using GMaps API with a local cache and
  returns: address, latitude, longitude, accuracy (as strings). On
  failure, returns False.
  Accuracy levels:
  7-9 = street address, 6 = road, 5 = zip code
  4 = city, 3 = county, 2 = state, 1 = country"""
  global GEOCODE_CACHE
  # strip delimiter chars so the query is safe to store in the cache file
  query = filter_cache_delimiters(query)
  # load the cache on first use
  # NOTE(review): assumes GEOCODE_CACHE_FN already exists -- open() will
  # raise IOError on a fresh setup; confirm the file ships with the repo
  if GEOCODE_CACHE == None:
    GEOCODE_CACHE = {}
    geocode_fh = open(GEOCODE_CACHE_FN, "r")
    try:
      for line in geocode_fh:
        # Cache line format is:
        # query|address;latitude;longitude;accuracy
        # For example:
        # ca|California;36.7782610;-119.4179324;2
        # Or, if the location can't be found:
        # Any city anywhere|
        if "|" in line:
          key, result = line.strip().split("|")
          key = normalize_cache_key(key)
          if ";" in result:
            result = tuple(result.split(";"))
          else:
            # empty result records a permanently-failed lookup
            result = False
          GEOCODE_CACHE[key] = result
          if len(GEOCODE_CACHE) % 250 == 0:
            print_debug("read " + str(len(GEOCODE_CACHE)) +
                        " geocode cache entries.")
    finally:
      geocode_fh.close()
  # try the cache
  key = normalize_cache_key(query)
  if key in GEOCODE_CACHE:
    return GEOCODE_CACHE[key]
  # call Google Maps API
  result = geocode_call(query)
  print_debug("geocode result: " + str(result))
  # False = transient error: return failure but do NOT cache it
  if result == False:
    return False # do not cache
  # cache the result; None = "no such place", cached as permanent failure
  if result == None:
    result = False
    cacheline = query + "|"
  else:
    result = map(filter_cache_delimiters, result)
    cacheline = query + "|" + ";".join(result)
  geocode_fh = open(GEOCODE_CACHE_FN, "a")
  xmlh.print_progress("storing cacheline: "+cacheline, "", SHOW_PROGRESS)
  geocode_fh.write(cacheline + "\n")
  geocode_fh.close()
  GEOCODE_CACHE[key] = result
  return result
def geocode_call(query, retries=4):
  """Queries the Google Maps geocoder and returns: address, latitude,
  longitude, accuracy (as strings). Returns None if the query is
  invalid (result can be cached). Returns False on error (do not
  cache)."""
  # retries are consumed by the recursive calls below (network errors
  # and server-side 500/620 responses)
  if retries < 0:
    print_debug("geocoder retry limit exceeded")
    return False
  print_debug("geocoding '" + query + "'...")
  params = urllib.urlencode(
    {'q':query, 'output':'xml',
     'oe':'utf8', 'sensor':'false',
     'key':'ABQIAAAAxq97AW0x5_CNgn6-nLxSrxQuOQhskTx7t90ovP5xOuY' + \
     '_YrlyqBQajVan2ia99rD9JgAcFrdQnTD4JQ'})
  try:
    maps_fh = urllib.urlopen("http://maps.google.com/maps/geo?%s" % params)
    res = maps_fh.read()
    maps_fh.close()
  except IOError, err:
    # transient network failure: back off briefly and retry
    print_debug("Error contacting Maps API. Sleeping. " + str(err))
    time.sleep(1)
    return geocode_call(query, retries - 1)
  print_debug("response length: "+str(len(res)))
  node = xmlh.simple_parser(res, [], SHOW_PROGRESS)
  respcode = xmlh.get_tag_val(node, "code")
  if respcode == "":
    print_debug("unparseable response: "+res)
    return False
  respcode = int(respcode)
  if respcode in (400, 601, 602, 603): # problem with the query
    return None
  if respcode in (500, 620): # problem with the server
    print_debug("Connection problem or quota exceeded. Sleeping...")
    time.sleep(1)
    return geocode_call(query, retries - 1)
  if respcode != 200:
    return False
  # TODO(danyq): if the query is a lat/lng, find the city-level
  # address, not just the first one.
  addr = xmlh.get_tag_val(node, "address")
  # TODO(danyq): Return street/city/country fields separately so that
  # the frontend can decide what to display. For now, this hack just
  # removes "USA" from all addresses.
  addr = re.sub(r', USA$', r'', addr)
  coords = xmlh.get_tag_val(node, "coordinates")
  if coords == "":
    # missing coordinates: fall back to 0,0 with accuracy 0
    coords = "0.0,0.0,0"
  # Note the order: maps API returns "longitude,latitude,altitude"
  lng, lat = coords.split(",")[:2]
  accuracy = xmlh.get_tag_attr(node, "AddressDetails", "Accuracy")
  if accuracy == "":
    accuracy = "0"
  return (addr, lat, lng, accuracy)
| Python |
#!/usr/bin/python
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#http://usaservice.org/page/event/search_results?orderby=day&state=CA&country=US&event_type%5b%5d=&limit=1000&radius_unit=miles&format=commons_rss&wrap=no
from xml.dom import minidom
import sys
import os
import urllib
import re
import thread
import time
from datetime import datetime
import socket
# fail stalled crawls rather than hanging a thread forever
DEFAULT_TIMEOUT = 30
socket.setdefaulttimeout(DEFAULT_TIMEOUT)
# US state/territory and Canadian province codes to crawl, one thread each
# ("na" = no state given)
STATES = ['AA','AE','AK','AL','AP','AR','AS','AZ','CA','CO','CT','DC','DE','FL','FM','GA','GU','HI','IA','ID','IL','IN','KS','KY','LA','MA','MD','ME','MH','MI','MN','MO','MP','MS','MT','NC','ND','NE','NH','NJ','NM','NV','NY','OH','OK','OR','PA','PR','PW','RI','SC','SD','TN','TX','UT','VA','VI','VT','WA','WI','WV','WY','AB','BC','MB','NB','NL','NT','NS','NU','ON','PE','QC','SK','YT','na']
OUTPUT_FN = "usaservice.txt"
# serializes appends to OUTPUT_FN across crawler threads
file_lock = thread.allocate_lock()
# count of in-flight crawler threads, guarded by crawlers_lock
crawlers = 0
crawlers_lock = thread.allocate_lock()
def get_url(state):
  """Build the usaservice.org commons-RSS search URL for one state."""
  fixed_params = ("&country=US&event_type%5b%5d=&limit=1000"
                  "&radius_unit=miles&format=commons_rss&wrap=no")
  return ("http://usaservice.org/page/event/search_results?orderby=day"
          "&state=" + state + fixed_params)
def crawl_state(state, ignore):
global crawlers, crawlers_lock, OUTPUT_FN, file_lock
crawlers_lock.acquire()
crawlers = crawlers + 1
crawlers_lock.release()
while crawlers > 10:
time.sleep(1)
try:
url = get_url(state)
fh = urllib.urlopen(url)
rss = fh.read()
fh.close()
items = re.findall(r'<item>.+?</item>', rss, re.DOTALL)
if len(items) > 0:
print datetime.now(), "found", len(items), "items for state", state
outstr = ""
for item in items:
item = re.sub(r'(?:\r?\n|\r)',' ', item)
if re.search(r'Find Money For Next 12 Months', item):
continue
outstr += item + "\n"
file_lock.acquire()
outfh = open(OUTPUT_FN, "a")
outfh.write(outstr)
outfh.close()
file_lock.release()
except:
pass
crawlers_lock.acquire()
crawlers = crawlers - 1
crawlers_lock.release()
# NOTE(review): OptionParser is imported but unused in this script
from optparse import OptionParser
if __name__ == "__main__":
  # start fresh: remove any previous output (ignore if absent)
  try:
    os.unlink(OUTPUT_FN)
  except:
    pass
  # one crawler thread per state code
  for state in STATES:
    thread.start_new_thread(crawl_state, (state, "foo"))
  # give them a chance to start
  time.sleep(1)
  # wait for the shared crawler counter to drain back to zero
  while (crawlers > 0):
    print datetime.now(), "waiting for", crawlers, "crawlers to finish."
    time.sleep(1)
  sys.exit(0)
| Python |
#!/usr/bin/python
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: remove silly dependency on dapper.net-- thought I'd need
# it for the full scrape, but ended up not going that way.
"""crawler for craigslist until they provide a real feed."""
from xml.dom import minidom
import sys
import os
import urllib
import re
import thread
import time
import datetime
import socket
# fail stalled fetches quickly; crawls are retried
DEFAULT_TIMEOUT = 10
socket.setdefaulttimeout(DEFAULT_TIMEOUT)
# metro-name list and page cache, both kept on disk between runs
METROS_FN = "craigslist-metros.txt"
CACHE_FN = "craigslist-cache.txt"
# url -> page contents, shared across crawler threads (guard: page_lock)
pages = {}
page_lock = thread.allocate_lock()
# count of in-flight crawler threads (guard: crawlers_lock)
crawlers = 0
crawlers_lock = thread.allocate_lock()
# serializes appends to CACHE_FN
cachefile_lock = thread.allocate_lock()
# set to a lower number if you have problems
MAX_CRAWLERS = 40
def read_metros():
  """Load METROS_FN into the global metros dict (metro url -> name)."""
  global metros
  metros = {}
  fh = open(METROS_FN, "r")
  # BUG FIX: close the file handle (it was previously leaked)
  try:
    for line in fh:
      url, name = line.split("|")
      metros[url] = name
  finally:
    fh.close()
def crawl_metros():
  """Discover craigslist metro/submetro URLs and write "url|name" lines
  to METROS_FN, scraping geo.craigslist.org via a dapper.net scraper.

  The dapper response contains <geo> elements like:
  <geo dataType="RawString" fieldName="geo" href="http://waterloo.craigslist.org/" originalElement="a" type="field">waterloo / cedar falls</geo>
  """
  print "getting toplevel geos..."
  fh = urllib.urlopen("http://www.dapper.net/RunDapp?dappName=craigslistmetros&v=1&applyToUrl=http%3A%2F%2Fgeo.craigslist.org%2Fiso%2Fus")
  geostr = fh.read()
  fh.close()
  dom = minidom.parseString(geostr)
  nodes = dom.getElementsByTagName("geo")
  outfh = open(METROS_FN, "w+")
  # NOTE(review): the domains list is never used -- dead variable
  domains = []
  for node in nodes:
    domain = node.getAttribute("href")
    #print "finding submetros within", domain
    fh1 = urllib.urlopen(domain)
    domain_homepage = fh1.read()
    fh1.close()
    # sample of the homepage markup being scraped below:
    #<td align="center" colspan="5" id="topban">
    #<div>
    #<h2>new york city</h2> <sup><a href="http://en.wikipedia.org/wiki/New_York_City">w</a></sup>
    #<span class="for"><a href="/mnh/" title="manhattan">mnh</a> ... </span>
    #</div>
    #</td>
    topbanstrs = re.findall(r'<td align="center" colspan="5" id="topban">.+?</td>', domain_homepage, re.DOTALL)
    for topbanstr in topbanstrs:
      # submetro links like <a href="/mnh/" title="manhattan">
      links = re.findall(r'<a href="/(.+?)".+?title="(.+?)".+?</a>', topbanstr, re.DOTALL)
      if len(links) > 0:
        for link in links:
          print domain+link[0], ":", link[1]
          outfh.write(domain+link[0]+"|"+link[1]+"\n")
      else:
        # no submetros: fall back to the metro name from the <h2>
        names = re.findall(r'<h2>(.+?)</h2>', domain_homepage, re.DOTALL)
        print domain, ":", names[0]
        outfh.write(domain+"|"+names[0]+"\n")
  outfh.close()
def crawl(url, ignore):
global crawlers, crawlers_lock, pages, page_lock, MAX_CRAWLERS
if url in pages:
return
while crawlers > MAX_CRAWLERS:
time.sleep(1)
# we don't care if several wake at once
crawlers_lock.acquire()
crawlers = crawlers + 1
crawlers_lock.release()
#proxied_url = "http://suprfetch.appspot.com/?url="+urllib.quote(url+"?for_google_and_craigslist.org_project_footprint_please_dont_block")
proxied_url = "http://suprfetch.appspot.com/?url="+urllib.quote(url)
page = ""
attempts = 0
while attempts < 3 and page == "":
try:
fh = urllib.urlopen(proxied_url)
page = fh.read()
fh.close()
except:
page = "" # in case close() threw exception
attempts = attempts + 1
print "open failed, retry after", attempts, "attempts (url="+url+")"
time.sleep(1)
if re.search(r'This IP has been automatically blocked', page, re.DOTALL):
print "uh oh: craiglist is blocking us (IP blocking). exiting..."
sys.exit(1)
if (re.search(r'sorry.google.com/sorry/', page) or
re.search(r'to automated requests from a computer virus or spyware', page, re.DOTALL)):
print "uh oh: google is blocking us (DOS detector). exiting..."
sys.exit(1)
if re.search(r'<TITLE>302 Moved</TITLE>"',page, re.DOTALL):
newlocstr = re.findall(r'The document has moved <A HREF="(.+?)"',page)
print "being redirected to",newlocstr[0]
crawl(newlocstr[0], "foo")
return
if attempts >= 3:
print "crawl failed after 3 attempts:",url
return
page_lock.acquire()
pages[url] = page
page_lock.release()
cached_page = re.sub(r'(?:\r?\n|\r)',' ',page)
cachefile_lock.acquire()
outfh = open(CACHE_FN, "a")
outfh.write(url+"-Q-"+cached_page+"\n")
outfh.close()
cachefile_lock.release()
crawlers_lock.acquire()
crawlers = crawlers - 1
crawlers_lock.release()
def wait_for_page(url):
  """Block until url appears in the pages dict, then return its contents."""
  while True:
    page_lock.acquire()
    found = pages.get(url, "")
    page_lock.release()
    if found != "":
      return found
    time.sleep(2)
def sync_fetch(url):
crawl(url, "")
if url not in pages:
print "sync_fetch, failed to crawl url",url
sys.exit(1)
return pages[url]
# wall-clock time at program start, for throughput reporting
progstart = time.time()
def secs_since_progstart():
  """Return seconds of wall-clock time elapsed since module load."""
  return time.time() - progstart
def crawl_metro_page(url, unused):
  """Crawl one metro listing page: fetch every listing it links to and
  spawn threads for the pagination (indexNNN.html) pages.

  Args:
    url: metro listing-page url.
    unused: unused (thread.start_new_thread requires an args tuple).
  """
  global crawlers, crawlers_lock, pages, page_lock
  listingpage = sync_fetch(url)
  listingurls = re.findall(r'<p><a href="/(.+?)">', listingpage)
  # listing links are site-relative: resolve against the metro domain root
  base = re.sub(r'.org/.+', '.org/', url)
  for listing_url in listingurls:
    #print "found",base+listing_url,"in",url
    crawl(base+listing_url, "")
  # pagination links are relative to the current directory
  path = re.sub(r'[^/]+$', '', url)
  nextpages = re.findall(r'<a href="(index[0-9]+[.]html)"', listingpage)
  for nextpage_url in nextpages:
    #print "found",path+nextpage_url,"in",url
    thread.start_new_thread(crawl_metro_page, (path+nextpage_url, ""))
def parse_cache_file(s, listings_only=False, printerrors=True):
global pages
for i,line in enumerate(s.splitlines()):
#print line[0:100]
res = re.findall(r'^(.+?)-Q-(.+)', line)
try:
url,page = res[0][0], res[0][1]
if (not listings_only or re.search(r'html$', url)):
pages[url] = page
except:
if printerrors:
print "error parsing cache file on line",i+1
print line
def load_cache():
global CACHE_FN
try:
fh = open(CACHE_FN, "r")
instr = fh.read()
print "closing cache file", CACHE_FN
fh.close()
print "parsing cache data", len(instr), "bytes"
parse_cache_file(instr, False)
print "loaded", len(pages), "pages."
except:
# ignore errors if file doesn't exist
pass
def print_status():
  """Report crawl throughput every 2 seconds; return when progress stalls.

  Gives up after 100 consecutive polls (~200s) with no newly crawled
  pages, which is how the main thread decides the crawl is finished.
  """
  global pages, num_cached_pages, crawlers
  # consecutive polls with no progress
  samesame = 0
  last_crawled_pages = 0
  while True:
    # pages preloaded from the cache don't count as crawl progress
    crawled_pages = len(pages) - num_cached_pages
    pages_per_sec = int(crawled_pages/secs_since_progstart())
    msg = str(secs_since_progstart())+": main thread: "
    msg += "waiting for " + str(crawlers) + " crawlers.\n"
    msg += str(crawled_pages) + " pages crawled so far"
    msg += "(" + str(pages_per_sec) + " pages/sec). "
    msg += str(len(pages)) + " total pages."
    print msg
    if last_crawled_pages == crawled_pages:
      samesame += 1
      if samesame >= 100:
        print "done (waited long enough)."
        break
    else:
      # NOTE(review): samesame is not reset here, so the stall counter
      # keeps accumulating across bursts of progress -- confirm intended
      last_crawled_pages = crawled_pages
    time.sleep(2)
from optparse import OptionParser
if __name__ == "__main__":
  parser = OptionParser("usage: %prog [options]...")
  parser.set_defaults(metros=False)
  parser.set_defaults(load_cache=True)
  # --metros: re-crawl the metro list before crawling listings
  parser.add_option("--metros", action="store_true", dest="metros")
  # --load_cache / --noload_cache: reuse or discard the on-disk page cache
  parser.add_option("--load_cache", action="store_true", dest="load_cache")
  parser.add_option("--noload_cache", action="store_false", dest="load_cache")
  (options, args) = parser.parse_args(sys.argv[1:])
  if options.metros:
    crawl_metros()
  read_metros()
  if options.load_cache:
    load_cache()
  else:
    # fresh crawl: delete the cache file (ignore if absent)
    try:
      os.unlink(CACHE_FN)
    except:
      pass
  # remember how many pages came from cache so print_status can compute
  # crawl throughput from newly-fetched pages only
  num_cached_pages = len(pages)
  outstr = ""
  # one thread per metro, starting at its volunteer section
  for url in metros:
    thread.start_new_thread(crawl_metro_page, (url+"vol/", ""))
  print_status()
  sys.exit(0)
| Python |
#!/usr/bin/python
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
main() for the crawling/parsing/loading pipeline
"""
#from xml.dom.ext import PrettyPrint
import gzip
import hashlib
import urllib
import re
from datetime import datetime
import geocoder
import parse_footprint
import parse_gspreadsheet as pgs
import parse_usaservice
import parse_networkforgood
import parse_idealist
import parse_craigslist
import parse_volunteermatch
import subprocess
import sys
import time
import xml_helpers as xmlh
from optparse import OptionParser
import dateutil
import dateutil.tz
import dateutil.parser
# output record format: tab-separated fields, newline-separated records
FIELDSEP = "\t"
RECORDSEP = "\n"
MAX_ABSTRACT_LEN = 300
# runtime flags; presumably set from command-line options in main --
# confirm at the option-parsing site (not visible here)
DEBUG = False
PROGRESS = False
PRINTHEAD = False
ABRIDGED = False
OUTPUTFMT = "fpxml"
# set a nice long timeout
import socket
socket.setdefaulttimeout(600.0)
# pick a latlng that'll never match real queries
UNKNOWN_LAT = UNKNOWN_LNG = "-10"
UNKNOWN_LATLNG = UNKNOWN_LAT + "," + UNKNOWN_LNG
# pick a latlng that'll never match real queries
LOCATIONLESS_LAT = LOCATIONLESS_LNG = "0"
LOCATIONLESS_LATLNG = LOCATIONLESS_LAT + "," + LOCATIONLESS_LNG
HEADER_ALREADY_OUTPUT = False
#BASE_PUB_URL = "http://change.gov/"
BASE_PUB_URL = "http://adamsah.net/"
# Fields exposed to search, mapped to their field type (a subset of the
# full FIELDTYPES mapping below).
SEARCHFIELDS = {
  # required
  "description":"builtin",
  "event_date_range":"builtin",
  "link":"builtin",
  "location":"builtin",
  "title":"builtin",
  # needed for search restricts
  "latitude":"float",
  "longitude":"float",
  # needed for query by time-of-day
  "startTime":"integer",
  "endTime":"integer",
  # needed for basic search results
  "id":"builtin",
  "detailURL":"URL",
  "abstract":"string",
  "location_string":"string",
  "feed_providerName":"string",
  }
# All known output fields mapped to their field type. Valid types are:
# builtin, boolean, integer, float, URL, dateTime, string.
FIELDTYPES = {
  "title":"builtin",
  "description":"builtin",
  "link":"builtin",
  "event_type":"builtin",
  "quantity":"builtin",
  "image_link":"builtin",
  "event_date_range":"builtin",
  "id":"builtin",
  "location":"builtin",
  "paid":"boolean",
  "openended":"boolean",
  "volunteersSlots":"integer",
  "volunteersFilled":"integer",
  "volunteersNeeded":"integer",
  "minimumAge":"integer",
  "startTime":"integer",
  "endTime":"integer",
  "latitude":"float",
  "longitude":"float",
  "providerURL":"URL",
  "detailURL":"URL",
  "org_organizationURL":"URL",
  "org_logoURL":"URL",
  "org_providerURL":"URL",
  "feed_providerURL":"URL",
  "lastUpdated":"dateTime",
  "expires":"dateTime",
  "feed_createdDateTime":"dateTime",
  # note: type "location" isn"t safe because the Base geocoder can fail,
  # causing the record to be rejected
  "duration":"string",
  "abstract":"string",
  "sexRestrictedTo":"string",
  "skills":"string",
  "contactName":"string",
  "contactPhone":"string",
  "contactEmail":"string",
  "language":"string",
  "org_name":"string",
  "org_missionStatement":"string",
  "org_description":"string",
  "org_phone":"string",
  "org_fax":"string",
  "org_email":"string",
  "categories":"string",
  "audiences":"string",
  "commitmentHoursPerWeek":"string",
  "employer":"string",
  "feed_providerName":"string",
  "feed_description":"string",
  "providerID":"string",
  "feed_providerID":"string",
  "feedID":"string",
  "opportunityID":"string",
  "organizationID":"string",
  # BUG FIX: was the typo "strng"
  "sponsoringOrganizationID":"string",
  "volunteerHubOrganizationID":"string",
  "org_nationalEIN":"string",
  "org_guidestarID":"string",
  "venue_name":"string",
  "location_string":"string",
  "orgLocation":"string",
  }
def print_progress(msg, filename="", progress=None):
  """print progress indicator.

  Args:
    msg: message to print.
    filename: optional filename to prefix the message with.
    progress: overrides the module-level PROGRESS flag; None means
      "use PROGRESS" (can't be the default arg value because PROGRESS
      may change after this module is loaded).
  """
  # idiom fix: compare to None with `is`, not `==`
  if progress is None:
    progress = PROGRESS
  xmlh.print_progress(msg, filename, progress=progress)
def print_status(msg, filename="", progress=None):
  """print status indicator, for stats collection.

  Args:
    msg: message to print.
    filename: optional filename to prefix the message with.
    progress: overrides the module-level PROGRESS flag; None means
      "use PROGRESS" (can't be the default arg value because PROGRESS
      may change after this module is loaded).
  """
  # idiom fix: compare to None with `is`, not `==`
  if progress is None:
    progress = PROGRESS
  xmlh.print_status(msg, filename, progress=progress)
def print_debug(msg):
  """print debug message."""
  # gated on the module-level DEBUG flag; timestamped like the other printers
  if DEBUG:
    print datetime.now(), msg
# Google Base uses ISO8601... in PST -- I kid you not:
# http://base.google.com/support/bin/answer.py?
# answer=78170&hl=en#Events%20and%20Activities
# and worse, you have to change an env var in python...
def convert_dt_to_gbase(datestr, timestr, timezone):
"""converts dates like YYYY-MM-DD, times like HH:MM:SS and
timezones like America/New_York, into Google Base format."""
try:
tzinfo = dateutil.tz.tzstr(timezone)
except:
tzinfo = dateutil.tz.tzutc()
try:
timestr = dateutil.parser.parse(datestr + " " + timestr)
except:
print "error parsing datetime: "+datestr+" "+timestr
return ""
timestr = timestr.replace(tzinfo=tzinfo)
pst = dateutil.tz.tzstr("PST8PDT")
timestr = timestr.astimezone(pst)
if timestr.year < 1900:
timestr = timestr.replace(year=timestr.year+1900)
res = timestr.strftime("%Y-%m-%dT%H:%M:%S")
res = re.sub(r'Z$', '', res)
return res
# repeated FPXML elements whose children are flattened to one CSV value
CSV_REPEATED_FIELDS = ['categories', 'audiences']
# FPXML opportunity fields copied to the output under the same name
DIRECT_MAP_FIELDS = [
    'opportunityID', 'organizationID', 'volunteersSlots', 'volunteersFilled',
    'volunteersNeeded', 'minimumAge', 'sexRestrictedTo', 'skills', 'contactName',
    'contactPhone', 'contactEmail', 'providerURL', 'language', 'lastUpdated',
    'expires', 'detailURL']
# <Organization> subfields, emitted with an "org_" prefix
ORGANIZATION_FIELDS = [
    'nationalEIN', 'guidestarID', 'name', 'missionStatement', 'description',
    'phone', 'fax', 'email', 'organizationURL', 'logoURL', 'providerURL']
def flattener_value(node):
  """Return the text of a DOM node's first child, commas removed
  (empty string when the node has no children)."""
  child = node.firstChild
  if child is None:
    return ""
  return child.data.replace(",", "")
def flatten_to_csv(domnode):
  """Join the flattened, non-empty child values of domnode with commas."""
  values = [flattener_value(child) for child in domnode.childNodes]
  return ",".join([val for val in values if val != ""])
def output_field(name, value):
"""print a field value, handling long strings, header lines and
custom datatypes."""
#global PRINTHEAD, DEBUG
if PRINTHEAD:
if name not in FIELDTYPES:
print datetime.now(), "no type for field: " + name + FIELDTYPES[name]
sys.exit(1)
elif FIELDTYPES[name] == "builtin":
return name
elif OUTPUTFMT == "basetsv":
return "c:"+name+":"+FIELDTYPES[name]
else:
return name+":"+FIELDTYPES[name]
if OUTPUTFMT == "basetsv":
# grr: Base tries to treat commas in custom fields as being lists ?!
# http://groups.google.com/group/base-help-basics/browse_thread/thread/
# c4f51447191a6741
# TODO: note that this may cause fields to expand beyond their maxlen
# (e.g. abstract)
value = re.sub(r',', ';;', value)
if DEBUG:
if (len(value) > 70):
value = value[0:67] + "... (" + str(len(value)) + " bytes)"
return name.rjust(22) + " : " + value
if (FIELDTYPES[name] == "dateTime"):
return convert_dt_to_gbase(value, "", "UTC")
return value
def get_addr_field(node, field):
  """assuming a node is named (field), return it with optional trailing spc."""
  value = xmlh.get_tag_val(node, field)
  if value == "":
    return value
  return value + " "
def get_city_addr_str(node):
  """synthesize a city-region-postal-country string."""
  # note: avoid commas, so it works with CSV
  parts = [get_addr_field(node, name)
           for name in ("city", "region", "postalCode", "country")]
  return "".join(parts)
def get_street_addr_str(node):
  """concatenate street address fields"""
  names = ("streetAddress1", "streetAddress2", "streetAddress3")
  return "".join([get_addr_field(node, name) for name in names])
def get_full_addr_str(node):
  """concatenate street address and city/region/postal/country fields"""
  return get_street_addr_str(node) + get_city_addr_str(node)
def find_geocoded_location(node):
  """Try a multitude of field combinations to get a geocode. Returns:
  raw_location, address, latitude, longitude, accuracy (as strings)."""
  # Combinations of fields to try geocoding.
  # Ordered most-specific first; the first combination whose geocode
  # comes back within the allowed accuracy wins.
  field_combinations = \
    ["streetAddress1,streetAddress2,streetAddress3,"
     + "city,region,postalCode,country",
     "streetAddress2,streetAddress3,"
     + "city,region,postalCode,country",
     "streetAddress3,city,region,postalCode,country",
     "city,region,postalCode,country",
     "postalCode,country",
     "city,region,country",
     "region,country",
     "latitude,longitude"]
  # Upper bound on the accuracy provided by a given field. This
  # prevents false positives like matching the city field to a street
  # name.
  field_accuracy = { "streetAddress1": 9,
                     "streetAddress2": 9,
                     "streetAddress3": 9,
                     "city": 5,
                     "region": 5,
                     "postalCode": 5,
                     "country": 1,
                     "latitude": 9,
                     "longitude": 9 }
  for fields in field_combinations:
    field_list = fields.split(",")
    # Compose the query and find the max accuracy.
    query = []
    max_accuracy = 0
    for field in field_list:
      field_val = xmlh.get_tag_val(node, field)
      if field_val != "":
        query += [field_val]
        max_accuracy = max(max_accuracy, field_accuracy[field])
    query = ",".join(query)
    print_debug("trying: " + query + " (" + str(max_accuracy) + ")")
    result = geocoder.geocode(query)
    if result:
      addr, lat, lng, acc = result
      # a geocode coarser than the fields justify is treated as a miss;
      # fall through and try the next (less specific) combination
      if int(acc) <= max_accuracy:
        print_debug("success: " + str(result))
        return result
      print_debug("incorrect accuracy: " + str(result))
  # nothing geocoded: return the raw address with zeroed lat/lng/accuracy
  result = (get_full_addr_str(node), "0.0", "0.0", "0")
  print_debug("failed: " + str(result))
  return result
def output_loc_field(node, mapped_name):
  """macro for output_field( convert node to loc field )"""
  full_addr = get_street_addr_str(node) + get_city_addr_str(node)
  return output_field(mapped_name, full_addr)
def output_tag_value(node, fieldname):
  """macro for output_field( get node value )"""
  value = xmlh.get_tag_val(node, fieldname)
  return output_field(fieldname, value)
def output_tag_value_renamed(node, xmlname, newname):
  """macro for output_field( get node value ) then emitted as newname"""
  value = xmlh.get_tag_val(node, xmlname)
  return output_field(newname, value)
def compute_stable_id(opp, org, locstr, openended, duration,
                      hrs_per_week, startend):
  """core algorithm for computing an opportunity's unique id.

  Hashes org EIN (or URL), location, time info, title, abstract and
  detailURL together -- the goal is an id that is stable across feed
  re-crawls so duplicate listings collapse."""
  if DEBUG:
    print "opp=" + str(opp) # shuts up pylint
  eid = xmlh.get_tag_val(org, "nationalEIN")
  if (eid == ""):
    # support informal "organizations" that lack EINs
    eid = xmlh.get_tag_val(org, "organizationURL")
  # TODO: if two providers have same listing, good odds the
  # locations will be slightly different...
  loc = locstr
  # TODO: if two providers have same listing, the time info
  # is unlikely to be exactly the same, incl. missing fields
  timestr = openended + duration + hrs_per_week + startend
  title = get_title(opp)
  abstract = get_abstract(opp)
  detailURL = xmlh.get_tag_val(opp, 'detailURL')
  # order matters: changing this join order changes every generated id
  hashstr = "\t".join([eid, loc, timestr, title, abstract, detailURL])
  return hashlib.md5(hashstr).hexdigest()
def get_abstract(opp):
  """process abstract-- shorten, strip newlines and formatting.
  TODO: cache/memoize this."""
  raw = xmlh.get_tag_val(opp, "abstract")
  if raw == "":
    # fall back to the full description when no abstract was provided
    raw = xmlh.get_tag_val(opp, "description")
  return cleanse_snippet(raw)[:MAX_ABSTRACT_LEN]
def get_direct_mapped_fields(opp, org):
  """map a field directly from FPXML to Google Base.

  Emits abstract, paid flag, DIRECT_MAP_FIELDS, org_* fields,
  CSV_REPEATED_FIELDS and orgLocation, separated by FIELDSEP.
  Field order here must match the header order (see output_header)."""
  outstr = output_field("abstract", get_abstract(opp))
  if ABRIDGED:
    return outstr
  # normalize paid to "y"/"n"; anything not starting with y/Y counts as "n"
  paid = xmlh.get_tag_val(opp, "paid")
  if (paid == "" or paid.lower()[0] != "y"):
    paid = "n"
  else:
    paid = "y"
  outstr += FIELDSEP + output_field("paid", paid)
  for field in DIRECT_MAP_FIELDS:
    outstr += FIELDSEP + output_tag_value(opp, field)
  for field in ORGANIZATION_FIELDS:
    outstr += FIELDSEP + output_field("org_"+field,
                                      xmlh.get_tag_val(org, field))
  for field in CSV_REPEATED_FIELDS:
    outstr += FIELDSEP
    fieldval = opp.getElementsByTagName(field)
    val = ""
    if (fieldval.length > 0):
      # flatten the repeated children of the first matching element
      val = flatten_to_csv(fieldval[0])
    outstr += output_field(field, val)
  # orgLocation
  outstr += FIELDSEP
  fieldval = opp.getElementsByTagName("orgLocation")
  if (fieldval.length > 0):
    outstr += output_loc_field(fieldval[0], "orgLocation")
  else:
    outstr += output_field("orgLocation", "")
  return outstr
def get_base_other_fields(opp, org):
  """These are fields that exist in other Base schemas-- for the sake of
  possible syndication, we try to make ourselves look like other Base
  feeds. Since we're talking about a small overlap, these fields are
  populated *as well as* direct mapping of the footprint XML fields."""
  employer = output_field("employer", xmlh.get_tag_val(org, "name"))
  if ABRIDGED:
    return employer
  pieces = [
    employer,
    output_field("quantity", xmlh.get_tag_val(opp, "volunteersNeeded")),
    output_field("image_link", xmlh.get_tag_val(org, "logoURL")),
  ]
  # don't map expiration_date -- Base has strict limits (e.g. 2 weeks)
  return FIELDSEP.join(pieces)
# a sentence-start capital followed by a long run of caps/digits/punct --
# used below to down-case SHOUTED sentences in titles and snippets
sent_start_rx = re.compile(r'((^\s*|[.]\s+)[A-Z])([A-Z0-9 ,;-]{13,})')
def cleanse_snippet(instr):
  """strip HTML/XML markup and escapes from a snippet, collapse
  whitespace, and de-shout all-caps sentence starts."""
  # convert known XML/XHTML chars
  # bugfix: these two substitutions had been entity-decoded into no-ops
  # (space->space, quote->quote); restored the intended entity patterns.
  instr = re.sub(r'&nbsp;', ' ', instr)
  instr = re.sub(r'&quot;', '"', instr)
  instr = re.sub(r'&(uml|middot|ndash|bull|mdash|hellip);', '-', instr)
  # strip \n and \b
  instr = re.sub(r'(\\[bn])+', ' ', instr)
  # doubly-escaped HTML
  instr = re.sub(r'&lt;.+?&gt;', '', instr)
  instr = re.sub(r'&(amp;)+([a-z]+);', r'&\2;', instr)
  instr = re.sub(r'&#\d+;', '', instr)
  # singly-escaped HTML
  # </p>, <br/>
  instr = re.sub(r'</?[a-zA-Z]+?/?>', '', instr)
  # <a href=...>, <font ...>
  # bugfix: re.IGNORECASE was passed as re.sub's positional `count` arg
  # (so it limited substitutions to 2 and was NOT case-insensitive);
  # compile the pattern with the flag instead (works on python 2.5).
  instr = re.sub(re.compile(r'<?(font|a|p|img)[^&]*/?>', re.IGNORECASE),
                 '', instr)
  # strip leftover XML escaped chars
  instr = re.sub(r'&([a-z]+|#[0-9]+);', '', instr)
  # strip repeated spaces, so maxlen works
  instr = re.sub(r'\s+', ' ', instr)
  # fix obnoxious all caps titles and snippets
  # (renamed loop var: the original shadowed the builtin `str`)
  for caps in re.finditer(sent_start_rx, instr):
    instr = re.sub(sent_start_rx, caps.group(1)+caps.group(3).lower(), instr, 1)
  return instr
# a space followed by a lowercase letter -- candidates for title-casing
lcword_rx = re.compile(r'(\s)([a-z])')
def get_title(opp):
  """compute a clean title (cleansed, then title-cased word starts).
  TODO: do this once and cache/memoize it"""
  title = cleanse_snippet(output_tag_value(opp, "title"))
  # idiom fix: the loop variable shadowed the builtin `str`
  for word in re.finditer(lcword_rx, title):
    title = re.sub(lcword_rx, word.group(1)+word.group(2).upper(), title, 1)
  return title
def get_event_reqd_fields(opp):
  """Fields required by Google Base, note that they aren't necessarily
  used by the FP app."""
  pieces = [
    get_title(opp),
    output_tag_value(opp, "description"),
    output_field("link", BASE_PUB_URL),
  ]
  return FIELDSEP.join(pieces)
def get_feed_fields(feedinfo):
  """Fields from the <Feed> portion of FPXML."""
  outstr = output_tag_value_renamed(feedinfo,
                                    "providerName", "feed_providerName")
  if ABRIDGED:
    return outstr
  outstr += FIELDSEP + output_tag_value(feedinfo, "feedID")
  # remaining <Feed> fields are emitted under a feed_ prefix
  renames = (("providerID", "feed_providerID"),
             ("providerURL", "feed_providerURL"),
             ("description", "feed_description"),
             ("createdDateTime", "feed_createdDateTime"))
  for xmlname, newname in renames:
    outstr += FIELDSEP + output_tag_value_renamed(feedinfo, xmlname, newname)
  return outstr
def output_opportunity(opp, feedinfo, known_orgs, totrecs):
  """main function for outputting a complete opportunity.

  Emits one record per (time, location) combination. Returns the
  updated record count and the output string ("" on skip)."""
  outstr = ""
  opp_id = xmlh.get_tag_val(opp, "volunteerOpportunityID")
  if (opp_id == ""):
    print_progress("no opportunityID")
    return totrecs, ""
  org_id = xmlh.get_tag_val(opp, "sponsoringOrganizationID")
  if (org_id not in known_orgs):
    print_progress("unknown sponsoringOrganizationID: " +\
                   org_id + ". skipping opportunity " + opp_id)
    return totrecs, ""
  org = known_orgs[org_id]
  opp_locations = opp.getElementsByTagName("location")
  opp_times = opp.getElementsByTagName("dateTimeDuration")
  repeated_fields = get_repeated_fields(feedinfo, opp, org)
  # no time info: emit one open-ended record with placeholder dates
  if len(opp_times) == 0:
    opp_times = [ None ]
  for opptime in opp_times:
    if opptime == None:
      startend = convert_dt_to_gbase("1971-01-01", "00:00:00-00:00", "UTC")
      starttime = "0000"
      endtime = "2359"
      openended = "Yes"
    else:
      # event_date_range
      # e.g. 2006-12-20T23:00:00/2006-12-21T08:30:00, in PST (GMT-8)
      start_date = xmlh.get_tag_val(opptime, "startDate")
      start_time = xmlh.get_tag_val(opptime, "startTime")
      end_date = xmlh.get_tag_val(opptime, "endDate")
      end_time = xmlh.get_tag_val(opptime, "endTime")
      openended = xmlh.get_tag_val(opptime, "openEnded")
      # e.g. 2006-12-20T23:00:00/2006-12-21T08:30:00, in PST (GMT-8)
      if (start_date == ""):
        start_date = "1971-01-01"
        start_time = "00:00:00-00:00"
      startend = convert_dt_to_gbase(start_date, start_time, "UTC")
      # only append an end when it sorts after the start (string compare)
      if (end_date != "" and end_date + end_time > start_date + start_time):
        endstr = convert_dt_to_gbase(end_date, end_time, "UTC")
        startend += "/" + endstr
    # NOTE(review): when opptime is None these lookups receive None --
    # presumably xmlh.get_tag_val tolerates that and returns ""; confirm.
    duration = xmlh.get_tag_val(opptime, "duration")
    hrs_per_week = xmlh.get_tag_val(opptime, "commitmentHoursPerWeek")
    time_fields = get_time_fields(openended, duration, hrs_per_week, startend)
    # no location info: emit one record with zeroed location fields
    if len(opp_locations) == 0:
      opp_locations = [ None ]
    for opploc in opp_locations:
      totrecs = totrecs + 1
      if PROGRESS and totrecs % 250 == 0:
        print_progress(str(totrecs)+" records generated.")
      if opploc == None:
        # latlng and geocoded_loc are assigned but unused below
        locstr, latlng, geocoded_loc = ("", "", "")
        loc_fields = get_loc_fields("0.0", "0.0", "0.0", "", "")
      else:
        locstr = get_full_addr_str(opploc)
        addr, lat, lng, acc = find_geocoded_location(opploc)
        # NOTE(review): lat/lng are shifted by +1000 here -- presumably a
        # deliberate offset decoded downstream; confirm before changing.
        loc_fields = get_loc_fields("", str(float(lat)+1000.0),
                                    str(float(lng)+1000.0), addr,
                                    xmlh.get_tag_val(opploc, "name"))
      # stable id depends on time+location, so recompute per record
      opp_id = compute_stable_id(opp, org, locstr, openended, duration,
                                 hrs_per_week, startend)
      outstr += output_field("id", opp_id)
      outstr += repeated_fields
      outstr += time_fields
      outstr += loc_fields
      outstr += RECORDSEP
  return totrecs, outstr
def get_time_fields(openended, duration, hrs_per_week, event_date_range):
  """output time-related fields, e.g. for multiple times per event.

  Extracts HHMM start/end times from an ISO-ish range string like
  2010-02-26T16:00:00/2010-02-26T16:30:00 (end part optional)."""
  # 2010-02-26T16:00:00/2010-02-26T16:00:00
  # group(1)=start HH, group(2)=start MM, group(3)=whole "/...end" part,
  # group(4)=end HH, group(5)=end MM
  match = re.search(r'T(\d\d):(\d\d):\d\d(\s*/\s*.+?T(\d\d):(\d\d):\d\d)?',
                    event_date_range)
  startstr = endstr = ""
  if match:
    if match.group(2):
      startstr = match.group(1) + match.group(2)
    else:
      # TODO: exception (but need a way to throw exceptions in general)
      # e.g. ignore this record, stop this feed, etc.
      pass
    if match.group(3):
      endstr = match.group(4) + match.group(5)
  time_fields = FIELDSEP + output_field("event_date_range", event_date_range)
  time_fields += FIELDSEP + output_field("startTime", startstr)
  time_fields += FIELDSEP + output_field("endTime", endstr)
  if ABRIDGED:
    return time_fields
  time_fields += FIELDSEP + output_field("openended", openended)
  time_fields += FIELDSEP + output_field("duration", duration)
  time_fields += FIELDSEP + output_field("commitmentHoursPerWeek", hrs_per_week)
  return time_fields
def get_loc_fields(location, latitude, longitude, location_string,
                   venue_name):
  """output location-related fields, e.g. for multiple locations per event."""
  pairs = (("location", location),
           ("latitude", latitude),
           ("longitude", longitude),
           ("location_string", location_string))
  loc_fields = ""
  for fieldname, fieldval in pairs:
    loc_fields += FIELDSEP + output_field(fieldname, fieldval)
  if ABRIDGED:
    return loc_fields
  return loc_fields + FIELDSEP + output_field("venue_name", venue_name)
def get_repeated_fields(feedinfo, opp, org):
  """output all fields that are repeated for each time and location."""
  sections = (get_feed_fields(feedinfo),
              get_event_reqd_fields(opp),
              get_base_other_fields(opp, org),
              get_direct_mapped_fields(opp, org))
  repeated_fields = ""
  for section in sections:
    repeated_fields += FIELDSEP + section
  return repeated_fields
def output_header(feedinfo, opp, org):
  """fake opportunity printer, which prints the header line instead.

  Temporarily flips the module-level PRINTHEAD flag so that
  output_field() emits column names instead of values, then runs the
  same field pipeline as a real record."""
  global PRINTHEAD, HEADER_ALREADY_OUTPUT
  # no matter what, only print the header once!
  if HEADER_ALREADY_OUTPUT:
    return ""
  HEADER_ALREADY_OUTPUT = True
  PRINTHEAD = True
  outstr = output_field("id", "")
  # values passed below are ignored in header mode; only names matter
  repeated_fields = get_repeated_fields(feedinfo, opp, org)
  time_fields = get_time_fields("", "", "", "")
  loc_fields = get_loc_fields("", "", "", "", "")
  PRINTHEAD = False
  return outstr + repeated_fields + time_fields + loc_fields + RECORDSEP
def convert_to_footprint_xml(instr, do_fastparse, maxrecs, progress):
  """macro for parsing an FPXML string to XML then format it.

  Args:
    instr: raw FPXML string.
    do_fastparse: use the regex-based fast parser instead of a DOM parse.
    maxrecs: max records to process (TODO: not honored by the slow path).
    progress: whether to print progress while parsing.
  Returns:
    pretty-printed FPXML string.
  """
  # (a pulldom-based streaming implementation was removed here -- per its
  # own comment it caused a RAM explosion even with pulldom)
  if do_fastparse:
    res, numorgs, numopps = parse_footprint.parse_fast(instr, maxrecs, progress)
    return res
  else:
    # slow parse
    xmldoc = parse_footprint.parse(instr, maxrecs, progress)
    # TODO: maxrecs
    return xmlh.prettyxml(xmldoc)
def convert_to_gbase_events_type(instr, origname, fastparse, maxrecs, progress):
"""non-trivial logic for converting FPXML to google base formatting."""
# todo: maxrecs
outstr = ""
print_progress("convert_to_gbase_events_type...", "", progress)
example_org = None
known_orgs = {}
if fastparse:
known_elnames = [
'FeedInfo', 'FootprintFeed', 'Organization', 'Organizations',
'VolunteerOpportunities', 'VolunteerOpportunity', 'abstract',
'audienceTag', 'audienceTags', 'categoryTag', 'categoryTags',
'city', 'commitmentHoursPerWeek', 'contactEmail', 'contactName',
'contactPhone', 'country', 'createdDateTime', 'dateTimeDuration',
'dateTimeDurationType', 'dateTimeDurations', 'description',
'detailURL', 'directions', 'donateURL', 'duration', 'email',
'endDate', 'endTime', 'expires', 'fax', 'feedID', 'guidestarID',
'iCalRecurrence', 'language', 'latitude', 'lastUpdated', 'location',
'locationType', 'locations', 'logoURL', 'longitude', 'minimumAge',
'missionStatement', 'name', 'nationalEIN', 'openEnded',
'organizationID', 'organizationURL', 'paid', 'phone', 'postalCode',
'providerID', 'providerName', 'providerURL', 'region', 'schemaVersion',
'sexRestrictedEnum', 'sexRestrictedTo', 'skills',
'sponsoringOrganizationID', 'startDate', 'startTime', 'streetAddress1',
'streetAddress2', 'streetAddress3', 'title', 'tzOlsonPath', 'virtual',
'volunteerHubOrganizationID', 'volunteerOpportunityID',
'volunteersFilled', 'volunteersSlots', 'volunteersNeeded', 'yesNoEnum'
]
numopps = 0
feedchunks = re.findall(
re.compile('<FeedInfo>.+?</FeedInfo>', re.DOTALL), instr)
for feedchunk in feedchunks:
print_progress("found FeedInfo.", progress=progress)
feedinfo = xmlh.simple_parser(feedchunk, known_elnames, False)
orgchunks = re.findall(
re.compile('<Organization>.+?</Organization>', re.DOTALL), instr)
for orgchunk in orgchunks:
if progress and len(known_orgs) % 250 == 0:
print_progress(str(len(known_orgs))+" organizations seen.")
org = xmlh.simple_parser(orgchunk, known_elnames, False)
org_id = xmlh.get_tag_val(org, "organizationID")
if (org_id != ""):
known_orgs[org_id] = org
if example_org == None:
example_org = org
oppchunks = re.findall(
re.compile('<VolunteerOpportunity>.+?</VolunteerOpportunity>',
re.DOTALL), instr)
for oppchunk in oppchunks:
opp = xmlh.simple_parser(oppchunk, None, False)
if not HEADER_ALREADY_OUTPUT:
outstr = output_header(feedinfo, opp, example_org)
numopps, spiece = output_opportunity(opp, feedinfo, known_orgs, numopps)
outstr += spiece
if (maxrecs > 0 and numopps > maxrecs):
break
## note: preserves order, so diff works (vs. one sweep per element type)
#chunks = re.findall(
# re.compile('<(?:Organization|VolunteerOpportunity|FeedInfo)>.+?'+
# '</(?:Organization|VolunteerOpportunity|FeedInfo)>',
# re.DOTALL), instr)
#for chunk in chunks:
# node = xmlh.simple_parser(chunk, known_elnames, False)
# if re.search("<FeedInfo>", chunk):
# print_progress("found FeedInfo.", progress=progress)
# feedinfo = xmlh.simple_parser(chunk, known_elnames, False)
# continue
# if re.search("<Organization>", chunk):
# if progress and len(known_orgs) % 250 == 0:
# print_progress(str(len(known_orgs))+" organizations seen.")
# org = xmlh.simple_parser(chunk, known_elnames, False)
# org_id = xmlh.get_tag_val(org, "organizationID")
# if (org_id != ""):
# known_orgs[org_id] = org
# if example_org == None:
# example_org = org
# continue
# if re.search("<VolunteerOpportunity>", chunk):
# global HEADER_ALREADY_OUTPUT
# opp = xmlh.simple_parser(chunk, None, False)
# if numopps == 0:
# # reinitialize
# outstr = output_header(feedinfo, node, example_org)
# numopps, spiece = output_opportunity(opp, feedinfo, known_orgs, numopps)
# outstr += spiece
# if (maxrecs > 0 and numopps > maxrecs):
# break
#numopps = 0
#nodes = xml.dom.pulldom.parseString(instr)
#example_org = None
#for type,node in nodes:
# if type == 'START_ELEMENT':
# if node.nodeName == 'FeedInfo':
# nodes.expandNode(node)
# feedinfo = node
# elif node.nodeName == 'Organization':
# nodes.expandNode(node)
# id = xmlh.get_tag_val(node, "organizationID")
# if (id != ""):
# known_orgs[id] = node
# if example_org == None:
# example_org = node
# elif node.nodeName == 'VolunteerOpportunity':
# nodes.expandNode(node)
# if numopps == 0:
# outstr += output_header(feedinfo, node, example_org)
# numopps, spiece = output_opportunity(node, feedinfo,
# known_orgs, numopps)
# outstr += spiece
else:
# not fastparse
footprint_xml = parse_footprint.parse(instr, maxrecs, progress)
feedinfos = footprint_xml.getElementsByTagName("FeedInfo")
if (feedinfos.length != 1):
print datetime.now(), "bad FeedInfo: should only be one section"
# TODO: throw error
sys.exit(1)
feedinfo = feedinfos[0]
organizations = footprint_xml.getElementsByTagName("Organization")
for org in organizations:
org_id = xmlh.get_tag_val(org, "organizationID")
if (org_id != ""):
known_orgs[org_id] = org
opportunities = footprint_xml.getElementsByTagName("VolunteerOpportunity")
numopps = 0
for opp in opportunities:
if numopps == 0:
outstr += output_header(feedinfo, opp, organizations[0])
numopps, spiece = output_opportunity(opp, feedinfo, known_orgs, numopps)
outstr += spiece
return outstr, len(known_orgs), numopps
def guess_shortname(filename):
  """from the input filename, guess which feed this is."""
  # checked strictly in order -- e.g. seniorcorps must precede
  # volunteermatch (see note below); "" means no match.
  feed_patterns = (
    ("usa-?service", "usaservice"),
    (r'meetup', "meetup"),
    (r'barackobama', "mybarackobama"),
    (r'united.*way', "unitedway"),
    (r'americanredcross', "americanredcross"),
    (r'citizencorps', "citizencorps"),
    (r'ymca', "ymca"),
    ("habitat", "habitat"),
    ("americansolutions", "americansolutions"),
    ("spreadsheets[.]google[.]com", "gspreadsheet"),
    ("(handson|hot.footprint)", "handsonnetwork"),
    ("(volunteer[.]?gov)", "volunteergov"),
    ("(whichoneis.com|beextra|extraordinari)", "extraordinaries"),
    ("idealist", "idealist"),
    ("(userpostings|/export/Posting)", "footprint_userpostings"),
    ("craigslist", "craigslist"),
    ("americorps", "americorps"),
    ("givingdupage", "givingdupage"),
    ("mlk(_|day)", "mlk_day"),
    ("servenet", "servenet"),
    # note: has to come before volunteermatch
    (r'(seniorcorps|985148b9e3c5b9523ed96c33de482e3d)', "seniorcorps"),
    (r'(volunteermatch|cfef12bf527d2ec1acccba6c4c159687)', "volunteermatch"),
    ("christianvol", "christianvolunteering"),
    ("volunteer(two|2)", "volunteertwo"),
    ("mentorpro", "mentorpro"),
    (r'(mpsg_feed|myproj_servegov)', "myproj_servegov"),
  )
  for pattern, shortname in feed_patterns:
    if re.search(pattern, filename):
      return shortname
  return ""
def ftp_to_base(filename, ftpinfo, instr):
  """ftp the string to base, guessing the feed name from the orig filename.

  Args:
    filename: original input filename, used to pick the upload name.
    ftpinfo: "user:password" credentials for uploads.google.com.
    instr: the file contents to upload (gzipped when a feed is recognized).
  """
  ftplib = __import__('ftplib')
  stringio = __import__('StringIO')
  dest_fn = guess_shortname(filename)
  if dest_fn == "":
    dest_fn = "footprint1.txt"
  else:
    dest_fn = dest_fn + "1.gz"
  if re.search(r'[.]gz$', dest_fn):
    print_progress("compressing data from "+str(len(instr))+" bytes", filename)
    gzip_fh = gzip.open(dest_fn, 'wb', 9)
    gzip_fh.write(instr)
    gzip_fh.close()
    data_fh = open(dest_fn, 'rb')
  else:
    data_fh = stringio.StringIO(instr)
  host = 'uploads.google.com'
  (user, passwd) = ftpinfo.split(":")
  print_progress("connecting to " + host + " as user " + user + "...", filename)
  ftp = ftplib.FTP(host)
  welcomestr = re.sub(r'\n', '\\n', ftp.getwelcome())
  print_progress("FTP server says: "+welcomestr, filename)
  ftp.login(user, passwd)
  print_progress("uploading filename "+dest_fn, filename)
  # bugfix: the original retried forever, making the "giving up." branch
  # below unreachable -- cap the retries so failure can actually surface.
  # also narrowed the bare except to ftplib's error types.
  success = False
  attempts_left = 120
  while not success and attempts_left > 0:
    attempts_left -= 1
    try:
      ftp.storbinary("STOR " + dest_fn, data_fh, 8192)
      success = True
    except ftplib.all_errors:
      # probably ftplib.error_perm: 553: Permission denied on server. (Overwrite)
      print_progress("upload failed-- sleeping and retrying...")
      time.sleep(1)
  if success:
    print_progress("done uploading.")
  else:
    print_progress("giving up.")
  ftp.quit()
  data_fh.close()
def guess_parse_func(inputfmt, filename):
  """from the filename and the --inputfmt,guess the input type and parse func

  Returns (format name, callable) where the callable parses an input
  string into FPXML. Exits the process if no format can be guessed."""
  # for development
  if inputfmt == "fpxml":
    return "fpxml", parse_footprint.parse_fast
  shortname = guess_shortname(filename)
  # FPXML providers
  # fp.parser args appear to be: provider ID, shortname (x2), URL, display
  # name -- TODO confirm against parse_footprint.parser's signature.
  fp = parse_footprint
  if shortname == "handsonnetwork":
    return "fpxml", fp.parser(
      '102', 'handsonnetwork', 'handsonnetwork', 'http://handsonnetwork.org/',
      'HandsOn Network')
  if shortname == "idealist":
    return "fpxml", fp.parser(
      '103', 'idealist', 'idealist', 'http://www.idealist.org/',
      'Idealist')
  if shortname == "volunteermatch":
    return "fpxml", fp.parser(
      '104', 'volunteermatch', 'volunteermatch',
      'http://www.volunteermatch.org/', 'Volunteer Match')
  if shortname == "volunteergov":
    return "fpxml", fp.parser(
      '107', 'volunteergov', 'volunteergov', 'http://www.volunteer.gov/',
      'volunteer.gov')
  if shortname == "extraordinaries":
    return "fpxml", fp.parser(
      '110', 'extraordinaries', 'extraordinaries', 'http://www.beextra.org/',
      'The Extraordinaries')
  if shortname == "meetup":
    return "fpxml", fp.parser(
      '112', 'meetup', 'meetup', 'http://www.meetup.com/',
      'Meetup')
  # NOTE(review): provider ID '115' is also used by mlk_day below --
  # looks like a duplicate ID; confirm against the provider registry.
  if shortname == "americansolutions":
    return "fpxml", fp.parser(
      '115', 'americansolutions', 'americansolutions',
      'http://www.americansolutions.com/',
      'American Solutions for Winning the Future')
  if shortname == "mybarackobama":
    return "fpxml", fp.parser(
      '116', 'mybarackobama', 'mybarackobama', 'http://my.barackobama.com/',
      'Organizing for America / DNC')
  if shortname == "unitedway":
    return "fpxml", fp.parser(
      '122', 'unitedway', 'unitedway', 'http://www.unitedway.org/',
      'United Way')
  if shortname == "americanredcross":
    return "fpxml", fp.parser(
      '123', 'americanredcross', 'americanredcross', 'http://www.givelife.org/',
      'American Red Cross')
  if shortname == "citizencorps":
    return "fpxml", fp.parser(
      '124', 'citizencorps', 'citizencorps', 'http://citizencorps.gov/',
      'Citizen Corps / FEMA')
  if shortname == "ymca":
    return "fpxml", fp.parser(
      '126', 'ymca', 'ymca', 'http://www.ymca.net/',
      'YMCA')
  if shortname == "habitat":
    parser = fp.parser(
      '111', 'habitat', 'habitat',
      'http://www.habitat.org/', 'Habitat for Humanity')
    # wraps the standard parser to pre-clean the habitat feed
    def parse_habitat(instr, maxrecs, progress):
      # fixup bad escaping
      # NOTE(review): pattern and replacement are identical here -- the
      # pattern likely lost an escape (e.g. &amp;code=) in this copy of
      # the file; as written this substitution is a no-op. Confirm
      # against upstream before "fixing".
      newstr = re.sub(r'&code=', '&code=', instr)
      return parser(newstr, maxrecs, progress)
    return "habitat", parse_habitat
  # networkforgood providers
  nfg = parse_networkforgood
  if shortname == "americorps":
    return "nfg", nfg.parser(
      '106', 'americorps', 'americorps', 'http://www.americorps.gov/',
      'AmeriCorps')
  if shortname == "servenet":
    return "nfg", nfg.parser(
      '114', 'servenet', 'servenet', 'http://www.servenet.org/',
      'servenet')
  if shortname == "mlk_day":
    return "nfg", nfg.parser(
      '115', 'mlk_day', 'mlk_day', 'http://my.mlkday.gov/',
      'Martin Luther King day')
  if shortname == "christianvolunteering":
    return "nfg", nfg.parser(
      '117', 'christianvolunteering', 'christianvolunteering',
      'http://www.christianvolunteering.org/', 'Christian Volunteering')
  if shortname == "volunteertwo":
    return "nfg", nfg.parser(
      '118', 'volunteer2', 'volunteer2',
      'http://www.volunteer2.com/', 'Volunteer2')
  if shortname == "mentorpro":
    return "nfg", nfg.parser(
      '119', 'mentor', 'mentor',
      'http://www.mentorpro.org/', 'MENTOR')
  if shortname == "myproj_servegov":
    return "nfg", nfg.parser(
      '120', 'myproj_servegov', 'myproj_servegov',
      'http://myproject.serve.gov/', 'MyprojectServeGov')
  if shortname == "seniorcorps":
    return "nfg", nfg.parser(
      '121', 'seniorcorps', 'seniorcorps',
      'http://www.seniorcorps.gov/', 'SeniorCorps')
  if shortname == "givingdupage":
    return "nfg", nfg.parser(
      '125', 'givingdupage', 'givingdupage', 'http://www.dupageco.org/',
      'Giving Dupage')
  # custom formats
  if shortname == "gspreadsheet":
    return "gspreadsheet", pgs.parse
  if shortname == "usaservice" or shortname == "usasvc":
    return "usaservice", parse_usaservice.parse
  if shortname == "craigslist" or shortname == "cl":
    return "craigslist", parse_craigslist.parse
  # legacy-- to be safe, remove after 9/1/2009
  #if shortname == "volunteermatch" or shortname == "vm":
  #  return "volunteermatch", parse_volunteermatch.parse
  #if shortname == "idealist":
  #  return "idealist", parse_idealist.parse
  print datetime.now(), "couldn't guess input format-- try --inputfmt"
  sys.exit(1)
def clean_input_string(instr):
  """run various cleanups for low-level encoding issues.

  Normalizes newlines, tabs, common iso8859-1 punctuation, and
  non-printable characters, printing progress after each pass."""
  def cleaning_progress(msg):
    """macro: report the current pass and the (unchanged) byte count."""
    print_progress(msg+": "+str(len(instr))+" bytes.")
  cleaning_progress("read file")
  instr = re.sub(r'\r\n?', "\n", instr)
  cleaning_progress("filtered DOS newlines")
  # bugfix: the second alternative had been entity-decoded into a literal
  # tab (making it redundant with \t); restore the XML escape &#9; so
  # escaped tabs in the input are filtered too.
  instr = re.sub(r'(?:\t|&#9;)', " ", instr)
  cleaning_progress("filtered tabs")
  instr = re.sub(r'\xc2?[\x93\x94\222]', "'", instr)
  cleaning_progress("filtered iso8859-1 single quotes")
  instr = re.sub(r'\xc2?[\223\224]', '"', instr)
  cleaning_progress("filtered iso8859-1 double quotes")
  instr = re.sub(r'\xc2?[\225\226\227]', "-", instr)
  cleaning_progress("filtered iso8859-1 dashes")
  instr = xmlh.clean_string(instr)
  cleaning_progress("filtered nonprintables")
  return instr
def parse_options():
  """parse cmdline options.

  Returns (options, args) from optparse.  As a side effect, sets the
  module globals DEBUG, PROGRESS, FIELDSEP, RECORDSEP, ABRIDGED and
  OUTPUTFMT from the parsed flags.  Exits with help text if no input
  files are given.
  """
  global DEBUG, PROGRESS, FIELDSEP, RECORDSEP, ABRIDGED, OUTPUTFMT
  parser = OptionParser("usage: %prog [options] sample_data.xml ...")
  parser.set_defaults(geocode_debug=False)
  parser.set_defaults(debug=False)
  parser.set_defaults(abridged=False)
  parser.set_defaults(progress=False)
  parser.set_defaults(debug_input=False)
  parser.set_defaults(outputfmt="basetsv")
  parser.set_defaults(output="")
  parser.set_defaults(compress_output=False)
  parser.set_defaults(test=False)
  parser.set_defaults(clean=True)
  parser.set_defaults(maxrecs=-1)
  parser.add_option("-d", "--dbg", action="store_true", dest="debug")
  parser.add_option("--abridged", action="store_true", dest="abridged")
  parser.add_option("--noabridged", action="store_false", dest="abridged")
  parser.add_option("--clean", action="store_true", dest="clean")
  parser.add_option("--noclean", action="store_false", dest="clean")
  parser.add_option("--inputfmt", action="store", dest="inputfmt")
  parser.add_option("--test", action="store_true", dest="test")
  parser.add_option("--dbginput", action="store_true", dest="debug_input")
  parser.add_option("--progress", action="store_true", dest="progress")
  parser.add_option("--outputfmt", action="store", dest="outputfmt")
  parser.add_option("--output", action="store", dest="output")
  parser.add_option("--compress_output", action="store_true",
                    dest="compress_output")
  parser.add_option("--nocompress_output", action="store_false",
                    dest="compress_output")
  parser.add_option("-g", "--geodbg", action="store_true", dest="geocode_debug")
  parser.add_option("--ftpinfo", dest="ftpinfo")
  parser.add_option("--fs", "--fieldsep", action="store", dest="fs")
  parser.add_option("--rs", "--recordsep", action="store", dest="rs")
  parser.add_option("-n", "--maxrecords", action="store", dest="maxrecs")
  (options, args) = parser.parse_args(sys.argv[1:])
  if (len(args) == 0):
    parser.print_help()
    sys.exit(0)
  if options.fs != None:
    FIELDSEP = options.fs
  if options.rs != None:
    RECORDSEP = options.rs
  # --dbg implies geocoder debugging, progress output, and a newline
  # field separator (one field per line is easier to eyeball).
  if (options.debug):
    DEBUG = True
    geocoder.GEOCODE_DEBUG = True
    PROGRESS = True
    geocoder.SHOW_PROGRESS = True
    FIELDSEP = "\n"
  if (options.abridged):
    ABRIDGED = True
  if (options.geocode_debug):
    geocoder.GEOCODE_DEBUG = True
  # --test implies progress so the idempotence check is observable.
  if options.test:
    options.progress = True
  if (options.progress):
    PROGRESS = True
    geocoder.SHOW_PROGRESS = True
  if options.ftpinfo and not options.outputfmt:
    options.outputfmt = "basetsv"
  OUTPUTFMT = options.outputfmt
  return options, args
def open_input_filename(filename):
  """handle different file/URL opening methods.

  Returns a readable file-like object for filename, which may be an
  http(s) URL (optionally gzip-compressed), a local .gz file, "-" for
  stdin, or a plain local file.
  """
  if re.search(r'^https?://', filename):
    print_progress("starting download of "+filename)
    outfh = urllib.urlopen(filename)
    if (re.search(r'[.]gz$', filename)):
      # is there a way to fetch and unzip an URL in one shot?
      print_progress("ah, gzip format.")
      content = outfh.read()
      outfh.close()
      print_progress("download done.")
      # hash the downloaded payload so concurrent/successive downloads
      # don't clobber each other's temp file.  (the original called
      # hashlib.md5() with no data, which is a constant digest.)
      tmp_fn = "/tmp/tmp-"+hashlib.md5(content).hexdigest()
      tmpfh = open(tmp_fn, "wb+")
      tmpfh.write(content)
      tmpfh.close()
      outfh = gzip.open(tmp_fn, 'rb')
    return outfh
  elif re.search(r'[.]gz$', filename):
    return gzip.open(filename, 'rb')
  elif filename == "-":
    return sys.stdin
  return open(filename, 'rb')
def test_parse(footprint_xmlstr, maxrecs):
  """run the data through and then re-parse the output.

  Idempotence check: FPXML -> parse -> FPXML should be a fixed point.
  On success, prints the head of the output; on mismatch, writes both
  outputs to /tmp and runs diff on them.
  """
  print datetime.now(), "testing input: generating Footprint XML..."
  fpxml = convert_to_footprint_xml(footprint_xmlstr, True, int(maxrecs), True)
  # free some RAM
  del footprint_xmlstr
  print datetime.now(), "testing input: parsing and regenerating FPXML..."
  fpxml2 = convert_to_footprint_xml(fpxml, True, int(maxrecs), True)
  print datetime.now(), "testing input: comparing outputs..."
  # compare by md5 rather than string equality-- cheap, and the digests
  # double as distinct /tmp filenames for the diff below.
  hash1 = hashlib.md5(fpxml).hexdigest()
  hash2 = hashlib.md5(fpxml2).hexdigest()
  fn1 = "/tmp/pydiff-"+hash1
  fn2 = "/tmp/pydiff-"+hash2
  if hash1 == hash2:
    print datetime.now(), "success: getting head...\n"
    outfh = open(fn1, "w+")
    outfh.write(fpxml)
    outfh.close()
    subprocess.call(['head', fn1])
  else:
    print datetime.now(), "errors-- hash1=" + hash1 + " hash2=" + \
        hash2 + " running diff", fn1, fn2
    outfh = open(fn1, "w+")
    outfh.write(fpxml)
    outfh.close()
    outfh = open(fn2, "w+")
    outfh.write(fpxml2)
    outfh.close()
    # shell out to diff(1) for the comparison.
    subprocess.call(['diff', fn1, fn2])
    # grr-- difflib performance sucks
    #for line in difflib.unified_diff(fpxml, fpxml2, \
    #  fromfile='(first output)', tofile='(second output)'):
    #print line
def process_file(filename, options, providerName="", providerID="",
providerURL=""):
shortname = guess_shortname(filename)
inputfmt, parsefunc = guess_parse_func(options.inputfmt, filename)
infh = open_input_filename(filename)
print_progress("reading data...")
# don't put this inside open_input_filename() because it could be large
instr = infh.read()
print_progress("done reading data.")
# remove bad encodings etc.
if options.clean:
instr = clean_input_string(instr)
# split nasty XML inputs, to help isolate problems
if options.debug_input:
instr = re.sub(r'><', r'>\n<', instr)
print_progress("inputfmt: "+inputfmt)
print_progress("outputfmt: "+options.outputfmt)
print_status("input data: "+str(len(instr))+" bytes", shortname)
print_progress("parsing...")
footprint_xmlstr, numorgs, numopps = \
parsefunc(instr, int(options.maxrecs), PROGRESS)
if (providerID != "" and
footprint_xmlstr.find('<providerID></providerID>')):
footprint_xmlstr = re.sub(
'<providerID></providerID>',
'<providerID>%s</providerID>' % providerID, footprint_xmlstr)
if (providerName != "" and
footprint_xmlstr.find('<providerName></providerName>')):
footprint_xmlstr = re.sub(
'<providerName></providerName>',
'<providerName>%s</providerName>' % providerName, footprint_xmlstr)
if (providerURL != "" and
footprint_xmlstr.find('<providerURL></providerURL>')):
footprint_xmlstr = re.sub(
'<providerURL></providerURL>',
'<providerURL>%s</providerURL>' % providerURL, footprint_xmlstr)
if options.test:
# free some RAM
del instr
test_parse(footprint_xmlstr, options.maxrecs)
sys.exit(0)
fastparse = not options.debug_input
if OUTPUTFMT == "fpxml":
# TODO: pretty printing option
print convert_to_footprint_xml(footprint_xmlstr, fastparse,
int(options.maxrecs), PROGRESS)
sys.exit(0)
if OUTPUTFMT != "basetsv":
print >> sys.stderr, datetime.now(), \
"--outputfmt not implemented: try 'basetsv','fpbasetsv' or 'fpxml'"
sys.exit(1)
outstr, numorgs, numopps = convert_to_gbase_events_type(
footprint_xmlstr, shortname, fastparse, int(options.maxrecs), PROGRESS)
return len(footprint_xmlstr), numorgs, numopps, outstr
def main():
  """main function for cmdline execution.

  Single-feed mode: process args[0] and emit TSV (stdout, file, or ftp).
  Spreadsheet-of-spreadsheets mode (google spreadsheets URL): walk the
  master sheet, one provider per row, and concatenate all outputs.
  """
  start_time = datetime.now()
  options, args = parse_options()
  filename = args[0]
  if re.search("spreadsheets[.]google[.]com", filename):
    if OUTPUTFMT == "fpxml":
      pgs.parser_error("FPXML format not supported for "+
                       "spreadsheet-of-spreadsheets")
      sys.exit(1)
    # rewrite the UI URL into the public cells-feed URL.
    match = re.search(r'key=([^& ]+)', filename)
    url = "http://spreadsheets.google.com/feeds/cells/" + match.group(1)
    url += "/1/public/basic"
    # to avoid hitting 80 columns
    data = {}
    updated = {}
    if PROGRESS:
      print "processing master spreadsheet", url
    maxrow, maxcol = pgs.read_gspreadsheet(url, data, updated, PROGRESS)
    header_row, header_startcol = pgs.find_header_row(data, 'provider name')
    # check to see if there's a header-description row
    header_desc = pgs.cellval(data, header_row+1, header_startcol)
    if not header_desc:
      pgs.parser_error("blank row not allowed below header row")
      sys.exit(1)
    header_desc = header_desc.lower()
    data_startrow = header_row + 1
    if header_desc.find("example") >= 0:
      data_startrow += 1
    # NOTE: 'bytes' shadows the builtin-- kept for log-format stability.
    bytes = numorgs = numopps = 0
    outstr = ""
    for row in range(data_startrow, int(maxrow)+1):
      # the three provider columns are required; a blank one terminates
      # the scan (rows below are assumed empty).
      providerName = pgs.cellval(data, row, header_startcol)
      if providerName is None or providerName == "":
        if PROGRESS:
          print "missing provider name from row "+str(row)
        break
      providerID = pgs.cellval(data, row, header_startcol+1)
      if providerID is None or providerID == "":
        if PROGRESS:
          print "missing provider ID from row "+str(row)
        break
      providerURL = pgs.cellval(data, row, header_startcol+2)
      if providerURL is None or providerURL == "":
        if PROGRESS:
          print "missing provider URL from row "+str(row)
        break
      match = re.search(r'key=([^& ]+)', providerURL)
      providerURL = "http://spreadsheets.google.com/feeds/cells/"
      providerURL += match.group(1)
      providerURL += "/1/public/basic"
      if PROGRESS:
        print "processing spreadsheet", providerURL, "name="+providerName
      providerBytes, providerNumorgs, providerNumopps, tmpstr = process_file(
        providerURL, options, providerName, providerID, providerURL)
      if PROGRESS:
        print "done processing spreadsheet: name="+providerName, \
            "records="+str(providerNumopps), \
            "url="+providerURL
      bytes += providerBytes
      numorgs += providerNumorgs
      numopps += providerNumopps
      outstr += tmpstr
  else:
    bytes, numorgs, numopps, outstr = process_file(filename, options)
  #only need this if Base quoted fields it enabled
  #outstr = re.sub(r'"', r'"', outstr)
  if (options.ftpinfo):
    ftp_to_base(filename, options.ftpinfo, outstr)
  elif options.output == "":
    print outstr,
  elif options.compress_output:
    gzip_fh = gzip.open(options.output, 'wb', 9)
    gzip_fh.write(outstr)
    gzip_fh.close()
  else:
    outfh = open(options.output, "w")
    outfh.write(outstr)
    outfh.close()
  elapsed = datetime.now() - start_time
  # NOTE: if you change this, you also need to update datahub/load_gbase.py
  # and frontend/views.py to avoid breaking the dashboard-- other status
  # messages don't matter.
  shortname = guess_shortname(filename)
  xmlh.print_status("done parsing: output " + str(numorgs) + " organizations" +
                    " and " + str(numopps) + " opportunities" +
                    " (" + str(bytes) + " bytes): " +
                    str(int(elapsed.seconds/60)) + " minutes.",
                    shortname, PROGRESS)
# cmdline entry point.
if __name__ == "__main__":
  main()
| Python |
#!/usr/bin/env python
#
"""
script for loading into googlebase.
Usage: load_gbase.py username password
"""
import sys
import re
import gzip
import bz2
import logging
import subprocess
from datetime import datetime
import footprint_lib
# gbase credentials, filled in by main() from argv.
USERNAME = ""
PASSWORD = ""
# dashboard log locations (LOGPATH is rewritten to "./" in test mode).
LOGPATH = "/home/footprint/public_html/datahub/dashboard/"
LOG_FN = "load_gbase.log"
LOG_FN_BZ2 = "load_gbase.log.bz2"
DETAILED_LOG_FN = "load_gbase_detail.log"
# this file needs to be copied over to frontend/autocomplete/
POPULAR_WORDS_FN = "popular_words.txt"
FIELD_STATS_FN = "field_stats.txt"
GEO_STATS_FN = "geo_stats.txt"
# words excluded from the popular-words index: common english stopwords
# plus footprint-specific noise tokens (see the custom section at the end).
STOPWORDS = set([
  'a', 'about', 'above', 'across', 'after', 'afterwards', 'again', 'against',
  'all', 'almost', 'alone', 'along', 'already', 'also', 'although', 'always',
  'am', 'among', 'amongst', 'amoungst', 'amount', 'an', 'and', 'another', 'any',
  'anyhow', 'anyone', 'anything', 'anyway', 'anywhere', 'are', 'around', 'as',
  'at', 'back', 'be', 'became', 'because', 'become', 'becomes', 'becoming',
  'been', 'before', 'beforehand', 'behind', 'being', 'below', 'beside',
  'besides', 'between', 'beyond', 'bill', 'both', 'bottom', 'but', 'by', 'call',
  'can', 'cannot', 'cant', 'co', 'computer', 'con', 'could', 'couldnt', 'cry',
  'de', 'describe', 'detail', 'do', 'done', 'down', 'due', 'during', 'each',
  'eg', 'eight', 'either', 'eleven', 'else', 'elsewhere', 'empty', 'enough',
  'etc', 'even', 'ever', 'every', 'everyone', 'everything', 'everywhere',
  'except', 'few', 'fifteen', 'fify', 'fill', 'find', 'fire', 'first', 'five',
  'for', 'former', 'formerly', 'forty', 'found', 'four', 'from', 'front','full',
  'further', 'get', 'give', 'go', 'had', 'has', 'hasnt', 'have', 'he', 'hence',
  'her', 'here', 'hereafter', 'hereby', 'herein', 'hereupon', 'hers', 'herself',
  'him', 'himself', 'his', 'how', 'however', 'hundred', 'i', 'ie', 'if', 'in',
  'inc', 'indeed', 'interest', 'into', 'is', 'it', 'its', 'itself', 'keep',
  'last', 'latter', 'latterly', 'least', 'less', 'ltd', 'made', 'many', 'may',
  'me', 'meanwhile', 'might', 'mill', 'mine', 'more', 'moreover', 'most',
  'mostly', 'move', 'much', 'must', 'my', 'myself', 'name', 'namely', 'neither',
  'never', 'nevertheless', 'next', 'nine', 'no', 'nobody', 'none', 'noone',
  'nor', 'not', 'nothing', 'now', 'nowhere', 'of', 'off', 'often', 'on', 'once',
  'one', 'only', 'onto', 'or', 'other', 'others', 'otherwise', 'our', 'ours',
  'ourselves', 'out', 'over', 'own', 'part', 'per', 'perhaps', 'please', 'put',
  'rather', 're', 'same', 'see', 'seem', 'seemed', 'seeming', 'seems',
  'serious', 'several', 'she', 'should', 'show', 'side', 'since', 'sincere',
  'six', 'sixty', 'so', 'some', 'somehow', 'someone', 'something', 'sometime',
  'sometimes', 'somewhere', 'still', 'such', 'system', 'take', 'ten', 'than',
  'that', 'the', 'their', 'them', 'themselves', 'then', 'thence', 'there',
  'thereafter', 'thereby', 'therefore', 'therein', 'thereupon', 'these', 'they',
  'thick', 'thin', 'third', 'this', 'those', 'though', 'three', 'through',
  'throughout', 'thru', 'thus', 'to', 'together', 'too', 'top', 'toward',
  'towards', 'twelve', 'twenty', 'two', 'un', 'under', 'until', 'up', 'upon',
  'us', 'very', 'via', 'was', 'we', 'well', 'were', 'what', 'whatever', 'when',
  'whence', 'whenever', 'where', 'whereafter', 'whereas', 'whereby', 'wherein',
  'whereupon', 'wherever', 'whether', 'which', 'while', 'whither', 'who',
  'whoever', 'whole', 'whom', 'whose', 'why', 'will', 'with', 'within',
  'without', 'would', 'yet', 'you', 'your', 'yours', 'yourself', 'yourselves',
  # custom stopwords for footprint
  'url', 'amp', 'quot', 'help', 'http', 'search', 'nbsp', 'need', 'cache',
  'vol', 'housingall', 'wantedall', 'personalsall', 'net', 'org', 'www',
  'gov', 'yes', 'no', '999',
  ])
def print_progress(msg):
"""print progress message-- shutup pylint"""
print str(datetime.now())+": "+msg
# word -> frequency, accumulated across all feeds.
KNOWN_WORDS = {}
def process_popular_words(content):
  """update the dictionary of popular words."""
  # TODO: handle phrases (via whitelist, then later do something smart.
  print_progress("cleaning content: %d bytes" % len(content))
  markup_re = re.compile('<[^>]*>', re.DOTALL)
  cleaned = markup_re.sub('', content).lower()
  print_progress("splitting words, %d bytes" % len(cleaned))
  tokens = re.split(r'[^a-zA-Z0-9]+', cleaned)
  print_progress("loading words")
  for token in tokens:
    # skip common english words and very short tokens
    if token in STOPWORDS or len(token) <= 2:
      continue
    KNOWN_WORDS[token] = KNOWN_WORDS.get(token, 0) + 1
  print_progress("cleaning rare words from %d words" % len(KNOWN_WORDS))
  # prune singletons to keep the dict from exploding.
  for token in list(KNOWN_WORDS.keys()):
    if KNOWN_WORDS[token] < 2:
      del KNOWN_WORDS[token]
  print_progress("done: word dict size %d words" % len(KNOWN_WORDS))
def print_word_stats():
  """dump word stats, most frequent first."""
  print_progress("final cleanse: keeping only words appearing 10 times")
  for word in KNOWN_WORDS.keys():
    if KNOWN_WORDS[word] < 10:
      del KNOWN_WORDS[word]
  # sort by descending frequency.  bugfix: the original re-sorted the
  # list alphabetically with sorted() in the output loop, silently
  # discarding this frequency sort-- iterate the sorted list directly.
  sorted_words = list(KNOWN_WORDS.iteritems())
  sorted_words.sort(cmp=lambda a, b: cmp(b[1], a[1]))
  print_progress("writing "+POPULAR_WORDS_FN+"...")
  popfh = open(LOGPATH+POPULAR_WORDS_FN, "w")
  for word, freq in sorted_words:
    popfh.write(str(freq)+"\t"+word+"\n")
  popfh.close()
  print_progress("done writing "+LOGPATH+POPULAR_WORDS_FN)
# header names and per-field value histograms, set on the first file seen.
FIELD_VALUES = None
FIELD_NAMES = None
NUM_RECORDS_TOTAL = 0
# "lat,lng" (rounded to 2 decimals) -> record count.
LATLNG_DENSITY = {}
def process_field_stats(content):
  """update the field-value histograms.

  content is TSV data whose first line is the header.  Accumulates
  per-field value frequencies into FIELD_VALUES and a rounded lat,lng
  density map into LATLNG_DENSITY.
  """
  global FIELD_NAMES, FIELD_VALUES, NUM_RECORDS_TOTAL
  for lineno, line in enumerate(content.splitlines()):
    fields = line.split("\t")
    if lineno == 0:
      # remember the header from the first file; later files are
      # assumed to share the same column layout.
      if FIELD_NAMES == None:
        FIELD_NAMES = fields
        FIELD_VALUES = [{} for i in range(len(fields))]
      continue
    NUM_RECORDS_TOTAL += 1
    lat_val = lng_val = None
    for i, val in enumerate(fields):
      if lat_val is None and FIELD_NAMES[i].find('latitude') >= 0:
        lat_val = val
      if lng_val is None and FIELD_NAMES[i].find('longitude') >= 0:
        lng_val = val
      # cap stored values so huge fields don't bloat the histogram
      val = val[0:300]
      if val in FIELD_VALUES[i]:
        FIELD_VALUES[i][val] += 1
      else:
        FIELD_VALUES[i][val] = 1
    # robustness fix: records with missing or blank coordinates used to
    # crash float()-- skip them for the density map only.
    try:
      lat_fltval = float(lat_val)
      lng_fltval = float(lng_val)
    except (TypeError, ValueError):
      continue
    # presumably values over 500 encode negatives as value+1000
    # upstream-- undo that here.  TODO: confirm against the producer.
    if lat_fltval > 500.0:
      lat_fltval -= 1000.0
    if lng_fltval > 500.0:
      lng_fltval -= 1000.0
    # truncate to two decimals so nearby records share a bucket.
    lat_val = re.sub(r'([.]\d\d)\d+', r'\1', str(lat_fltval))
    lng_val = re.sub(r'([.]\d\d)\d+', r'\1', str(lng_fltval))
    latlng = lat_val + ',' + lng_val
    if latlng in LATLNG_DENSITY:
      LATLNG_DENSITY[latlng] += 1
    else:
      LATLNG_DENSITY[latlng] = 1
def print_field_stats():
  """dump field-value stats."""
  print_progress("writing "+FIELD_STATS_FN+"...")
  statsfh = open(LOGPATH+FIELD_STATS_FN, "w")
  statsfh.write("number of records: "+str(NUM_RECORDS_TOTAL)+"\n")
  for idx, fieldname in enumerate(FIELD_NAMES):
    histogram = FIELD_VALUES[idx]
    statsfh.write("field "+fieldname+":uniqvals="+str(len(histogram))+"\n")
    # most frequent values first.
    ranked = list(histogram.iteritems())
    ranked.sort(cmp=lambda a, b: cmp(b[1], a[1]))
    # only the top values, and nothing appearing fewer than 10 times.
    for val, freq in ranked[0:1000]:
      if freq < 10:
        break
      statsfh.write("  %5d %s\n" % (freq, val))
  statsfh.close()
  print_progress("done writing "+FIELD_STATS_FN)
def print_geo_stats():
  """dump the lat,lng record-density stats."""
  print_progress("writing "+GEO_STATS_FN+"...")
  outfh = open(LOGPATH+GEO_STATS_FN, "w")
  # one "lat,lng count" line per bucket.
  for latlng, freq in LATLNG_DENSITY.iteritems():
    outfh.write("%s %d\n" % (latlng, freq))
  outfh.close()
  print_progress("done writing "+GEO_STATS_FN)
def append_log(outstr):
  """append to the detailed and truncated log, for stats collection."""
  # the detailed log gets everything, verbatim.
  detailfh = open(LOGPATH+DETAILED_LOG_FN, "a")
  detailfh.write(outstr)
  detailfh.close()
  # the truncated log keeps only STATUS/ERROR lines.
  brieffh = open(LOGPATH+LOG_FN, "a")
  for line in outstr.split('\n'):
    if re.search(r'(STATUS|ERROR)', line):
      brieffh.write(line+"\n")
  brieffh.close()
  # create a bzip2 file from the log
  logfh = open(LOGPATH+LOG_FN, "r")
  logdata = logfh.read()
  logfh.close()
  bz2fh = bz2.BZ2File(LOGPATH+LOG_FN_BZ2, "w")
  bz2fh.write(logdata)
  bz2fh.close()
def error_exit(msg):
  """Print an error message to stderr and exit."""
  # terminates the whole process with failure status.
  print >> sys.stderr, msg
  sys.exit(1)
# Use a shell for subcommands on Windows to get a PATH search.
USE_SHELL = sys.platform.startswith("win")
def run_shell_with_retcode(command, print_output=False,
                           universal_newlines=True):
  """Executes a command and returns stdout, stderr and the return code.

  Args:
    command: Command to execute.
    print_output: If True, the output is printed to stdout.
      If False, both stdout and stderr are ignored.
    universal_newlines: Use universal_newlines flag (default: True).

  Returns:
    Tuple (output, error output, return code)
  """
  logging.info("Running %s", command)
  proc = subprocess.Popen(command, stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE,
                          shell=USE_SHELL,
                          universal_newlines=universal_newlines)
  if print_output:
    # stream stdout line by line while the command runs.
    output_array = []
    while True:
      line = proc.stdout.readline()
      if not line:
        break
      print line.strip("\n")
      output_array.append(line)
    output = "".join(output_array)
  else:
    output = proc.stdout.read()
  proc.wait()
  errout = proc.stderr.read()
  if print_output and errout:
    print >> sys.stderr, errout
  proc.stdout.close()
  proc.stderr.close()
  # mirror both streams into the dashboard logs.
  append_log(output)
  append_log(errout)
  return output, errout, proc.returncode
def run_shell(command, silent_ok=False, universal_newlines=True,
              print_output=False):
  """run a shell command.

  Returns (stdout, stderr, retcode); dies only when stdout is empty
  and silent_ok is False.
  """
  stdout, stderr, retcode = run_shell_with_retcode(command, print_output,
                                                   universal_newlines)
  # NOTE: a nonzero exit status is deliberately not fatal (see the
  # commented-out check below)-- callers inspect retcode themselves.
  #if retcode and retcode != 0:
  #error_exit("Got error status from %s:\n%s\n%s" % (command, stdout, stderr))
  if not silent_ok and not stdout:
    error_exit("No output from %s" % command)
  return stdout, stderr, retcode
def load_gbase(name, url, do_processing=True, do_ftp=True):
  """process one feed via footprint_lib.py and optionally ftp it to base.

  Args:
    name: short feed name; also determines the output filename.
    url: feed URL or local filename, passed through to footprint_lib.
    do_processing: if False, reuse the existing <name>1.gz output file.
    do_ftp: if False, skip the upload (stats are still collected).
  """
  print_progress("loading "+name+" from "+url)
  # run as a subprocess so we can ignore failures and keep going.
  # later, we'll run these concurrently, but for now we're RAM-limited.
  # ignore retcode
  # match the filenames to the feed filenames in Google Base, so we can
  # manually upload for testing.
  tsv_filename = name+"1.gz"
  if do_processing:
    stdout, stderr, retcode = run_shell(["./footprint_lib.py", "--progress",
                                         #"--ftpinfo", USERNAME+":"+PASSWORD,
                                         "--output", tsv_filename, url,
                                         "--compress_output" ],
                                        silent_ok=True, print_output=False)
    print stdout,
    # tag each stderr line with the feed name for the combined log.
    if stderr and stderr != "":
      print name+":STDERR: ", re.sub(r'\n', '\n'+name+':STDERR: ', stderr)
    if retcode and retcode != 0:
      print name+":RETCODE: "+str(retcode)
  print "reading TSV data..."
  gzip_fh = gzip.open(tsv_filename, 'r')
  tsv_data = gzip_fh.read()
  gzip_fh.close()
  print "processing field stats..."
  process_field_stats(tsv_data)
  print "processing popular words..."
  process_popular_words(tsv_data)
  if do_ftp:
    print_progress("ftp'ing to base")
    footprint_lib.PROGRESS = True
    footprint_lib.ftp_to_base(name, USERNAME+":"+PASSWORD, tsv_data)
  print_progress("load_gbase: done.")
def test_loaders():
  """for testing, read from local disk as much as possible."""
  # (name, url) pairs; processing and ftp are both disabled.
  local_feeds = [
    ("americanredcross", "americanredcross.xml"),
    ("mlk_day", "mlk_day.xml"),
    ("gspreadsheets",
     "https://spreadsheets.google.com/ccc?key=rOZvK6aIY7HgjO-hSFKrqMw"),
    ("craigslist", "craigslist-cache.txt"),
  ]
  for feed_name, feed_url in local_feeds:
    load_gbase(feed_name, feed_url, False, False)
def loaders():
  """put all loaders in one function for easier testing."""
  # standard feeds: each reads <name>.xml, in this order.
  standard_feeds = [
    "americanredcross", "americansolutions", "americorps",
    "christianvolunteering", "citizencorps", "extraordinaries",
    "givingdupage", "habitat", "handsonnetwork", "idealist",
    "meetup", "mentorpro", "mlk_day", "mybarackobama",
    "myproj_servegov", "seniorcorps", "servenet", "unitedway",
    "volunteergov", "volunteermatch", "volunteertwo", "ymca",
  ]
  for feed_name in standard_feeds:
    load_gbase(feed_name, feed_name+".xml")
  # requires special crawling
  load_gbase("gspreadsheets",
             "https://spreadsheets.google.com/ccc?key=rOZvK6aIY7HgjO-hSFKrqMw")
  # note: craiglist crawler is run asynchronously, hence the local file
  load_gbase("craigslist", "craigslist-cache.txt")
  # out for launch
  # load_gbase("mybarackobama",
  #            "http://my.barackobama.com/page/event/search_results?"+
  #            "format=footprint")
  # old custom feed
  # legacy-- to be safe, remove after 9/1/2009
  #load_gbase("idealist", "http://feeds.idealist.org/xml/feeds/"+
  #           "Idealist-VolunteerOpportunity-VOLUNTEER_OPPORTUNITY_TYPE."+
  #           "en.open.atom.gz")
def main():
  """cmdline entry point: read credentials, run loaders, dump stats."""
  global USERNAME, PASSWORD
  if len(sys.argv) < 3:
    print "Usage:", sys.argv[0], "<gbase username> <password>"
    sys.exit(1)
  USERNAME = sys.argv[1]
  PASSWORD = sys.argv[2]
  if USERNAME == "test":
    # test mode: read local files, skip ftp, and log to cwd.
    global LOGPATH
    LOGPATH = "./"
    test_loaders()
  else:
    loaders()
  print_word_stats()
  print_field_stats()
  print_geo_stats()
# cmdline entry point.
if __name__ == "__main__":
  main()
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
parser for idealist, which (IIRC) originates from Base?
"""
import xml_helpers as xmlh
import re
from datetime import datetime
import xml.sax.saxutils
import dateutil.parser
# xml parser chokes on namespaces, and since we don't need them,
# just replace them for simplicity-- note that this also affects
# the code below
def remove_g_namespace(s, progress):
  """rewrite g: and awb: namespace prefixes to gg_/awb_ element names."""
  if progress:
    print datetime.now(), "removing g: namespace..."
  s = re.sub(r'<(/?)g:', r'<\1gg_', s)
  if progress:
    print datetime.now(), "removing awb: namespace..."
  s = re.sub(r'<(/?)awb:', r'<\1awb_', s)
  return s
def addCdataToContent(s, progress):
  """wrap every <content> element body in CDATA markers."""
  # what if CDATA is already used?!
  if progress:
    print datetime.now(), "adding CDATA to <content>..."
  ## yuck: this caused a RAM explosion...
  #rx = re.compile(r'<content( *?[^>]*?)>(.+?)</content>', re.DOTALL)
  #s = re.sub(rx, r'<content\1><![CDATA[\2]]></content>', s)
  # cheaper: patch the open and close tags in two independent passes.
  s = re.sub(r'<content([^>]+)>', r'<content\1><![CDATA[', s)
  if progress:
    print datetime.now(), "adding ]]> to </content>..."
  s = re.sub(r'</content>', r']]></content>', s)
  if progress:
    print datetime.now(), "done: ", len(s), " bytes"
  return s
def removeContentWrapperDiv(s):
  """strip idealist's wrapper <div ...> tags from a content blob."""
  unwrapped = re.sub(r'(.*?<div.*?>|</div>)', '', s)
  return unwrapped.strip()
# frees memory for main parse
def ParseHelper(instr, maxrecs, progress):
  """convert pre-cleaned idealist feed text to (fpxml, numorgs, numopps).

  Builds the FPXML by string concatenation plus targeted regex scans
  instead of DOM-parsing the whole feed-- per the comments below, a
  full parse of the feed "takes forever".  Returns None on a feed
  entry with other than exactly one gg_event_date_range.
  """
  # TODO: progress
  known_elnames = ['feed', 'title', 'subtitle', 'div', 'span', 'updated', 'id', 'link', 'icon', 'logo', 'author', 'name', 'uri', 'email', 'rights', 'entry', 'published', 'gg_publish_date', 'gg_expiration_date', 'gg_event_date_range', 'gg_start', 'gg_end', 'updated', 'category', 'summary', 'content', 'awb_city', 'awb_country', 'awb_state', 'awb_postalcode', 'gg_location', 'gg_age_range', 'gg_employer', 'gg_job_type', 'gg_job_industry', 'awb_paid', ]
  # takes forever
  #xmldoc = xmlh.simple_parser(s, known_elnames, progress)
  # convert to footprint format
  s = '<?xml version="1.0" ?>'
  s += '<FootprintFeed schemaVersion="0.1">'
  s += '<FeedInfo>'
  # TODO: assign provider IDs?
  s += '<feedID>1</feedID>'
  s += '<providerID>103</providerID>'
  s += '<providerName>idealist.org</providerName>'
  s += '<providerURL>http://www.idealist.org/</providerURL>'
  match = re.search(r'<title>(.+?)</title>', instr, re.DOTALL)
  if match:
    s += '<description>%s</description>' % (match.group(1))
  s += '<createdDateTime>%s</createdDateTime>' % xmlh.current_ts()
  s += '</FeedInfo>'
  numorgs = numopps = 0
  # hardcoded: Organization
  s += '<Organizations>'
  #authors = xmldoc.getElementsByTagName("author")
  organizations = {}
  authors = re.findall(r'<author>.+?</author>', instr, re.DOTALL)
  for i, orgstr in enumerate(authors):
    if progress and i > 0 and i % 250 == 0:
      print datetime.now(), ": ", i, " orgs processed."
    # each <author> block is small enough to parse individually.
    org = xmlh.simple_parser(orgstr, known_elnames, False)
    s += '<Organization>'
    s += '<organizationID>%d</organizationID>' % (i+1)
    s += '<nationalEIN></nationalEIN>'
    s += '<guidestarID></guidestarID>'
    name = xmlh.get_tag_val(org, "name")
    organizations[name] = i+1
    # NOTE(review): this emits the numeric org index as the <name>
    # value, not the org's name-- looks like a bug ('% (name)'
    # intended?); confirm against downstream consumers before changing.
    s += '<name>%s</name>' % (organizations[name])
    s += '<missionStatement></missionStatement>'
    s += '<description></description>'
    s += '<location><city></city><region></region><postalCode></postalCode></location>'
    s += '<organizationURL>%s</organizationURL>' % (xmlh.get_tag_val(org, "uri"))
    s += '<donateURL></donateURL>'
    s += '<logoURL></logoURL>'
    s += '<detailURL></detailURL>'
    s += '</Organization>'
    numorgs += 1
  s += '</Organizations>'
  s += '<VolunteerOpportunities>'
  entries = re.findall(r'<entry>.+?</entry>', instr, re.DOTALL)
  #entries = xmldoc.getElementsByTagName("entry")
  #if (maxrecs > entries.length):
  #  maxrecs = entries.length
  #for opp in entries[0:maxrecs-1]:
  for i, oppstr in enumerate(entries):
    if (maxrecs>0 and i>maxrecs):
      break
    xmlh.print_rps_progress("opps", progress, i, maxrecs)
    opp = xmlh.simple_parser(oppstr, known_elnames, False)
    # unmapped: db:rsvp (seems to be same as link, but with #rsvp at end of url?)
    # unmapped: db:host (no equivalent?)
    # unmapped: db:county (seems to be empty)
    # unmapped: attendee_count
    # unmapped: guest_total
    # unmapped: db:title (dup of title, above)
    # unmapped: contactName
    s += '<VolunteerOpportunity>'
    id_link = xmlh.get_tag_val(opp, "id")
    s += '<volunteerOpportunityID>%s</volunteerOpportunityID>' % (id_link)
    orgname = xmlh.get_tag_val(org, "name") # ok to be lazy-- no other 'name's in this feed
    s += '<sponsoringOrganizationIDs><sponsoringOrganizationID>%s</sponsoringOrganizationID></sponsoringOrganizationIDs>' % (organizations[orgname])
    # hardcoded: volunteerHubOrganizationID
    s += '<volunteerHubOrganizationIDs><volunteerHubOrganizationID>0</volunteerHubOrganizationID></volunteerHubOrganizationIDs>'
    s += '<title>%s</title>' % (xmlh.get_tag_val(opp, "title"))
    # lazy: id is the same as the link field...
    s += '<detailURL>%s</detailURL>' % (id_link)
    # lazy: idealist stuffs a div in the content...
    entry_content = xmlh.get_tag_val(opp, 'content')
    s += '<description>%s</description>' % removeContentWrapperDiv(entry_content)
    s += '<abstract>%s</abstract>' % (xmlh.get_tag_val(opp, "summary"))
    pubdate = xmlh.get_tag_val(opp, "published")
    ts = dateutil.parser.parse(pubdate)
    pubdate = ts.strftime("%Y-%m-%dT%H:%M:%S")
    s += '<lastUpdated>%s</lastUpdated>' % (pubdate)
    s += '<expires>%sT23:59:59</expires>' % (xmlh.get_tag_val(opp, "gg_expiration_date"))
    dbevents = opp.getElementsByTagName("gg_event_date_range")
    if (dbevents.length != 1):
      print datetime.now(), "parse_idealist: only 1 db:event supported."
      return None
    s += '<locations><location>'
    # yucko: idealist is stored in Google Base, which only has 'location'
    # so we stuff it into the city field, knowing that it'll just get
    # concatenated down the line...
    s += '<city>%s</city>' % (xmlh.get_tag_val(opp, "gg_location"))
    s += '</location></locations>'
    dbscheduledTimes = opp.getElementsByTagName("gg_event_date_range")
    if (dbscheduledTimes.length != 1):
      print datetime.now(), "parse_usaservice: only 1 gg_event_date_range supported."
      return None
    dbscheduledTime = dbscheduledTimes[0]
    s += '<dateTimeDurations><dateTimeDuration>'
    s += '<openEnded>No</openEnded>'
    # ignore duration
    # ignore commitmentHoursPerWeek
    tempdate = xmlh.get_tag_val(dbscheduledTime, "gg_start")
    ts = dateutil.parser.parse(tempdate)
    tempdate = ts.strftime("%Y-%m-%d")
    s += '<startDate>%s</startDate>' % (tempdate)
    tempdate = xmlh.get_tag_val(dbscheduledTime, "gg_end")
    ts = dateutil.parser.parse(tempdate)
    tempdate = ts.strftime("%Y-%m-%d")
    s += '<endDate>%s</endDate>' % (tempdate)
    # TODO: timezone???
    s += '</dateTimeDuration></dateTimeDurations>'
    s += '<categoryTags>'
    # proper way is too slow...
    #cats = opp.getElementsByTagName("category")
    #for i,cat in enumerate(cats):
    #  s += '<categoryTag>%s</categoryTag>' % (cat.attributes["label"].value)
    catstrs = re.findall(r'<category term=(["][^"]+["])', oppstr, re.DOTALL)
    for cat in catstrs:
      s += "<categoryTag>" + xml.sax.saxutils.escape(cat) + "</categoryTag>"
    s += '</categoryTags>'
    age_range = xmlh.get_tag_val(opp, "gg_age_range")
    # NOTE: re.match anchors at the start of the string, so these
    # labels only trigger when they begin the gg_age_range value.
    if re.match(r'and under|Families', age_range):
      s += '<minimumAge>0</minimumAge>'
    elif re.match(r'Teens', age_range):
      s += '<minimumAge>13</minimumAge>'
    elif re.match(r'Adults', age_range):
      s += '<minimumAge>18</minimumAge>'
    elif re.match(r'Seniors', age_range):
      s += '<minimumAge>65</minimumAge>'
    s += '</VolunteerOpportunity>'
    numopps += 1
  s += '</VolunteerOpportunities>'
  s += '</FootprintFeed>'
  #s = re.sub(r'><([^/])', r'>\n<\1', s)
  #print s
  return s, numorgs, numopps
# pylint: disable-msg=R0915
def parse(s, maxrecs, progress):
  """return FPXML given idealist data"""
  # CDATA-wrap content, strip namespaces, then do the real conversion.
  preprocessed = remove_g_namespace(addCdataToContent(s, progress), progress)
  return ParseHelper(preprocessed, maxrecs, progress)
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
parser for Network For Good feeds
"""
import xml_helpers as xmlh
import re
from datetime import datetime
ORGS = {}
ORGIDS = {}
MAX_ORGID = 0
def register_org(item):
  """register the organization info, for lookup later."""
  global MAX_ORGID
  # SponsoringOrganization/Name -- fortunately, no conflicts
  # but there's no data except the name
  orgname = xmlh.get_tag_val(item, "Name")
  if orgname in ORGIDS:
    return ORGIDS[orgname]
  MAX_ORGID = MAX_ORGID + 1
  # build the <Organization> record; only name and location carry data.
  pieces = [
    '<Organization>',
    '<organizationID>%d</organizationID>' % (MAX_ORGID),
    '<nationalEIN />',
    '<name>%s</name>' % (orgname),
    '<missionStatement />',
    '<description />',
    '<location>',
    xmlh.output_node("city", item, "City"),
    xmlh.output_node("region", item, "StateOrProvince"),
    xmlh.output_node("postalCode", item, "ZipOrPostalCode"),
    '</location>',
    '<organizationURL />',
    '<donateURL />',
    '<logoURL />',
    '<detailURL />',
    '</Organization>',
  ]
  ORGS[MAX_ORGID] = ''.join(pieces)
  ORGIDS[orgname] = MAX_ORGID
  return MAX_ORGID
# pylint: disable-msg=R0915
def parser(providerID, providerName, feedID, providerURL, feedDescription):
"""create an NFG-compatible parser"""
known_elnames = [
'Abstract', 'Categories', 'Category', 'CategoryID', 'Country', 'DateListed',
'Description', 'DetailURL', 'Duration', 'DurationQuantity', 'DurationUnit',
'EndDate', 'KeyWords', 'LocalID', 'Location', 'LocationClassification',
'LocationClassificationID', 'LocationClassifications', 'Locations',
'LogoURL', 'Name', 'OpportunityDate', 'OpportunityDates', 'OpportunityType',
'OpportunityTypeID', 'SponsoringOrganization', 'SponsoringOrganizations',
'StartDate', 'StateOrProvince', 'Title', 'VolunteerOpportunity',
'ZipOrPostalCode' ]
def parse(instr, maxrecs, progress):
numorgs = numopps = 0
instr = re.sub(r'<(/?db):', r'<\1_', instr)
opps = re.findall(r'<VolunteerOpportunity>.+?</VolunteerOpportunity>',
instr, re.DOTALL)
volopps = ""
for i, oppstr in enumerate(opps):
if progress and i > 0 and i % 250 == 0:
print str(datetime.now())+": ", i, " opportunities processed."
if (maxrecs > 0 and i > maxrecs):
break
xmlh.print_rps_progress("opps", progress, i, maxrecs)
item = xmlh.simple_parser(oppstr, known_elnames, progress=False)
orgid = register_org(item)
# logoURL -- sigh, this is for the opportunity not the org
volopps += '<VolunteerOpportunity>'
volopps += xmlh.output_val('volunteerOpportunityID', str(i))
volopps += xmlh.output_val('sponsoringOrganizationID', str(orgid))
volopps += xmlh.output_node('volunteerHubOrganizationID', item, "LocalID")
volopps += xmlh.output_node('title', item, "Title")
volopps += xmlh.output_node('abstract', item, "Description")
volopps += xmlh.output_node('description', item, "Description")
volopps += xmlh.output_node('detailURL', item, "DetailURL")
volopps += xmlh.output_val('volunteersNeeded', "-8888")
oppdates = item.getElementsByTagName("OpportunityDate")
if oppdates.length > 1:
print datetime.now(), \
"parse_servenet.py: only 1 OpportunityDate supported."
#return None
oppdate = oppdates[0]
elif oppdates.length == 0:
oppdate = None
else:
oppdate = oppdates[0]
volopps += '<dateTimeDurations><dateTimeDuration>'
if oppdate:
volopps += xmlh.output_val('openEnded', 'No')
volopps += xmlh.output_val('duration', 'P%s%s' %
(xmlh.get_tag_val(oppdate, "DurationQuantity"),
xmlh.get_tag_val(oppdate, "DurationUnit")))
volopps += xmlh.output_val('commitmentHoursPerWeek', '0')
volopps += xmlh.output_node('startDate', oppdate, "StartDate")
volopps += xmlh.output_node('endDate', oppdate, "EndDate")
else:
volopps += xmlh.output_val('openEnded', 'Yes')
volopps += xmlh.output_val('commitmentHoursPerWeek', '0')
volopps += '</dateTimeDuration></dateTimeDurations>'
volopps += '<locations>'
opplocs = item.getElementsByTagName("Location")
for opploc in opplocs:
volopps += '<location>'
volopps += xmlh.output_node('region', opploc, "StateOrProvince")
volopps += xmlh.output_node('country', opploc, "Country")
volopps += xmlh.output_node('postalCode', opploc, "ZipOrPostalCode")
volopps += '</location>'
volopps += '</locations>'
volopps += '<categoryTags/>'
volopps += '</VolunteerOpportunity>'
numopps += 1
# convert to footprint format
outstr = '<?xml version="1.0" ?>'
outstr += '<FootprintFeed schemaVersion="0.1">'
outstr += '<FeedInfo>'
outstr += xmlh.output_val('providerID', providerID)
outstr += xmlh.output_val('providerName', providerName)
outstr += xmlh.output_val('feedID', feedID)
outstr += xmlh.output_val('createdDateTime', xmlh.current_ts())
outstr += xmlh.output_val('providerURL', providerURL)
outstr += xmlh.output_val('description', feedDescription)
# TODO: capture ts -- use now?!
outstr += '</FeedInfo>'
# hardcoded: Organization
outstr += '<Organizations>'
for key in ORGS:
outstr += ORGS[key]
numorgs += 1
outstr += '</Organizations>'
outstr += '<VolunteerOpportunities>'
outstr += volopps
outstr += '</VolunteerOpportunities>'
outstr += '</FootprintFeed>'
#outstr = re.sub(r'><([^/])', r'>\n<\1', outstr)
return outstr, numorgs, numopps
return parse
| Python |
#!/usr/bin/python
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: remove silly dependency on dapper.net-- thought I'd need
# it for the full scrape, but ended up not going that way.
"""crawler for craigslist until they provide a real feed."""
from xml.dom import minidom
import sys
import os
import urllib
import re
import thread
import time
import datetime
import socket
# give up on any single HTTP fetch after this many seconds
DEFAULT_TIMEOUT = 10
socket.setdefaulttimeout(DEFAULT_TIMEOUT)
# metro list ('url|name' per line) and crawl cache ('url-Q-page' per line)
METROS_FN = "craigslist-metros.txt"
CACHE_FN = "craigslist-cache.txt"
# url -> page contents, shared across crawler threads (guard with page_lock)
pages = {}
page_lock = thread.allocate_lock()
# count of in-flight crawler threads (guard with crawlers_lock)
crawlers = 0
crawlers_lock = thread.allocate_lock()
# serializes appends to CACHE_FN
cachefile_lock = thread.allocate_lock()
# set to a lower number if you have problems
MAX_CRAWLERS = 40
def read_metros():
  """Populate the global metros dict (url -> metro name) from METROS_FN.

  Fixes vs. original: strips the trailing newline off each name, skips
  blank lines instead of crashing on the unpack, and closes the file.
  """
  global metros
  metros = {}
  fh = open(METROS_FN, "r")
  for line in fh:
    line = line.strip()
    if not line:
      # tolerate blank/trailing lines in the file
      continue
    url, name = line.split("|")
    metros[url] = name
  fh.close()
def crawl_metros():
  """Scrape craigslist's metro/submetro list and write it to METROS_FN.

  Uses a dapper.net screen-scrape of geo.craigslist.org for the toplevel
  metros, then fetches each metro homepage looking for submetro links.
  Output format: one 'url|name' line per metro or submetro.
  """
  # example <geo> node returned by dapper:
  #<geo dataType="RawString" fieldName="geo" href="http://waterloo.craigslist.org/" originalElement="a" type="field">waterloo / cedar falls</geo>
  print "getting toplevel geos..."
  fh = urllib.urlopen("http://www.dapper.net/RunDapp?dappName=craigslistmetros&v=1&applyToUrl=http%3A%2F%2Fgeo.craigslist.org%2Fiso%2Fus")
  geostr = fh.read()
  fh.close()
  dom = minidom.parseString(geostr)
  nodes = dom.getElementsByTagName("geo")
  outfh = open(METROS_FN, "w+")
  # NOTE(review): 'domains' is never appended to or read -- unused.
  domains = []
  for node in nodes:
    domain = node.getAttribute("href")
    #print "finding submetros within", domain
    fh1 = urllib.urlopen(domain)
    domain_homepage = fh1.read()
    fh1.close()
    # the submetro navigation block on a metro homepage looks like:
    #<td align="center" colspan="5" id="topban">
    #<div>
    #<h2>new york city</h2> <sup><a href="http://en.wikipedia.org/wiki/New_York_City">w</a></sup>
    #<span class="for"><a href="/mnh/" title="manhattan">mnh</a> <a href="/brk/" title="brooklyn">brk</a> <a href="/que/" title="queens">que</a> <a href="/brx/" title="bronx">brx</a> <a href="/stn/" title="staten island">stn</a> <a href="/jsy/" title="new jersey">jsy</a> <a href="/lgi/" title="long island">lgi</a> <a href="/wch/" title="westchester">wch</a> <a href="/fct/" title="fairfield">fct</a> </span>
    #</div>
    #</td>
    topbanstrs = re.findall(r'<td align="center" colspan="5" id="topban">.+?</td>', domain_homepage, re.DOTALL)
    for topbanstr in topbanstrs:
      links = re.findall(r'<a href="/(.+?)".+?title="(.+?)".+?</a>', topbanstr, re.DOTALL)
      if len(links) > 0:
        # metro has submetros: one output line per submetro
        for link in links:
          print domain+link[0], ":", link[1]
          outfh.write(domain+link[0]+"|"+link[1]+"\n")
      else:
        # no submetros: fall back to the <h2> metro name
        names = re.findall(r'<h2>(.+?)</h2>', domain_homepage, re.DOTALL)
        print domain, ":", names[0]
        outfh.write(domain+"|"+names[0]+"\n")
  outfh.close()
def crawl(url, ignore):
global crawlers, crawlers_lock, pages, page_lock, MAX_CRAWLERS
if url in pages:
return
while crawlers > MAX_CRAWLERS:
time.sleep(1)
# we don't care if several wake at once
crawlers_lock.acquire()
crawlers = crawlers + 1
crawlers_lock.release()
#proxied_url = "http://suprfetch.appspot.com/?url="+urllib.quote(url+"?for_google_and_craigslist.org_project_footprint_please_dont_block")
proxied_url = "http://suprfetch.appspot.com/?url="+urllib.quote(url)
page = ""
attempts = 0
while attempts < 3 and page == "":
try:
fh = urllib.urlopen(proxied_url)
page = fh.read()
fh.close()
except:
page = "" # in case close() threw exception
attempts = attempts + 1
print "open failed, retry after", attempts, "attempts (url="+url+")"
time.sleep(1)
if re.search(r'This IP has been automatically blocked', page, re.DOTALL):
print "uh oh: craiglist is blocking us (IP blocking). exiting..."
sys.exit(1)
if (re.search(r'sorry.google.com/sorry/', page) or
re.search(r'to automated requests from a computer virus or spyware', page, re.DOTALL)):
print "uh oh: google is blocking us (DOS detector). exiting..."
sys.exit(1)
if re.search(r'<TITLE>302 Moved</TITLE>"',page, re.DOTALL):
newlocstr = re.findall(r'The document has moved <A HREF="(.+?)"',page)
print "being redirected to",newlocstr[0]
crawl(newlocstr[0], "foo")
return
if attempts >= 3:
print "crawl failed after 3 attempts:",url
return
page_lock.acquire()
pages[url] = page
page_lock.release()
cached_page = re.sub(r'(?:\r?\n|\r)',' ',page)
cachefile_lock.acquire()
outfh = open(CACHE_FN, "a")
outfh.write(url+"-Q-"+cached_page+"\n")
outfh.close()
cachefile_lock.release()
crawlers_lock.acquire()
crawlers = crawlers - 1
crawlers_lock.release()
def wait_for_page(url):
  """Block until url shows up in the shared pages dict; return its content.

  Polls under page_lock every 2 seconds."""
  while True:
    page_lock.acquire()
    content = pages.get(url, "")
    page_lock.release()
    if content != "":
      return content
    time.sleep(2)
def sync_fetch(url):
crawl(url, "")
if url not in pages:
print "sync_fetch, failed to crawl url",url
sys.exit(1)
return pages[url]
# wall-clock time at program start, used for throughput reporting
progstart = time.time()
def secs_since_progstart():
  """Return seconds (float) elapsed since the program started."""
  return time.time() - progstart
def crawl_metro_page(url, unused):
  """Crawl one listing-index page of a metro's volunteer section.

  Fetches every listing linked from the page, then spawns a thread per
  pagination link (index###.html) to continue to later index pages."""
  page = sync_fetch(url)
  # site root, e.g. http://sfbay.craigslist.org/
  site_root = re.sub(r'.org/.+', '.org/', url)
  for rel in re.findall(r'<p><a href="/(.+?)">', page):
    crawl(site_root + rel, "")
  # directory of the current index page, for resolving pagination links
  dir_prefix = re.sub(r'[^/]+$', '', url)
  for nextpage in re.findall(r'<a href="(index[0-9]+[.]html)"', page):
    thread.start_new_thread(crawl_metro_page, (dir_prefix + nextpage, ""))
def parse_cache_file(s, listings_only=False, printerrors=True):
global pages
for i,line in enumerate(s.splitlines()):
#print line[0:100]
res = re.findall(r'^(.+?)-Q-(.+)', line)
try:
url,page = res[0][0], res[0][1]
if (not listings_only or re.search(r'html$', url)):
pages[url] = page
except:
if printerrors:
print "error parsing cache file on line",i+1
print line
def load_cache():
global CACHE_FN
try:
fh = open(CACHE_FN, "r")
instr = fh.read()
print "closing cache file", CACHE_FN
fh.close()
print "parsing cache data", len(instr), "bytes"
parse_cache_file(instr, False)
print "loaded", len(pages), "pages."
except:
# ignore errors if file doesn't exist
pass
def print_status():
  """Report crawl throughput every 2s; return after ~200s of no progress.

  Runs on the main thread while crawler threads fill the shared pages
  dict; 'num_cached_pages' is the page count loaded from cache before
  crawling started, so crawled_pages counts only fresh fetches.
  """
  global pages, num_cached_pages, crawlers
  samesame = 0
  last_crawled_pages = 0
  while True:
    crawled_pages = len(pages) - num_cached_pages
    # NOTE(review): divides by elapsed wall time; a ZeroDivisionError is
    # possible only if called in the same instant the program started --
    # elapsed is a float, so in practice this is nonzero.
    pages_per_sec = int(crawled_pages/secs_since_progstart())
    msg = str(secs_since_progstart())+": main thread: "
    msg += "waiting for " + str(crawlers) + " crawlers.\n"
    msg += str(crawled_pages) + " pages crawled so far"
    msg += "(" + str(pages_per_sec) + " pages/sec). "
    msg += str(len(pages)) + " total pages."
    print msg
    # count consecutive no-progress iterations; give up after 100 (~200s)
    if last_crawled_pages == crawled_pages:
      samesame += 1
      if samesame >= 100:
        print "done (waited long enough)."
        break
    else:
      last_crawled_pages = crawled_pages
    time.sleep(2)
from optparse import OptionParser
if __name__ == "__main__":
  parser = OptionParser("usage: %prog [options]...")
  parser.set_defaults(metros=False)
  parser.set_defaults(load_cache=True)
  # --metros: re-scrape the metro list before crawling
  parser.add_option("--metros", action="store_true", dest="metros")
  # --load_cache / --noload_cache: reuse or discard the crawl cache
  parser.add_option("--load_cache", action="store_true", dest="load_cache")
  parser.add_option("--noload_cache", action="store_false", dest="load_cache")
  (options, args) = parser.parse_args(sys.argv[1:])
  if options.metros:
    crawl_metros()
  read_metros()
  if options.load_cache:
    load_cache()
  else:
    # fresh crawl: drop any existing cache file, ignoring a missing one
    try:
      os.unlink(CACHE_FN)
    except:
      pass
  # baseline so print_status() can count only freshly-crawled pages
  num_cached_pages = len(pages)
  outstr = ""
  # one crawler thread per metro, starting at its volunteer section
  for url in metros:
    thread.start_new_thread(crawl_metro_page, (url+"vol/", ""))
  print_status()
  sys.exit(0)
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
parser for feed stored in a google spreadsheet
(note that this is different from other parsers inasmuch as it
expects the caller to pass in the providerID and providerName)
"""
# typical cell
#<entry>
#<id>http://spreadsheets.google.com/feeds/cells/pMY64RHUNSVfKYZKPoVXPBg
#/1/public/basic/R14C13</id>
#<updated>2009-04-28T03:29:56.957Z</updated>
#<category scheme='http://schemas.google.com/spreadsheets/2006'
#term='http://schemas.google.com/spreadsheets/2006#cell'/>
#<title type='text'>M14</title>
#<content type='text'>ginny@arthur.edu</content>
#<link rel='self' type='application/atom+xml' href='http://spreadsheets.
#google.com/feeds/cells/pMY64RHUNSVfKYZKPoVXPBg/1/public/basic/R14C13'/>
#</entry>
import xml_helpers as xmlh
import re
import urllib
import sys
import time
from datetime import datetime
# Spreadsheet row currently being parsed, for error messages; None when
# not inside the row-scanning loop of parse().
CURRENT_ROW = None
def parser_error(msg):
  """Print a parse error, prefixed with the current row number if known."""
  if CURRENT_ROW != None:
    msg = "row "+str(CURRENT_ROW)+": "+msg
  print "parse_gspreadsheet ERROR: "+msg
def raw_recordval(record, key):
  """Return record[key] coerced to a stripped string, or "" if absent."""
  if key not in record:
    return ""
  return str(record[key]).strip()
def recordval(record, key):
  """Like raw_recordval, but with internal whitespace runs collapsed."""
  return re.sub(r'\s+', ' ', raw_recordval(record, key))
# Maps organization name -> small integer id, assigned on first sight
# in record_to_fpxml() and emitted as <Organization> records by parse().
KNOWN_ORGS = {}
def get_dtval(record, field_name):
  """Fetch a date field; report an error unless blank or MM/DD/YYYY."""
  val = recordval(record, field_name)
  if val and not re.match(r'\d\d?/\d\d?/\d\d\d\d', val):
    parser_error("bad value in "+field_name+": '"+val+"'-- try MM/DD/YYYY")
  return val
def get_tmval(record, field_name):
  """Fetch a time field; report an error unless blank or HH:MM[:SS]."""
  val = recordval(record, field_name)
  if val and not re.match(r'\d?\d:\d\d(:\d\d)?', val):
    parser_error("bad value in "+field_name+": '"+val+"'-- try HH:MM:SS")
  return val
def record_to_fpxml(record):
  """Convert one spreadsheet row (field_name -> value dict) to a
  <VolunteerOpportunity> FPXML fragment.

  Side effect: first-seen organization names are assigned sequential ids
  in the KNOWN_ORGS module table; parse() later emits matching
  <Organization> records.  Missing/invalid fields are reported via
  parser_error() but the fragment is still produced.
  """
  fpxml = ""
  fpxml += '<VolunteerOpportunity>'
  fpxml += xmlh.output_val("volunteerOpportunityID", recordval(record, 'oppid'))
  orgname = recordval(record,'SponsoringOrganization')
  # assign next sequential org id (0, 1, 2, ...) on first sight
  if orgname not in KNOWN_ORGS:
    KNOWN_ORGS[orgname] = len(KNOWN_ORGS)
  fpxml += xmlh.output_val("sponsoringOrganizationID", KNOWN_ORGS[orgname])
  title = recordval(record,'OpportunityTitle')
  if title == "":
    parser_error("missing OpportunityTitle-- this field is required.")
  fpxml += xmlh.output_val("title", title)
  fpxml += '<dateTimeDurations>'
  fpxml += '<dateTimeDuration>'
  # "ongoing" anywhere in StartDate marks an open-ended opportunity
  if ('StartDate' in record and
      recordval(record,'StartDate').find("ongoing") >= 0):
    fpxml += xmlh.output_val('openEnded', 'Yes')
  else:
    fpxml += xmlh.output_val('openEnded', 'No')
    # dates/times are emitted only when nonblank (validated by the getters)
    startdtval = get_dtval(record, 'StartDate')
    if startdtval != "":
      fpxml += xmlh.output_val('startDate', startdtval)
    starttmval = get_tmval(record, 'StartTime')
    if starttmval != "":
      fpxml += xmlh.output_val('startTime', starttmval)
    enddtval = get_dtval(record, 'EndDate')
    if enddtval != "":
      fpxml += xmlh.output_val('endDate', enddtval)
    endtmval = get_tmval(record, 'EndTime')
    if endtmval != "":
      fpxml += xmlh.output_val('endTime', endtmval)
  # map free-text frequency to an iCal RRULE-style recurrence
  freq = recordval(record,'Frequency').lower()
  if freq == "" or freq.find("once") >= 0:
    fpxml += '<iCalRecurrence/>'
  elif freq.find("daily") >= 0:
    fpxml += '<iCalRecurrence>FREQ=DAILY</iCalRecurrence>'
  elif freq.find("weekly") >= 0:
    fpxml += '<iCalRecurrence>FREQ=WEEKLY</iCalRecurrence>'
  elif freq.find("other") >= 0 and freq.find("week") >= 0:
    # "every other week"
    fpxml += '<iCalRecurrence>FREQ=WEEKLY;INTERVAL=2</iCalRecurrence>'
  elif freq.find("monthly") >= 0:
    fpxml += '<iCalRecurrence>FREQ=MONTHLY</iCalRecurrence>'
  else:
    parser_error("unsupported frequency: '"+recordval(record,'Frequency')+"'-- skipping")
  fpxml += xmlh.output_val('commitmentHoursPerWeek', recordval(record,'CommitmentHours'))
  fpxml += '</dateTimeDuration>'
  fpxml += '</dateTimeDurations>'
  fpxml += '<locations>'
  fpxml += '<location>'
  # "virtual" in the location name marks a non-physical opportunity
  if recordval(record,'LocationName').find("virtual") >= 0:
    fpxml += xmlh.output_val('virtual', 'Yes')
  else:
    fpxml += xmlh.output_val('virtual', 'No')
  fpxml += xmlh.output_val('name', recordval(record,'LocationName'))
  fpxml += xmlh.output_val('streetAddress1', recordval(record,'LocationStreet'))
  fpxml += xmlh.output_val('city', recordval(record,'LocationCity'))
  fpxml += xmlh.output_val('region', recordval(record,'LocationProvince'))
  fpxml += xmlh.output_val('postalCode', recordval(record,'LocationPostalCode'))
  fpxml += xmlh.output_val('country', recordval(record,'LocationCountry'))
  fpxml += '</location>'
  fpxml += '</locations>'
  fpxml += xmlh.output_val('paid', recordval(record,'Paid'))
  fpxml += xmlh.output_val('minimumAge', recordval(record,'MinimumAge'))
  # TODO: seniors only, kidfriendly
  fpxml += xmlh.output_val('sexRestrictedTo', recordval(record,'SexRestrictedTo'))
  fpxml += xmlh.output_val('skills', recordval(record,'Skills'))
  fpxml += xmlh.output_val('contactName', recordval(record,'ContactName'))
  fpxml += xmlh.output_val('contactPhone', recordval(record,'ContactPhone'))
  fpxml += xmlh.output_val('contactEmail', recordval(record,'ContactEmail'))
  fpxml += xmlh.output_val('detailURL', recordval(record,'URL'))
  # note: preserve whitespace in description
  fpxml += xmlh.output_val('description', raw_recordval(record,'Description'))
  fpxml += '<lastUpdated olsonTZ="Etc/UTC">'
  fpxml += recordval(record,'LastUpdated') + '</lastUpdated>'
  fpxml += '</VolunteerOpportunity>'
  return fpxml
def cellval(data, row, col):
  """Return the cell stored under 'R<row>C<col>' in data, or None."""
  return data.get('R' + str(row) + 'C' + str(col))
def parse_gspreadsheet(instr, data, updated, progress):
  """Parse a google-spreadsheet cells feed with regexps (no DOM).

  Fills data['R<row>C<col>'] with each cell's text content and
  updated['R<row>C<col>'] with its last-updated timestamp (fractional
  seconds and trailing 'Z' stripped).  Returns (maxrow, maxcol) seen.
  """
  # look ma, watch me parse XML a zillion times faster!
  # typical entry being matched:
  #<entry><id>http://spreadsheets.google.com/feeds/cells/pMY64RHUNSVfKYZKPoVXPBg
  #/1/public/basic/R14C15</id><updated>2009-04-28T03:34:21.900Z</updated>
  #<category scheme='http://schemas.google.com/spreadsheets/2006'
  #term='http://schemas.google.com/spreadsheets/2006#cell'/><title type='text'>
  #O14</title><content type='text'>http://www.fake.org/vol.php?id=4</content>
  #<link rel='self' type='application/atom+xml'
  #href='http://spreadsheets.google.com/feeds/cells/pMY64RHUNSVfKYZKPoVXPBg/1/
  #public/basic/R14C15'/></entry>
  # group(1)='R14C15', group(2)=row, group(3)=col, group(4)=timestamp,
  # group(5)=cell contents
  regexp = re.compile('<entry>.+?(R(\d+)C(\d+))</id>'+
                      '<updated.*?>(.+?)</updated>.*?'+
                      '<content.*?>(.+?)</content>.+?</entry>', re.DOTALL)
  maxrow = maxcol = 0
  for i, match in enumerate(re.finditer(regexp, instr)):
    if progress and i > 0 and i % 250 == 0:
      print str(datetime.now())+": ", maxrow, "rows and", i, " cells processed."
    # drop fractional seconds and the trailing Z from the timestamp
    lastupd = re.sub(r'([.][0-9]+)?Z?$', '', match.group(4)).strip()
    #print "lastupd='"+lastupd+"'"
    updated[match.group(1)] = lastupd.strip("\r\n\t ")
    val = match.group(5).strip("\r\n\t ")
    data[match.group(1)] = val
    row = int(match.group(2))
    if row > maxrow:
      maxrow = row
    col = int(match.group(3))
    if col > maxcol:
      maxcol = col
    #print row, col, val
  if progress:
    print str(datetime.now())+": found ", maxrow, "rows and", maxcol, "columns."
  return maxrow, maxcol
def read_gspreadsheet(url, data, updated, progress):
  """Fetch the cells feed at url and parse it into data/updated.

  Returns (maxrow, maxcol) from parse_gspreadsheet."""
  infh = urllib.urlopen(url)
  contents = infh.read()
  infh.close()
  return parse_gspreadsheet(contents, data, updated, progress)
def find_header_row(data, regexp_str):
  """Locate the header cell matching regexp_str near the sheet's top-left.

  Scans rows 0-19 and columns 0-4 in order and returns (row, col) of the
  first cell matching regexp_str (case-insensitive).  If nothing matches,
  both errors are reported via parser_error and (-1, -1) is returned.
  """
  pattern = re.compile(regexp_str, re.IGNORECASE|re.DOTALL)
  for row in range(20):
    for col in range(5):
      cell = cellval(data, row, col)
      if cell and re.search(pattern, cell):
        return row, col
  parser_error("no header row found: looked for "+regexp_str)
  parser_error("no header start column found")
  return -1, -1
def parse(instr, maxrecs, progress):
# TODO: a spreadsheet should really be an object and cellval a method
data = {}
updated = {}
maxrow, maxcol = parse_gspreadsheet(instr, data, updated, progress)
# find header row: look for "opportunity title" (case insensitive)
header_row, header_startcol = find_header_row(data, 'opportunity\s*title')
header_colidx = {}
header_names = {}
header_col = header_startcol
while True:
header_str = cellval(data, header_row, header_col)
if not header_str:
break
field_name = None
header_str = header_str.lower()
if header_str.find("title") >= 0:
field_name = "OpportunityTitle"
elif header_str.find("organization") >= 0 and header_str.find("sponsor") >= 0:
field_name = "SponsoringOrganization"
elif header_str.find("description") >= 0:
field_name = "Description"
elif header_str.find("skills") >= 0:
field_name = "Skills"
elif header_str.find("location") >= 0 and header_str.find("name") >= 0:
field_name = "LocationName"
elif header_str.find("street") >= 0:
field_name = "LocationStreet"
elif header_str.find("city") >= 0:
field_name = "LocationCity"
elif header_str.find("state") >= 0 or header_str.find("province") >= 0:
field_name = "LocationProvince"
elif header_str.find("zip") >= 0 or header_str.find("postal") >= 0:
field_name = "LocationPostalCode"
elif header_str.find("country") >= 0:
field_name = "LocationCountry"
elif header_str.find("start") >= 0 and header_str.find("date") >= 0:
field_name = "StartDate"
elif header_str.find("start") >= 0 and header_str.find("time") >= 0:
field_name = "StartTime"
elif header_str.find("end") >= 0 and header_str.find("date") >= 0:
field_name = "EndDate"
elif header_str.find("end") >= 0 and header_str.find("time") >= 0:
field_name = "EndTime"
elif header_str.find("contact") >= 0 and header_str.find("name") >= 0:
field_name = "ContactName"
elif header_str.find("email") >= 0 or header_str.find("e-mail") >= 0:
field_name = "ContactEmail"
elif header_str.find("phone") >= 0:
field_name = "ContactPhone"
elif header_str.find("website") >= 0 or header_str.find("url") >= 0:
field_name = "URL"
elif header_str.find("often") >= 0:
field_name = "Frequency"
elif header_str.find("days") >= 0 and header_str.find("week") >= 0:
field_name = "DaysOfWeek"
elif header_str.find("paid") >= 0:
field_name = "Paid"
elif header_str.find("commitment") >= 0 or header_str.find("hours") >= 0:
field_name = "CommitmentHours"
elif header_str.find("age") >= 0 and header_str.find("min") >= 0:
field_name = "MinimumAge"
elif header_str.find("kid") >= 0:
field_name = "KidFriendly"
elif header_str.find("senior") >= 0 and header_str.find("only") >= 0:
field_name = "SeniorsOnly"
elif header_str.find("sex") >= 0 or header_str.find("gender") >= 0:
field_name = "SexRestrictedTo"
elif header_str.find("volunteer appeal") >= 0:
field_name = None
else:
parser_error("couldn't map header '"+header_str+"' to a field name.")
if field_name != None:
header_colidx[field_name] = header_col
header_names[header_col] = field_name
#print header_str, "=>", field_name
header_col += 1
if len(header_names) < 10:
parser_error("too few fields found: "+str(len(header_names)))
# check to see if there's a header-description row
header_desc = cellval(data, header_row+1, header_startcol)
if not header_desc:
parser_error("blank row not allowed below header row")
header_desc = header_desc.lower()
data_startrow = header_row + 1
if header_desc.find("up to") >= 0:
data_startrow += 1
# find the data
global CURRENT_ROW
CURRENT_ROW = row = data_startrow
blankrows = 0
MAX_BLANKROWS = 2
volopps = '<VolunteerOpportunities>'
numorgs = numopps = 0
while True:
blankrow = True
#rowstr = "row="+str(row)+"\n"
record = {}
record['LastUpdated'] = '0000-00-00'
for field_name in header_colidx:
col = header_colidx[field_name]
val = cellval(data, row, col)
if val:
blankrow = False
else:
val = ""
#rowstr += " "+field_name+"="+val+"\n"
record[field_name] = val
key = 'R'+str(row)+'C'+str(col)
if (key in updated and
updated[key] > record['LastUpdated']):
record['LastUpdated'] = updated[key]
if blankrow:
blankrows += 1
if blankrows > MAX_BLANKROWS:
break
else:
numopps += 1
blankrows = 0
record['oppid'] = str(numopps)
volopps += record_to_fpxml(record)
row += 1
CURRENT_ROW = row
CURRENT_ROW = None
if progress:
print str(datetime.now())+": ", numopps, "opportunities found."
volopps += '</VolunteerOpportunities>'
outstr = '<?xml version="1.0" ?>'
outstr += '<FootprintFeed schemaVersion="0.1">'
outstr += '<FeedInfo>'
# providerID replaced by caller
outstr += '<providerID></providerID>'
# providerName replaced by caller
outstr += '<providerName></providerName>'
outstr += '<feedID>1</feedID>'
outstr += '<createdDateTime>%s</createdDateTime>' % xmlh.current_ts()
# providerURL replaced by caller
outstr += '<providerURL></providerURL>'
outstr += '<description></description>'
outstr += '</FeedInfo>'
outstr += "<Organizations>"
for orgname in KNOWN_ORGS:
outstr += "<Organization>"
outstr += xmlh.output_val("organizationID", KNOWN_ORGS[orgname])
outstr += xmlh.output_val("name", orgname)
outstr += "</Organization>"
outstr += "</Organizations>"
outstr += volopps
outstr += '</FootprintFeed>'
#outstr = re.sub(r'><', '>\n<', outstr)
#print outstr
return outstr, numorgs, numopps
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
parser for usaservice.org
"""
import xml_helpers as xmlh
import re
from datetime import datetime
import dateutil.parser
# pylint: disable-msg=R0915
def parse(instr, maxrecs, progress):
"""return FPXML given usaservice data"""
# TODO: progress
known_elnames = [ 'channel', 'db:abstract', 'db:address', 'db:attendee_count', 'db:categories', 'db:city', 'db:country', 'db:county', 'db:dateTime', 'db:event', 'db:eventType', 'db:guest_total', 'db:host', 'db:latitude', 'db:length', 'db:longitude', 'db:rsvp', 'db:scheduledTime', 'db:state', 'db:street', 'db:title', 'db:venue_name', 'db:zipcode', 'description', 'docs', 'guid', 'item', 'language', 'link', 'pubDate', 'rss', 'title', ]
# convert to footprint format
s = '<?xml version="1.0" ?>'
s += '<FootprintFeed schemaVersion="0.1">'
s += '<FeedInfo>'
# TODO: assign provider IDs?
s += '<providerID>101</providerID>'
s += '<providerName>usaservice.org</providerName>'
s += '<feedID>1</feedID>'
s += '<createdDateTime>%s</createdDateTime>' % xmlh.current_ts()
s += '<providerURL>http://www.usaservice.org/</providerURL>'
s += '<description>Syndicated events</description>'
# TODO: capture ts -- use now?!
s += '</FeedInfo>'
numorgs = numopps = 0
# hardcoded: Organization
s += '<Organizations>'
s += '<Organization>'
s += '<organizationID>0</organizationID>'
s += '<nationalEIN></nationalEIN>'
s += '<name></name>'
s += '<missionStatement></missionStatement>'
s += '<description></description>'
s += '<location><city></city><region></region><postalCode></postalCode></location>'
s += '<organizationURL></organizationURL>'
s += '<donateURL></donateURL>'
s += '<logoURL></logoURL>'
s += '<detailURL></detailURL>'
s += '</Organization>'
numorgs += 1
s += '</Organizations>'
s += '<VolunteerOpportunities>'
instr = re.sub(r'<(/?db):', r'<\1_', instr)
for i, line in enumerate(instr.splitlines()):
if (maxrecs>0 and i>maxrecs):
break
xmlh.print_rps_progress("opps", progress, i, maxrecs)
item = xmlh.simple_parser(line, known_elnames, progress=False)
# unmapped: db_rsvp (seems to be same as link, but with #rsvp at end of url?)
# unmapped: db_host (no equivalent?)
# unmapped: db_county (seems to be empty)
# unmapped: attendee_count
# unmapped: guest_total
# unmapped: db_title (dup of title, above)
s += '<VolunteerOpportunity>'
s += '<volunteerOpportunityID>%s</volunteerOpportunityID>' % (xmlh.get_tag_val(item, "guid"))
# hardcoded: sponsoringOrganizationID
s += '<sponsoringOrganizationIDs><sponsoringOrganizationID>0</sponsoringOrganizationID></sponsoringOrganizationIDs>'
# hardcoded: volunteerHubOrganizationID
s += '<volunteerHubOrganizationIDs><volunteerHubOrganizationID>0</volunteerHubOrganizationID></volunteerHubOrganizationIDs>'
s += '<title>%s</title>' % (xmlh.get_tag_val(item, "title"))
s += '<abstract>%s</abstract>' % (xmlh.get_tag_val(item, "abstract"))
s += '<volunteersNeeded>-8888</volunteersNeeded>'
dbscheduledTimes = item.getElementsByTagName("db_scheduledTime")
if (dbscheduledTimes.length != 1):
print datetime.now(), "parse_usaservice: only 1 db_scheduledTime supported."
return None
dbscheduledTime = dbscheduledTimes[0]
s += '<dateTimeDurations><dateTimeDuration>'
length = xmlh.get_tag_val(dbscheduledTime, "db_length")
if length == "" or length == "-1":
s += '<openEnded>Yes</openEnded>'
else:
s += '<openEnded>No</openEnded>'
date, time = xmlh.get_tag_val(dbscheduledTime, "db_dateTime").split(" ")
s += '<startDate>%s</startDate>' % (date)
# TODO: timezone???
s += '<startTime>%s</startTime>' % (time)
s += '</dateTimeDuration></dateTimeDurations>'
dbaddresses = item.getElementsByTagName("db_address")
if (dbaddresses.length != 1):
print datetime.now(), "parse_usaservice: only 1 db_address supported."
return None
dbaddress = dbaddresses[0]
s += '<locations><location>'
s += '<name>%s</name>' % (xmlh.get_tag_val(item, "db_venue_name"))
s += '<streetAddress1>%s</streetAddress1>' % (xmlh.get_tag_val(dbaddress, "db_street"))
s += '<city>%s</city>' % (xmlh.get_tag_val(dbaddress, "db_city"))
s += '<region>%s</region>' % (xmlh.get_tag_val(dbaddress, "db_state"))
s += '<country>%s</country>' % (xmlh.get_tag_val(dbaddress, "db_country"))
s += '<postalCode>%s</postalCode>' % (xmlh.get_tag_val(dbaddress, "db_zipcode"))
s += '<latitude>%s</latitude>' % (xmlh.get_tag_val(item, "db_latitude"))
s += '<longitude>%s</longitude>' % (xmlh.get_tag_val(item, "db_longitude"))
s += '</location></locations>'
type = xmlh.get_tag_val(item, "db_eventType")
s += '<categoryTags><categoryTag>%s</categoryTag></categoryTags>' % (type)
s += '<contactName>%s</contactName>' % xmlh.get_tag_val(item, "db_host")
s += '<detailURL>%s</detailURL>' % (xmlh.get_tag_val(item, "link"))
s += '<description>%s</description>' % (xmlh.get_tag_val(item, "description"))
pubdate = xmlh.get_tag_val(item, "pubDate")
if re.search("[0-9][0-9] [A-Z][a-z][a-z] [0-9][0-9][0-9][0-9]", pubdate):
# TODO: parse() is ignoring timzone...
ts = dateutil.parser.parse(pubdate)
pubdate = ts.strftime("%Y-%m-%dT%H:%M:%S")
s += '<lastUpdated>%s</lastUpdated>' % (pubdate)
s += '</VolunteerOpportunity>'
numopps += 1
s += '</VolunteerOpportunities>'
s += '</FootprintFeed>'
#s = re.sub(r'><([^/])', r'>\n<\1', s)
return s, numorgs, numopps
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
appengine main() for when the site is down.
"""
# note: view classes aren inherently not pylint-compatible
# pylint: disable-msg=C0103
# pylint: disable-msg=W0232
# pylint: disable-msg=E1101
# pylint: disable-msg=R0903
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
class SiteDownHandler(webapp.RequestHandler):
  """Bounces every request to the static outage page.

  A redirect (rather than serving the page inline) keeps search engines
  from indexing the outage page as our homepage."""
  def get(self):
    """Handle GET by redirecting to the static site-down page."""
    self.redirect("/site_down.html")
def main():
  """Entry point: route every URL to the site-down handler."""
  application = webapp.WSGIApplication([(r'/.*', SiteDownHandler)], debug=False)
  run_wsgi_app(application)
if __name__ == "__main__":
  main()
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Custom filters and tags for dates.
"""
from datetime import date
from django.utils import dateformat
from google.appengine.ext.webapp import template
def custom_date_format(value):
  """Render a date as month name + day (no leading zero), appending the
  4-digit year only for dates in a future year.

  Falsy values render as ''.  Dates from years before the current one
  render as the literal string 'Present'.
  NOTE(review): returning 'Present' for *past* years looks inverted
  relative to the stated intent -- confirm this is deliberate.
  """
  if not value:
    return ''
  current_year = date.today().year
  if value.year < current_year:
    return 'Present'
  if value.year == current_year:
    return dateformat.format(value, 'F j')
  return dateformat.format(value, 'F j, Y')
# Prevents pylint from triggering on the 'register' name. Django expects this
# module to have a 'register' variable.
# pylint: disable-msg=C0103
register = template.create_template_register()
# Registered under the function's own name ('custom_date_format').
register.filter(custom_date_format)
| Python |
# http://code.djangoproject.com/wiki/BasicComparisonFilters
# with modifications to work in AppEngine:
# http://4.flowsnake.org/archives/459
# from django.template import Library
from google.appengine.ext import webapp
def gt(value, arg):
  """Template filter: True when value is greater than int(arg)."""
  threshold = int(arg)
  return value > threshold
def lt(value, arg):
  """Template filter: True when value is less than int(arg)."""
  threshold = int(arg)
  return value < threshold
def gte(value, arg):
  """Template filter: True when value is greater than or equal to int(arg)."""
  threshold = int(arg)
  return value >= threshold
def lte(value, arg):
  """Template filter: True when value is less than or equal to int(arg)."""
  threshold = int(arg)
  return value <= threshold
def length_gt(value, arg):
  """Template filter: True when len(value) is greater than int(arg)."""
  threshold = int(arg)
  return len(value) > threshold
def length_lt(value, arg):
  """Template filter: True when len(value) is less than int(arg)."""
  threshold = int(arg)
  return len(value) < threshold
def length_gte(value, arg):
  """Template filter: True when len(value) is at least int(arg)."""
  threshold = int(arg)
  return len(value) >= threshold
def length_lte(value, arg):
  """Template filter: True when len(value) is at most int(arg)."""
  threshold = int(arg)
  return len(value) <= threshold
# This was not in the original library.
def isin(a, b):
  """Template filter: report whether a occurs in container b."""
  contained = a in b
  return contained
# register = Library()
# Django expects a module-level 'register'; webapp's template module
# provides the AppEngine-compatible equivalent of django's Library().
register = webapp.template.create_template_register()
register.filter('gt', gt)
register.filter('lt', lt)
register.filter('gte', gte)
register.filter('lte', lte)
register.filter('length_gt', length_gt)
register.filter('length_lt', length_lt)
register.filter('length_gte', length_gte)
register.filter('length_lte', length_lte)
register.filter('isin', isin)
# http://w.holeso.me/2008/08/a-simple-django-truncate-filter/
# with modifications to work in AppEngine:
# http://4.flowsnake.org/archives/459
"""
Custom filters and tags for strings.
"""
from google.appengine.ext import webapp
def truncate_chars(value, max_length):
  """Truncate value to at most max_length characters, appending an ellipsis.

  When the cut lands mid-word, retreat to the previous space -- but never
  clip more than 40 characters; in that case cut mid-word with a bare
  '...' (no separating space)."""
  if len(value) <= max_length:
    return value
  clipped = value[:max_length]
  suffix = ' ...'
  if value[max_length] != ' ':
    # TODO: Make sure that only whitespace in the data records
    # is ascii spaces.
    boundary = clipped.rfind(' ')
    max_chars_to_clip = 40  # pylint: disable-msg=C0103
    if boundary < max_length - max_chars_to_clip:
      boundary = max_length - max_chars_to_clip
      suffix = '...'  # No separating space
    clipped = clipped[:boundary]
  return clipped + suffix
def as_letter(value):
  """Map an integer 0..25 to 'A'..'Z'; anything out of range maps to ''."""
  if value < 0 or value >= 26:
    return ''
  return chr(ord('A') + value)
def bold_query(value, query):
  """Wrap every term of the search query in <strong> tags within value."""
  if not query:
    return value
  result = value
  for term in split_query(query):
    result = bold_term(result, term)
  return result
def bold_term(value, term):
  """Return value with every case-insensitive occurrence of term bolded."""
  needle = term.lower()
  pieces = []
  remaining = value
  while True:
    idx = remaining.lower().find(needle)
    if idx == -1:
      pieces.append(remaining)
      break
    end = idx + len(needle)
    pieces.append(remaining[:idx])
    pieces.append('<strong>%s</strong>' % remaining[idx:end])
    remaining = remaining[end:]
  return ''.join(pieces)
def split_query(query):
  """Split a search query into the list of unique terms to bold.

  Quoted phrases become single terms unless negated with a preceding '-';
  AND/OR connectives are dropped, and '-excluded' words are removed.

  Args:
    query: raw user search string.
  Returns:
    List of unique term strings (order unspecified).
  """
  terms = []
  # Add terms in quotes
  while query.count('"') >= 2:
    first = query.find('"')
    second = query.find('"', first+1)
    # Check if the term should be excluded ('-' before the opening quote).
    start = first-1
    # Bug fix: stop at the left edge of the string.  Previously a query
    # starting with '"' made start go negative, so query[start] silently
    # wrapped around and inspected the *last* character.
    while start >= 0 and query[start].isspace():
      start -= 1
    if start < 0 or query[start] != '-':
      terms.append(query[first+1:second])
    query = '%s %s' % (query[0:start+1], query[second+1:len(query)])
  # Remove ANDs and ORs - we only want a list of terms to bold,
  # so ANDs and ORs don't matter
  query = query.replace(" AND "," ")
  query = query.replace(" OR "," ")
  # Remove items excluded from the search
  while query.count('-') >= 1:
    loc = query.find('-')
    remainder = query[loc+1:].split(None, 1) # find the text after the -
    if len(remainder) > 1: # remove the excluded term from the query
      query = '%s %s' % (query[0:loc], remainder[1])
    else: # add the - as a term if nothing appears after it
      terms.append('-')
      query = query[0:loc]
  terms += query.split() # Add other terms, separated by spaces
  return list(set(terms)) # Return only the unique terms
# Prevents pylint from triggering on the 'register' name. Django expects this
# module to have a 'register' variable.
# pylint: disable-msg=C0103
register = webapp.template.create_template_register()
register.filter('truncate_chars', truncate_chars)
register.filter('as_letter', as_letter)
register.filter('bold_query', bold_query)
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous utility functions.
"""
import hmac
import logging
import os
from xml.dom import minidom
class Error(Exception):
  """Base exception for this module."""
  pass
class InvalidValue(Error):
  """Raised when a request argument fails its validation pattern."""
  pass
def get_xml_dom_text(node):
  """Concatenate and return the text of node's direct TEXT_NODE children.

  Returns '' when the node has no text children."""
  parts = [child.data for child in node.childNodes
           if child.nodeType == minidom.Node.TEXT_NODE]
  return ''.join(parts)
def get_xml_dom_text_ns(node, namespace, tagname):
  """Text of the first descendant matching namespace/tagname.

  Returns None when no matching element is found."""
  matches = node.getElementsByTagNameNS(namespace, tagname)
  if not matches:
    return None
  return get_xml_dom_text(matches[0])
def xml_elem_text(node, tagname, default=None):
  """Text of the first descendant element named tagname, or default."""
  for elem in node.getElementsByTagName(tagname):
    return get_xml_dom_text(elem)
  return default
# Cached hmac object, keyed lazily on first use.
hmac_master = None
def signature(value):
  """Returns a signature for a param so we can compare it later.

  Examples: Signature(url) is compared in the url redirector to prevent
  other sites from using it. Signature(user_cookie) is used to limit
  XSRF attacks.  Returns None for falsy input.
  """
  if not value:
    return None
  # Super cheesy way of avoiding storing a secret key: use the deployed
  # version id.  It resets every minor update, but that's OK for now.
  global hmac_master
  if hmac_master is None:
    hmac_master = hmac.new(os.getenv('CURRENT_VERSION_ID'))
  # Copying the keyed master is cheaper than re-keying each call.
  signer = hmac_master.copy()
  signer.update(value)
  return signer.hexdigest()
def get_last_arg(request, argname, default):
  """Return the last occurrence of a repeated urlparam, or default.

  Later args override earlier ones, which is easier for developers
  (vs. earlier ones taking precedence)."""
  values = request.get(argname, allow_multiple=True)
  return values[-1] if values else default
def get_verified_arg(pattern, request, argname, default=None, last=True):
  """Return the (last) requested argument, if it passes the pattern.

  Args:
    pattern: A compiled re pattern, e.g. re.compile('[a-z]*$')
    request: webob request
    argname: argument to look for
    default: value to return if not found
    last: use the last argument (True) or the first argument (False)?
  Returns:
    Value in the get param, if present and valid. default if not present.
  Raises:
    InvalidValue exception if present and not valid.
  """
  values = request.get(argname, allow_multiple=True)
  if not values:
    return default
  if last:
    value = values[-1]
  else:
    # Bug fix: this previously read 'value[0]' (an unbound name), which
    # raised NameError whenever last=False was requested.
    value = values[0]
  if not pattern.match(value):
    raise InvalidValue
  return value
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities that support views.py.
"""
import logging
import models
import modelutils
from django.utils import simplejson
def get_user_interests(user, remove_no_interest):
  """Get the opportunities a user has expressed interest in.

  Args:
    user: userinfo.User of a user (may be None/falsy).
    remove_no_interest: Filter out items with no expressed interest.
  Returns:
    Dict with 'interests' (opp_id -> expressed interest value) and
    'ordered_event_ids' (opp_ids in query order).
  """
  user_interests = {}
  # Bug fix: initialize outside the 'if user' branch so a falsy user does
  # not raise NameError on the return statement below.
  ordered_event_ids = []
  if user:
    user_info = user.get_user_info()
    # Note: If we want a limit, tack "fetch(nnn)" on the end of the query.
    # Also note the descending order, most-recent-first.
    # TODO: Make liked_last_modified work (complains about missing index)
    #interests = models.UserInterest.all().filter('user = ', user_info)\
    #    .order('-liked_last_modified')
    interests = models.UserInterest.all().filter('user = ', user_info)
    for interest in interests:
      ordered_event_ids.append(interest.opp_id)
      interest_value = getattr(interest, models.USER_INTEREST_LIKED)
      if not remove_no_interest or interest_value != 0:
        user_interests[interest.opp_id] = interest_value
  return {'interests': user_interests, 'ordered_event_ids': ordered_event_ids}
def get_interest_for_opportunities(opp_ids):
  """Get the aggregated interest statistics for a set of opportunities.

  Args:
    opp_ids: list of volunteer opportunity ids.
  Returns:
    Dict of volunteer opportunity id -> aggregated interest value.
  """
  aggregated = {}
  stats_by_id = modelutils.get_by_ids(models.VolunteerOpportunityStats,
                                      opp_ids)
  for (opp_id, stats) in stats_by_id.iteritems():
    if stats:
      aggregated[opp_id] = getattr(stats, models.USER_INTEREST_LIKED)
  return aggregated
def get_annotated_results(user, result_set):
  """Get results annotated with the interests of this user and all users.

  Args:
    user: User object returned by userinfo.get_user()
    result_set: A search.SearchResultSet.
  Returns:
    The incoming result set, annotated with user-specific info.
  NOTE(review): get_user_interests() returns a wrapper dict
  ({'interests': ..., 'ordered_event_ids': ...}); annotate_results()
  looks up item_ids directly in what it's given, so those lookups hit
  the wrapper's keys -- confirm this is intended.
  """
  # ids of all found items
  item_ids = [res.item_id for res in result_set.results]
  # this user's interests
  my_interests = get_user_interests(user, True)
  # everyone else's interests
  everyone_interests = get_interest_for_opportunities(item_ids)
  return annotate_results(my_interests, everyone_interests, result_set)
def annotate_results(user_interests, others_interests, result_set):
  """Annotate results with the provided interests.

  Args:
    user_interests: dict keyed by item_id with this user's interest
      values. Can be None.
    others_interests: dict keyed by item_id with aggregate interest
      counts. Can be None.
    result_set: A search.SearchResultSet.
  Returns:
    The incoming result set, annotated in place.
  """
  for result in result_set.results:
    item = result.item_id
    if user_interests and item in user_interests:
      result.interest = user_interests[item]
    if others_interests and item in others_interests:
      logging.debug("others interest in %s = %s " %
                    (item, others_interests[item]))
      # TODO: Consider updating the base url here if it's changed.
      result.interest_count = others_interests[item]
  return result_set
def get_friends_data_for_snippets(user_info):
  """Preps the data required to render the "My Events" aka "Profile" template.

  Args:
    user_info: userinfo.User for the current user.
  Returns:
    Dictionary of data required to render the template.
  """
  # Get the list of all my friends.
  friends = user_info.load_friends()
  # For each friend, get the events that friend likes or is doing, and
  # cross-reference each event to the list of its interested users.
  friend_opp_count = {}
  friends_by_event_id = {}
  for friend in friends:
    # Renamed from 'dict' -- the original shadowed the builtin.
    friend_interests = get_user_interests(friend, True)
    for event_id in friend_interests['interests']:
      friend_opp_count[event_id] = friend_opp_count.get(event_id, 0) + 1
      uids = friends_by_event_id.get(event_id, [])
      uids.append(friend.user_id)
      friends_by_event_id[event_id] = uids
  friends_by_event_id_js = simplejson.dumps(friends_by_event_id)
  view_vals = {
    'friends': friends,
    'friends_by_event_id_js': friends_by_event_id_js,
  }
  return view_vals
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
class to represent search results.
"""
import re
import urlparse
import datetime
import time
import hashlib
import logging
from xml.sax.saxutils import escape
from fastpageviews import pagecount
import models
import modelutils
import utils
def get_rfc2822_datetime(when = None):
  """Format a UTC time struct as an RFC 2822 date string.

  GAE server localtime appears to be UTC and timezone %Z is an empty
  string, so to satisfy RFC date format requirements in output=rss we
  append the fixed '+0000' offset ourselves.
  ref: http://feedvalidator.org/docs/error/InvalidRFC2822Date.html
  ref: http://www.feedvalidator.org to check feed validity
  eg, Tue, 10 Feb 2009 17:04:28 +0000

  Args:
    when: optional time.struct_time in UTC; defaults to now.
  """
  timestamp = when or time.gmtime()
  return '%s +0000' % time.strftime("%a, %d %b %Y %H:%M:%S", timestamp)
def js_escape(string):
  """Backslash-escape characters so the string can be embedded in javascript.

  TODO: re.escape is overly aggressive and is messing some snippets up;
  only single and double quotes actually need escaping."""
  escaped = re.escape(string)
  return escaped
class SearchResult(object):
  """class to hold the results of a search to the backend."""
  def __init__(self, url, title, snippet, location, item_id, base_url):
    # TODO: HACK: workaround for issue 404-- broken servegov links
    # hack added here so the urlsig's come out correctly and the fix
    # applies everywhere including xml_url, API calls, etc.
    url = re.sub(
      # regexp written to be very specific to myproject.serve.gov
      # and myproject.nationalservice.gov (aka mlk_day), and not
      # break once the feed changes
      r'(myproject[.].+?[.]gov.+?)subProjectId', r'\1&subProjectId', url)
    # TODO: Consider using kwargs or something to make this more generic.
    self.url = url
    # Signature of the url; populated later (see dedup's assign_merge_keys).
    self.url_sig = None
    self.title = title
    self.snippet = snippet
    self.location = location
    self.item_id = item_id
    self.base_url = base_url
    # app engine does not currently support the escapejs filter in templates
    # so we have to do it our selves for now
    self.js_escaped_title = js_escape(title)
    self.js_escaped_snippet = js_escape(snippet)
    # TODO: find out why this is not unique
    # hack to avoid guid duplicates
    self.xml_url = escape(url) + "#" + self.item_id
    parsed_url = urlparse.urlparse(url)
    # Hostname only, used as a short display form of the url.
    self.url_short = parsed_url.netloc
    if url.find("volunteer.gov/gov") >= 0:
      # hack for volunteer.gov/gov, which is different from
      # volunteer.gov/ (which redirects to serve.net)
      self.url_short += "/gov"
    # Hostname with any leading 'www.' stripped, for display.
    self.host_website = parsed_url.netloc
    if self.host_website.startswith("www."):
      self.host_website = self.host_website[4:]
    # user's expressed interest
    self.interest = None
    # stats from other users.
    self.interest_count = 0
    # TODO: implement quality score
    self.quality_score = 0.1
    self.impressions = 0
    self.pubdate = get_rfc2822_datetime()
    # Ranking score and its diagnostics; see set_score().
    self.score = 0.0
    self.score_notes = ""
    self.score_str = ""
  def set_score(self, score, notes):
    """assign score value-- TODO: consider moving scoring code to this class."""
    self.score = score
    self.score_notes = notes
    # Pre-rendered 4-significant-digit string for templates.
    self.score_str = "%.4g" % (score)
def compare_result_dates(dt1, dt2):
  """cmp-style comparator on t_startdate; private helper for dedup()."""
  lhs = dt1.t_startdate
  rhs = dt2.t_startdate
  # (a > b) - (a < b) yields 1, -1 or 0, matching the original if/elif chain.
  return (lhs > rhs) - (lhs < rhs)
class SearchResultSet(object):
  """Contains a list of SearchResult objects.

  Attributes:
    results: List of SearchResults. Required during initialization.
    merged_results: This is populated after a call to dedup(). It will
      contain the original results, after merging of duplicate entries.
    clipped_results: This is populated after a call to clip_merged_results.
      It will contain the merged_results, clamped to a start-index and
      max-length (the 'start' and 'num' query parameters).
    query_url_encoded: URL query used to retrieve data from backend.
      For debugging.
    query_url_unencoded: urllib.unquote'd version of the above.
    num_merged_results: Number of merged results after a dedup()
      operation. Used by Django.
    estimated_merged_results: estimated number of total results accounting
      for merging, given result_set.estimated_backend_results
  """
  def __init__(self, query_url_unencoded, query_url_encoded, results):
    """Store the query strings and raw results; derived fields start empty
    and are filled in by dedup() / clip_*()."""
    self.query_url_unencoded = query_url_unencoded
    self.query_url_encoded = escape(query_url_encoded)
    self.results = results
    self.num_results = 0
    self.estimated_results = 0
    self.num_merged_results = 0
    self.merged_results = []
    self.clipped_results = []
    self.clip_start_index = 0 # Index at which clipped_results begins.
    self.has_more_results = False # After clipping, are there more results?
    self.estimated_merged_results = 0
    self.pubdate = get_rfc2822_datetime()
    self.last_build_date = self.pubdate
  def append_results(self, results):
    """append a results array to this results set and rerun dedup()"""
    self.num_results = len(self.results) + len(results.results)
    self.results.extend(results.results)
    # Discard previous merge state; dedup() rebuilds it from scratch.
    self.merged_results = []
    self.clipped_results = []
    self.dedup()
  def clip_set(self, start, num, result_set):
    """Extract just the slice of merged results from start to start+num.
    No need for bounds-checking -- python list slicing does that
    automatically. Indexed from 1."""
    start -= 1 # Adjust to zero indexing.
    self.clipped_results = result_set[start:start + num]
    self.clip_start_index = start
    if self.estimated_merged_results > start + num:
      self.has_more_results = True
  def clip_merged_results(self, start, num):
    """clip to start/num using the merged results."""
    logging.debug("clip_merged_results: start=%d num=%d has_more=%s "
                  "(merged len = %d)" %
                  (start, num, str(self.has_more_results),
                   len(self.merged_results)))
    return self.clip_set(start, num, self.merged_results)
  def clip_results(self, start, num):
    """clip to start/num using the unmerged (original) results."""
    return self.clip_set(start, num, self.results)
  def track_views(self, num_to_incr=1):
    """increment impression counts for items in the set."""
    logging.debug(str(datetime.datetime.now())+" track_views: start")
    for primary_res in self.clipped_results:
      #logging.debug("track_views: key="+primary_res.merge_key)
      primary_res.merged_impressions = pagecount.IncrPageCount(
        pagecount.VIEWS_PREFIX+primary_res.merge_key, num_to_incr)
      # TODO: for now (performance), only track merge_keys, not individual items
      #primary_res.impressions = pagecount.IncrPageCount(primary_res.item_id, 1)
      #for res in primary_res.merged_list:
      #  res.impressions = pagecount.IncrPageCount(res.item_id, 1)
    logging.debug(str(datetime.datetime.now())+" track_views: end")
  def dedup(self):
    """modify in place, merged by title and snippet."""
    def safe_str(instr):
      """private helper function for dedup(): best-effort str()
      conversion, silently dropping unconvertible characters."""
      return_val = ""
      try:
        return_val = str(instr)
      except ValueError:
        for inchar in instr:
          try:
            safe_char = str(inchar)
            return_val += safe_char
          except ValueError:
            continue # discard
      return return_val
    def assign_merge_keys():
      """private helper function for dedup()"""
      for res in self.results:
        # Merge keys are M + md5hash(some stuff). This distinguishes them from
        # the stable IDs, which are just md5hash(someotherstuff).
        res.merge_key = 'M' + hashlib.md5(safe_str(res.title) +
                                          safe_str(res.snippet) +
                                          safe_str(res.location)).hexdigest()
        res.url_sig = utils.signature(res.url + res.merge_key)
        # we will be sorting & de-duping the merged results
        # by start date so we need an epoch time
        res.t_startdate = res.startdate.timetuple()
        # month_day used by django
        res.month_day = (time.strftime("%B", res.t_startdate) + " " +
                         str(int(time.strftime("%d", res.t_startdate))))
        # this is for the list of any results merged with this one
        res.merged_list = []
        res.merged_debug = []
    def merge_result(res):
      """private helper function for dedup()"""
      merged = False
      for i, primary_result in enumerate(self.merged_results):
        if primary_result.merge_key == res.merge_key:
          # merge it
          listed = False
          for merged_result in self.merged_results[i].merged_list:
            # do we already have this date + url?
            # NOTE(review): this compares each merged entry against the
            # *primary* result's date/url rather than against res --
            # confirm that is the intended duplicate test.
            if (merged_result.t_startdate == self.merged_results[i].t_startdate
                and merged_result.url == self.merged_results[i].url):
              listed = True
              break
          if not listed:
            self.merged_results[i].merged_list.append(res)
            self.merged_results[i].merged_debug.append(res.location + ":" +
                res.startdate.strftime("%Y-%m-%d"))
          merged = True
          break
      if not merged:
        self.merged_results.append(res)
    def compute_more_less():
      """Now we are making something for the django template to display
      for the merged list we only show the unique locations and dates
      but we also use the url if it is unique too
      for more than 2 extras we will offer "more" and "less"
      we will be showing the unique dates as "Month Date"."""
      for i, res in enumerate(self.merged_results):
        res.idx = i + 1
        if len(res.merged_list) > 1:
          res.merged_list.sort(cmp=compare_result_dates)
          location_was = res.location
          res.less_list = []
          if len(res.merged_list) > 2:
            more_id = "more_" + str(res.idx)
            res.more_id = more_id
            res.more_list = []
            # NOTE(review): 'more' is only initialized on this branch;
            # with exactly two merged entries the 'elif more > 0' below
            # can observe a stale or unbound name -- confirm.
            more = 0
            res.have_more = True
          for merged_result in res.merged_list:
            def make_linkable(text, merged_result, res):
              """generate HTML hyperlink for text if merged_result != res."""
              if merged_result.url != res.url:
                return '<a href="' + merged_result.url + '">' + text + '</a>'
              else:
                return text
            entry = ""
            if merged_result.location != location_was:
              location_was = merged_result.location
              entry += ('<br/>'
                        + make_linkable(merged_result.location, merged_result, res)
                        + ' on ')
            elif more > 0:
              entry += ', '
            entry += make_linkable(merged_result.month_day, merged_result, res)
            # First three entries render inline; the rest hide behind "more".
            if more < 3:
              res.less_list.append(entry)
            else:
              res.more_list.append(entry)
            more += 1
    def remove_blacklisted_results():
      """Private helper function for dedup().
      Looks up stats for each result and deletes blacklisted results."""
      opp_ids = [result.merge_key for result in self.results]
      opp_stats = modelutils.get_by_ids(models.VolunteerOpportunityStats,
                                        opp_ids)
      unknown_keys = set()
      nonblacklisted_results = []
      for result in self.results:
        if result.merge_key not in opp_stats:
          unknown_keys.add(result.merge_key)
          nonblacklisted_results.append(result)
        elif not opp_stats[result.merge_key].blacklisted:
          nonblacklisted_results.append(result)
      self.results = nonblacklisted_results
      if unknown_keys:
        # This probably shouldn't be done right here... but we'll stuff these
        # in the memcache to prevent future datastore lookups.
        logging.debug('Found unblacklisted items which had no memcache or ' +
                      'datastore entries. Adding to memcache. Items: %s',
                      unknown_keys)
        models.VolunteerOpportunityStats.add_default_entities_to_memcache(
            unknown_keys)
    # dedup() main code
    assign_merge_keys()
    remove_blacklisted_results()
    for res in self.results:
      merge_result(res)
    compute_more_less()
    self.num_merged_results = len(self.merged_results)
    # Scale the backend estimate by the observed merge ratio.
    if len(self.results) == 0:
      self.estimated_merged_results = self.estimated_results
    else:
      self.estimated_merged_results = int(self.estimated_results *
                                          self.num_merged_results / len(self.results))
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Various test pages.
# view classes are inherently not pylint-compatible
# pylint: disable-msg=C0103
# pylint: disable-msg=W0232
# pylint: disable-msg=E1101
# pylint: disable-msg=R0903
import cgi
import datetime
import re
from google.appengine.ext import db
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
import models
import userinfo
import utils
# require_admin is copied from views.py so we don't have to import all of it.
def require_admin(handler_method):
  """Decorator ensuring the current App Engine user is an administrator."""
  def wrapper(self):
    """Reject non-admins with a 401 and a sign-in link."""
    if users.is_current_user_admin():
      return handler_method(self)
    self.error(401)
    html = '<html><body><a href="%s">Sign in</a></body></html>'
    self.response.out.write(html % (users.create_login_url(self.request.url)))
  return wrapper
class TestLogin(webapp.RequestHandler):
  """test user login sequence."""
  # This is still useful for testing but we'll limit it to just admins.
  @require_admin
  def get(self):
    """HTTP get method: dump the current login state and a test-login form."""
    user = userinfo.get_user(self.request)
    self.response.out.write('Login info<ul>')
    if user:
      self.response.out.write('<li>Account type: %s'
                              '<li>User_id: %s'
                              '<li>User_info: %s'
                              '<li>Name: %s'
                              '<li>Moderator: %s'
                              '<li>Image: %s <img src="%s" />' %
                              (user.account_type,
                               user.user_id,
                               user.get_user_info(),
                               user.display_name,
                               user.get_user_info().moderator,
                               user.thumbnail_url,
                               user.thumbnail_url))
    else:
      self.response.out.write('<li>Not logged in.')
    self.response.out.write('<li>Total # of users: %s' %
                            models.UserStats.get_count())
    self.response.out.write('</ul>')
    # Submitting an empty userid acts as a logout (see post()).
    self.response.out.write('<form method="POST">'
                            'Userid: <input name="userid" />'
                            '<input name="Test Login" type="submit" />'
                            '(Blank form = logout)'
                            '</form>')
  # Characters permitted in a fake test userid.
  USERID_REGEX = re.compile('[a-z0-9_@+.-]*$')
  @require_admin
  def post(self):
    """HTTP post method: set (or clear) the test-login cookie."""
    try:
      userid = utils.get_verified_arg(self.USERID_REGEX, self.request,
                                      'userid')
    except utils.InvalidValue:
      self.error(400)
      self.response.out.write('invalid userid, must be ^%s' %
                              self.USERID_REGEX.pattern)
      return
    # The test login cookie stores the raw userid; empty means logged out.
    self.response.headers.add_header('Set-Cookie',
                                     'footprinttest=%s;path=/' % userid)
    self.response.out.write('You are logged ')
    if userid:
      self.response.out.write('in!')
    else:
      self.response.out.write('out!')
    self.response.out.write('<br><a href="%s">Continue</a>' % self.request.url)
class TestModerator(webapp.RequestHandler):
  """test moderation functionality."""
  def get(self):
    """HTTP get method: show moderator status and the request form."""
    user = userinfo.get_user(self.request)
    if not user:
      self.response.out.write('Not logged in.')
      return
    self.response.out.write('Moderator Request<ul>')
    if user.get_user_info().moderator:
      self.response.out.write('<li>You are already a moderator.')
    if user.get_user_info().moderator_request_email:
      # TODO: This is very vulnerable to html injection.
      self.response.out.write('<li>We have received your request'
                              '<li>Your email: %s'
                              '<li>Your comments: %s' %
                              (cgi.escape(user.get_user_info().moderator_request_email),
                               cgi.escape(user.get_user_info().moderator_request_desc)))
    self.response.out.write('</ul>')
    self.response.out.write(
        '<form method="POST">'
        'Your email address: <input name="email" /><br>'
        'Why you want to be a moderator: <br><textarea name="desc"></textarea>'
        '<br><input type="submit" name="submit"/>'
        '</form>')
  def post(self):
    """HTTP post method: record a moderator request on the user record."""
    # todo: xsrf protection
    user = userinfo.get_user(self.request)
    if not user:
      self.response.out.write('Not logged in.')
      return
    try:
      # This regex is a bit sloppy but good enough.
      email = utils.get_verified_arg(re.compile('[a-z0-9_+.-]+@[a-z0-9.-]+$'),
                                     self.request, 'email')
      desc = self.request.get('desc')
    except utils.InvalidValue:
      self.error(400)
      self.response.out.write('<div style="color:red">' +
                              'Valid email address required.</div>')
      return
    # NOTE(review): 'email'/'desc' above are validated/fetched but the raw
    # request values are re-read and stored below instead -- confirm.
    user_info = user.get_user_info()
    user_info.moderator_request_email = self.request.get('email')
    user_info.moderator_request_desc = self.request.get('desc')
    if not user_info.moderator_request_admin_notes:
      user_info.moderator_request_admin_notes = ''
    # Append a timestamped audit-trail line for admins.
    user_info.moderator_request_admin_notes += (
        '%s: Requested.\n' %
        datetime.datetime.isoformat(datetime.datetime.now()))
    user_info.put()
    return self.get()
# Route table for the test pages; debug=True enables stack traces.
APP = webapp.WSGIApplication([
    ('/test/login', TestLogin),
    ('/test/moderator', TestModerator),
    ], debug=True)
def main():
  """main() for standalone execution."""
  run_wsgi_app(APP)
if __name__ == '__main__':
  main()
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
API query parameters. Defined here as symbolic constants to ensure
typos become compile-time errors.
"""
# Number of results to return, and its accepted bounds.
PARAM_NUM = 'num'
CONST_MIN_NUM = 1
CONST_MAX_NUM = 999
# 1-indexed start offset into the result set, and its accepted bounds.
PARAM_START = 'start'
CONST_MIN_START = 1
CONST_MAX_START = 1000
# Output format (e.g. html, rss, xml).
PARAM_OUTPUT = 'output'
# Free-text search query.
PARAM_Q = 'q'
# Sort order for results.
PARAM_SORT = 'sort'
# Cache-control override.
PARAM_CACHE = 'cache'
# Comma-separated list of fields to include in the response.
PARAM_FIELDS = 'fields'
# E.g., 'today'. The presence of this param implies that 'vol_startdate'
# and 'vol_enddate' will be automatically calculated, overriding
# the values of those two params if they were passed in also.
PARAM_TIMEPERIOD = 'timeperiod'
# the ratio of actual results to request from the backend--
# typical values range from 1.0 to 10.0, where larger numbers
# provide better quality results at a linear increase in latency
# This internal value is exposed as an URL parameter so we can
# run performance tests, please email engineering before using
# this in apps, so we don't change it later.
PARAM_OVERFETCH_RATIO = 'overfetch'
# TODO: define other constants in api.py, eg...
CONST_MIN_OVERFETCH_RATIO = 1.0
CONST_MAX_OVERFETCH_RATIO = 10.0
# Maximum backend fetch deadline, in seconds.
CONST_MAX_FETCH_DEADLINE = 10
# Volunteer-opportunity filter params: location, search radius,
# date window, duration, timezone, data provider and start weekday.
PARAM_VOL_LOC = 'vol_loc'
PARAM_VOL_DIST = 'vol_dist'
PARAM_VOL_STARTDATE = 'vol_startdate'
PARAM_VOL_ENDDATE = 'vol_enddate'
PARAM_VOL_DURATION = 'vol_duration'
PARAM_VOL_TZ = 'vol_tz'
PARAM_VOL_PROVIDER = 'vol_provider'
PARAM_VOL_STARTDAYOFWEEK = 'vol_startdayofweek'
| Python |
#!/usr/bin/python2.5
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration class.
This class provides a dictionary of run-time configuration options for the
application.
You can edit the values in the datastore editor of the admin console or other
datastore editing tools.
The values are cached both in memcache (which can be flushed) and locally
in the running Python instance, which has an indeterminite but typically short
life time.
To use the class:
import config
configvalue = config.config.get_value('valuename')
"""
from google.appengine.api import memcache
from google.appengine.ext import db
class Config(db.Model):
  """Run-time configuration parameter, keyed by parameter name.

  The key name of each entity is the parameter name; `value` holds its
  string value.  Lookups are cached in memcache and in a per-instance
  module-level cache.
  """
  description = db.StringProperty()
  value = db.StringProperty(required=True)
  MEMCACHE_ENTRY = 'Config'
  # Warning: do not add private/secret configuration values used in production
  # to these default values. The default values are intended for development.
  # Production values must be stored in the datastore.
  DEFAULT_VALUES = {}
  # Per-Python-instance cache of the full name->value dict; None until the
  # first lookup populates it.
  local_config_cache = None
  @classmethod
  def get_value(cls, name):
    """Retrieves the value of a configuration parameter.

    Args:
      name: the name of the parameter whose value we are looking for.
    Returns:
      The value of the parameter or None if the parameter is unknown.
    """
    if cls.local_config_cache is None:
      # Local cache is cold; try memcache first.
      cached = memcache.get(cls.MEMCACHE_ENTRY)
      if cached is None:
        # Cold everywhere: rebuild from defaults overlaid with datastore rows.
        cached = dict(cls.DEFAULT_VALUES)
        for parameter in Config.all():
          cached[parameter.key().name()] = parameter.value
        # Cache the full dict in memcache for one hour.
        memcache.add(cls.MEMCACHE_ENTRY, cached, 60 * 60)
      cls.local_config_cache = cached
    return cls.local_config_cache.get(name)
| Python |
# pagecount.py
#
# memcache-based counter with occasional db writeback.
# This is suitable for realtime pageviews counts, YMMV for other uses.
#
from google.appengine.api import memcache
from google.appengine.ext import db
import logging
import random
# this is a key used for testing and calls made with it
# are separated from the production stats
TEST_API_KEY = 'testkey'
# Memcache key prefixes for view and click counters.
VIEWS_PREFIX = "v:"
CLICKS_PREFIX = "c:"
# increase as needed
WRITEBACK_FREQ_PCT = 1.0
# Precomputed writeback threshold: WRITEBACK_FREQ_PCT scaled to the
# 0..1,000,000 range used when deciding whether to persist a count.
WRITEBACK_VAL = WRITEBACK_FREQ_PCT / 100.0 * 1000000.0
# Module-level instrumentation counters, surfaced via GetStats().
pc_writebacks = 0
pc_loads = 0
class PageCountShard(db.Model):
  """Datastore record holding the persisted count for one page.

  The key name is 'pc:<pagename>' (see KeyName); memcache holds the
  live value between writebacks.
  """
  # Last persisted count for the page.
  count = db.IntegerProperty(default=0)
def KeyName(pagename):
  """Return the datastore/memcache key for the given page name."""
  return "%s%s" % ('pc:', pagename)
def Writeback(pagename, value):
  """Persist the current count for pagename to the datastore.

  Counts are best-effort: a failed put is logged and otherwise ignored.

  Args:
    pagename: page identifier (un-prefixed; see KeyName).
    value: integer count to persist.
  """
  global pc_writebacks
  pc_writebacks = pc_writebacks + 1
  # Fixed: the old format string was missing its closing paren and used a
  # spurious unary '+value'.
  logging.debug("pagecount.Writeback(pagename='%s', value=%d)"
                % (pagename, value))
  record = PageCountShard(key_name=KeyName(pagename), count=value)
  try:
    record.put()
  except Exception:
    # Narrowed from a bare except (which also caught SystemExit) and made
    # the message say that the put actually failed.
    logging.warning("pagecount.Writeback put FAILED (pagename='%s', value=%d)"
                    % (pagename, value))
def LoadPageCount(pagename):
  """Load the persisted count for pagename from the datastore.

  Creates a zero-count record (transactionally) when none exists.

  Args:
    pagename: page identifier (un-prefixed; see KeyName).
  Returns:
    The persisted count, or 0 for a new/unreadable page.
  """
  global pc_loads
  pc_loads = pc_loads + 1
  logging.debug("pagecount.LoadPageCount(pagename='"+pagename+"')")
  try:
    record = PageCountShard.get_by_key_name(KeyName(pagename))
  except Exception:
    # Narrowed from a bare except; keep the best-effort fallback but leave
    # a trace of the failure instead of swallowing it silently.
    logging.warning("pagecount.LoadPageCount: get_by_key_name failed "
                    "(pagename='%s')" % pagename)
    record = None
  if record is not None:
    return record.count
  # No record yet: create one with a zero count.
  db.run_in_transaction(Writeback, pagename, 0)
  return 0
# initializes memcache if missing
def GetPageCount(pagename):
  """Return the current view count for pagename, priming memcache on a miss.

  Args:
    pagename: page identifier (un-prefixed; see KeyName).
  Returns:
    The cached count, or the freshly loaded/created datastore count.
  """
  logging.debug("pagecount.GetPageCount(pagename='"+pagename+"')")
  memcache_id = KeyName(pagename)
  val = memcache.get(memcache_id)
  if val is not None:
    return val
  val = LoadPageCount(pagename)
  # Seed memcache with the loaded value (the key was previously recomputed
  # here for no reason -- reuse it).
  memcache.set(memcache_id, val)
  return val
def IncrPageCount(pagename, delta):
  """Increment the page count, occasionally writing back to the datastore.

  Args:
    pagename: page identifier (un-prefixed; see KeyName).
    delta: amount to add to the counter.
  Returns:
    The new count, or the current count when memcache had to be primed.
  """
  logging.debug("pagecount.IncrPageCount(pagename='"+pagename+"')")
  memcache_id = KeyName(pagename)
  if memcache.get(memcache_id) is None:
    # initializes memcache if missing
    return GetPageCount(pagename)
  newval = memcache.incr(memcache_id, delta)
  # Write back roughly WRITEBACK_FREQ_PCT percent of the time.
  # WRITEBACK_VAL is the precomputed threshold (this expression used to be
  # duplicated inline; the dead 'rnd' scratch code is gone too).
  if random.random() * 1000000.0 <= WRITEBACK_VAL:
    logging.debug("pagecount.IncrPageCount: writeback: writebacks="+
                  str(pc_writebacks)+" newval="+str(newval))
    db.run_in_transaction(Writeback, pagename, newval)
  return newval
def GetStats():
  """Return memcache statistics augmented with this module's counters."""
  stats = memcache.get_stats()
  stats.update({'pc_writebacks': pc_writebacks,
                'pc_loads': pc_loads})
  return stats
| Python |
# Copyright 2009 Google Inc. All Rights Reserved.
#
import cgi
import os
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
import pagecount
# Template rendered by TestPageViewsView below.
TEST_PAGEVIEWS_TEMPLATE = 'test_pageviews.html'
def RenderTemplate(template_filename, template_values):
  """Render a Django template that lives next to this module."""
  template_path = os.path.join(os.path.dirname(__file__), template_filename)
  return template.render(template_path, template_values)
class TestPageViewsView(webapp.RequestHandler):
  """Handler that bumps a per-page test counter and renders the stats page."""
  def get(self):
    """HTTP get: increment the named test page's view count, show stats."""
    page_key = "testpage:%s" % (self.request.get('pagename'))
    view_count = pagecount.IncrPageCount(page_key, 1)
    values = pagecount.GetStats()
    values['pagename'] = page_key
    values['pageviews'] = view_count
    html = RenderTemplate(TEST_PAGEVIEWS_TEMPLATE, values)
    self.response.out.write(html)
class NopView(webapp.RequestHandler):
  """Trivial no-op handler, useful as a latency/availability baseline."""
  def get(self):
    """HTTP get: write a constant response body."""
    self.response.out.write("hello world")
| Python |
import cgi
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import db
import views
# URL-to-handler routing for the pageviews test app.
application = webapp.WSGIApplication([('/', views.TestPageViewsView),
                                      ('/nop', views.NopView),
                                      ],
                                     debug=True)
def main():
  """Entry point: serve the WSGI application."""
  run_wsgi_app(application)
if __name__ == "__main__":
  main()
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import models
import os
import sys
import unittest
import time
from google.appengine.api import users
from google.appengine.api import urlfetch
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import urlfetch_stub
from google.appengine.api import user_service_stub
# NOTE(review): this import shadows the stdlib 'unittest' imported above,
# and google.appengine.api has no 'unittest' module -- this looks like a
# typo (perhaps another *_stub module was intended); confirm and fix.
from google.appengine.api import unittest
class TestModel(unittest.TestCase):
  """Unit tests for the models module.

  NOTE(review): everything below test_lame sits inside a bare string
  literal, so those tests are effectively disabled -- only test_lame runs.
  Confirm whether they should be re-enabled.
  """
  def test_lame(self):
    """Placeholder test so the suite is non-empty."""
    self.assertEqual(1, 1)
  # The following is a bare string literal, not code: these tests are
  # effectively commented out.
  """
  def test_add_quote(self):
    # Add and remove quotes from the system.
    user = users.User('joe@example.com')
    quoteid = models.add_quote('This is a test.', user)
    time.sleep(1.1)
    quoteid2 = models.add_quote('This is a test2.', user)
    self.assertNotEqual(quoteid, None)
    self.assertNotEqual(quoteid, 0)
    # Get the added quotes by creation order
    quotes, next = models.get_quotes_newest()
    self.assertEqual(quotes[0].key().id(), quoteid2)
    self.assertEqual(models.get_quote(quoteid2).key().id(), quoteid2)
    self.assertEqual(len(quotes), 2)
    # Remove one quote
    models.del_quote(quoteid2, user)
    quotes, next = models.get_quotes_newest()
    self.assertEqual(quotes[0].key().id(), quoteid)
    self.assertEqual(len(quotes), 1)
    # Remove last remaining quote
    models.del_quote(quoteid, user)
    quotes, next = models.get_quotes_newest()
    self.assertEqual(len(quotes), 0)
  def test_del_quote_perms(self):
    # Permissions of removing quotes.
    user = users.User('joe@example.com')
    user2 = users.User('fred@example.com')
    quoteid = models.add_quote('This is a test.', user)
    # Get the added quotes by creation order
    quotes, next = models.get_quotes_newest()
    self.assertEqual(quotes[0].key().id(), quoteid)
    self.assertEqual(len(quotes), 1)
    # Remove one quote, should fail to remove the quote
    models.del_quote(quoteid, user2)
    # Confirm the quote is still in the system
    quotes, next = models.get_quotes_newest()
    self.assertEqual(quotes[0].key().id(), quoteid)
    self.assertEqual(len(quotes), 1)
    # Remove one remaining quote
    models.del_quote(quoteid, user)
    quotes, next = models.get_quotes_newest()
    self.assertEqual(len(quotes), 0)
  def test_del_non_existent(self):
    user = users.User('joe@example.com')
    models.del_quote(1, user)
  def test_paging_newest(self):
    # Test that we can page through the quotes inthe order that they were added.
    user = users.User('joe@example.com')
    for i in range(models.PAGE_SIZE):
      quoteid = models.add_quote('This is a test.', user)
      self.assertNotEqual(quoteid, None)
    quotes, next = models.get_quotes_newest()
    self.assertEqual(len(quotes), models.PAGE_SIZE)
    self.assertEqual(next, None)
    quoteid = models.add_quote('This is a test.', user)
    self.assertNotEqual(quoteid, None)
    quotes, next = models.get_quotes_newest()
    self.assertEqual(len(quotes), models.PAGE_SIZE)
    self.assertNotEqual(next, None)
    quotes, next = models.get_quotes_newest(next)
    self.assertEqual(len(quotes), 1)
    self.assertEqual(next, None)
    # Cleanup
    models.del_quote(quoteid, user)
    quotes, next = models.get_quotes_newest()
    for q in quotes:
      models.del_quote(q.key().id(), user)
  def test_game_progress(self):
    email = 'fred@example.com'
    user = users.User(email)
    hasVoted, hasAddedQuote = models.get_progress(user)
    self.assertFalse(hasVoted)
    self.assertFalse(hasAddedQuote)
    quoteid0 = models.add_quote('This is a test.', user, _created=1)
    hasVoted, hasAddedQuote = models.get_progress(user)
    self.assertFalse(hasVoted)
    self.assertTrue(hasAddedQuote)
    models.set_vote(quoteid0, user, 1)
    hasVoted, hasAddedQuote = models.get_progress(user)
    self.assertTrue(hasVoted)
    self.assertTrue(hasAddedQuote)
  def test_voting(self):
    # Test the voting system behaves as defined in the design document.
    user = users.User('fred@example.com')
    user2 = users.User('barney@example.com')
    # Day 1 - [quote 0 and 1 are added on Day 1 and
    # get 5 and 3 votes respectively. Rank is q0, q1.]
    # q0 (5) = 1 * 4 + 5 = 9
    # q1 (3) = 1 * 4 + 3 = 7
    quoteid0 = models.add_quote('This is a test.', user, _created=1)
    quoteid1 = models.add_quote('This is a test.', user, _created=1)
    models.set_vote(quoteid0, user, 1)
    models.set_vote(quoteid1, user, 3)
    quotes, next = models.get_quotes()
    self.assertEqual(models.voted(quotes[1], user), 1)
    self.assertEqual(models.voted(quotes[0], user), 3)
    self.assertEqual(quotes[0].key().id(), quoteid1)
    self.assertEqual(quotes[1].key().id(), quoteid0)
    models.set_vote(quoteid0, user, 5)
    quotes, next = models.get_quotes()
    self.assertEqual(quotes[0].key().id(), quoteid0)
    self.assertEqual(quotes[1].key().id(), quoteid1)
    # q0 (5) + (3) = 1 * 4 + 8 = 12
    # q1 (3) + (0) = 1 * 4 + 3 = 7
    # q2 (3) = 2 * 4 + 3 = 11
    quoteid2 = models.add_quote('This is a test.', user, _created=2)
    models.set_vote(quoteid0, user, 8)
    models.set_vote(quoteid1, user, 3)
    models.set_vote(quoteid2, user, 3)
    quotes, next = models.get_quotes()
    self.assertEqual(quotes[0].key().id(), quoteid0)
    self.assertEqual(quotes[1].key().id(), quoteid2)
    self.assertEqual(quotes[2].key().id(), quoteid1)
    # q0 (5) + (3) = 1 * 4 + 8 = 12
    # q1 (3) + (0) = 1 * 4 + 3 = 7
    # q2 (3) + (2) = 2 * 5 + 4 = 14
    # q3 (5) = 3 * 4 + 5 = 17
    quoteid3 = models.add_quote('This is a test.', user, _created=3)
    models.set_vote(quoteid0, user, 8)
    models.set_vote(quoteid1, user, 3)
    models.set_vote(quoteid2, user, 5)
    models.set_vote(quoteid3, user, 5)
    quotes, next = models.get_quotes()
    self.assertEqual(quotes[0].key().id(), quoteid3)
    self.assertEqual(quotes[1].key().id(), quoteid2)
    self.assertEqual(quotes[2].key().id(), quoteid0)
    self.assertEqual(quotes[3].key().id(), quoteid1)
    # q0 (5) + (3) + (1) = 1 * 4 + 9 = 13
    # q1 (3) + (0) = 1 * 4 + 3 = 7
    # q2 (3) + (2) = 2 * 5 + 4 = 14
    # q3 (0) = 3 * 4 + 0 = 12
    models.set_vote(quoteid0, user, 9)
    models.set_vote(quoteid3, user, 0)
    quotes, next = models.get_quotes()
    self.assertEqual(quotes[0].key().id(), quoteid2)
    self.assertEqual(quotes[1].key().id(), quoteid0)
    self.assertEqual(quotes[2].key().id(), quoteid3)
    self.assertEqual(quotes[3].key().id(), quoteid1)
    models.del_quote(quoteid0, user)
    models.del_quote(quoteid1, user)
    models.del_quote(quoteid2, user)
    models.del_quote(quoteid3, user)
  """
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
main() for API testing framework
"""
# view classes are inherently not pylint-compatible
# pylint: disable-msg=C0103
# pylint: disable-msg=W0232
# pylint: disable-msg=E1101
# pylint: disable-msg=R0903
import re
import os
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
import utils
import testapi.helpers
class DumpSampleData(webapp.RequestHandler):
  """print the contents of the static XML file."""
  def get(self):
    """HTTP get method: stream the sample XML with the right content type."""
    path = os.path.join(os.path.dirname(__file__),
                        testapi.helpers.CURRENT_STATIC_XML)
    xmlfh = open(path, 'r')
    # try/finally (rather than 'with', unavailable without a __future__
    # import on python 2.5) so the handle isn't leaked across requests.
    try:
      self.response.headers['Content-Type'] = 'text/xml'
      self.response.out.write(xmlfh.read())
    finally:
      xmlfh.close()
class RunTests(webapp.RequestHandler):
  """main for running all tests."""
  def get(self):
    """HTTP get method.

    Query args (all validated against ok_pattern):
      test_type: comma-separated test names, or 'all' (default).
      response_types: comma-separated output formats (default from helpers).
      url: API base URL to test (default helpers.DEFAULT_TEST_URL).
      output: 'test_types' to just list available tests.
      cache: '0' to force live runs instead of reading stored results.
    Responds 400 on invalid args; 500 if any test fails.
    """
    ok_pattern = re.compile('[a-z0-9_,:/-]*$')
    try:
      testType = utils.get_verified_arg(ok_pattern, self.request,
                                        'test_type') or 'all'
      responseTypes = (utils.get_verified_arg(ok_pattern, self.request,
                                              'response_types') or
                       testapi.helpers.DEFAULT_RESPONSE_TYPES)
      remoteUrl = utils.get_verified_arg(ok_pattern, self.request, 'url')
      specialOutput = utils.get_verified_arg(ok_pattern, self.request,
                                             'output')
      # cache=0: don't read from cache, else read from cache.
      read_from_cache = not (self.request.get('cache') == '0')
    except utils.InvalidValue:
      self.error(400)
      return
    # output=test_types is a discovery call: list tests and stop.
    if specialOutput == 'test_types':
      self.response.out.write(testapi.helpers.ALL_TEST_TYPES)
      return
    errors = ''
    if not remoteUrl:
      errors = 'No remote url given in request, using default url'
      apiUrl = testapi.helpers.DEFAULT_TEST_URL
    else:
      apiUrl = remoteUrl
    # Inline stylesheet for the human-readable report.
    outstr = ""
    outstr += '<style>'
    outstr += 'p {font-family: Arial, sans-serif; font-size: 10pt; margin: 0;}'
    outstr += 'p.error {color: #880000;}'
    outstr += '.test {font-size: 12pt; font-weight: bold; margin-top: 12px;}'
    outstr += '.uri {font-size: 10pt; font-weight: normal; color: gray;'
    outstr += ' margin-left: 0px;}'
    outstr += '.result {font-size: 11pt; font-weight: normal; '
    outstr += ' margin-left: 8px; margin-bottom: 4px;}'
    outstr += '.fail {color: #880000;}'
    outstr += '.success {color: #008800;}'
    outstr += '.amplification {color: gray; margin-left: 16px;}'
    outstr += '</style>'
    # strip whitespace
    outstr = re.sub(r' ', '', outstr) + "\n"
    if read_from_cache:
      outstr += '<h1>Reading test: ' + testType + ' from the datastore</h1>'
    else:
      outstr += '<h1>Running test: ' + testType + '</h1>'
    outstr += '<p class="error">' + errors + '</p>'
    outstr += '<p>Response types: ' + responseTypes + '</p>'
    outstr += '<p>API url: ' + apiUrl + '</p>'
    self.response.out.write(outstr)
    # Run every requested test for every response type; any failure turns
    # the overall HTTP status into a 500 (useful for monitoring).
    final_status = 200
    responseTypes = responseTypes.split(',')
    for responseType in responseTypes:
      api_testing = testapi.helpers.ApiTesting(self)
      api_testing.run_tests(testType, apiUrl, responseType, read_from_cache)
      if api_testing.num_failures > 0 and final_status != 500:
        final_status = 500
    self.response.set_status(final_status)
# URL routing for the API-testing app.
APP = webapp.WSGIApplication(
  [('/testapi/run', RunTests),
   ('/testapi/sampleData.xml', DumpSampleData)],
  debug=True)
def main():
  """main for standalone execution."""
  run_wsgi_app(APP)
# Standard App Engine CGI entry point.
if __name__ == "__main__":
  main()
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
core classes for testing the API.
"""
from google.appengine.api import urlfetch
from google.appengine.api import memcache
from xml.dom import minidom
import xml.sax.saxutils
import re
import hashlib
import random
import math
import logging
from urlparse import urlsplit
from google.appengine.ext import db
from urllib import urlencode
from fastpageviews.pagecount import TEST_API_KEY
# Default API endpoint exercised when the request gives no 'url' arg.
DEFAULT_TEST_URL = 'http://footprint2009dev.appspot.com/api/volopps'
DEFAULT_RESPONSE_TYPES = 'rss'
# Local copy of the sample data, served by DumpSampleData.
LOCAL_STATIC_URL = 'http://localhost:8080/test/sampleData.xml'
CURRENT_STATIC_XML = 'sampleData0.1.xml'
#'query, num, start, provider'
"""
when we paginate, the overfetch causes lower-ranking Base results to
be considered for top slots, and sometimes they win them because we
use a very different ranking/ordering algorithm. This also means that
changing &num= can change ranking/ordering as well, and worse, as
we paginate through results, we might actually see the same results
again (rarely).
disabling the &start test for now
"""
#ALL_TEST_TYPES = 'num, query, provider, start, geo, snippets'
ALL_TEST_TYPES = 'num, query, provider, geo, snippets'
class TestResultCode(db.IntegerProperty):
  """success and failure types.

  NOTE(review): subclassing db.IntegerProperty lets this class double as
  both the model property type (see TestResults.result_code) and a
  namespace for the integer result-code constants below -- confirm this
  dual use is intentional before refactoring.
  """
  # pylint: disable-msg=W0232
  # pylint: disable-msg=R0903
  PASS = 0
  UNKNOWN_FAIL = 1
  INTERNAL_ERROR = 2
  LOW_LEVEL_PARSE_FAIL = 3
  DATA_MISMATCH = 4
class TestResults(db.Model):
  """results of running tests.

  One row per test execution; queried newest-first by run_tests when
  reading cached results from the datastore.
  """
  # pylint: disable-msg=W0232
  # pylint: disable-msg=R0903
  # Updated automatically on every put.
  timestamp = db.DateTimeProperty(auto_now=True)
  # Name of the test that ran (e.g. 'num', 'query', 'geo').
  test_type = db.StringProperty()
  # One of the TestResultCode constants.
  result_code = TestResultCode()
  # Human-readable failure message, if any.
  result_string = db.StringProperty()
class ApiResult(object):
  """One search-result record parsed out of an API response."""
  # pylint: disable-msg=R0903
  def __init__(self, item_id, title, description, url, provider, latlong):
    """Store the given field values on the instance, unchanged."""
    self.item_id = item_id
    self.url = url
    self.title = title
    self.provider = provider
    self.description = description
    self.latlong = latlong
def get_node_data(entity):
  """returns the value of a DOM node with some escaping, substituting
  "" (empty string) if no child/value is found."""
  child = entity.firstChild
  if child == None:
    return ""
  if child.data == None:
    return ""
  escaped = xml.sax.saxutils.escape(child.data).encode('UTF-8')
  return re.sub(r'\n', r'\\n', escaped)
def get_children_by_tagname(elem, name):
  """returns a list of direct children element nodes whose name matches.

  Args:
    elem: DOM element whose childNodes are scanned (non-recursive).
    name: tag name to match exactly.
  Returns:
    List of matching child element nodes, in document order.
  """
  # Resolved the old TODO: a list comprehension replaces the manual
  # append loop; behavior is unchanged.
  return [child for child in elem.childNodes
          if child.nodeType == child.ELEMENT_NODE and child.nodeName == name]
def get_tag_value(entity, tag):
  """within entity, find the first child with the given tagname, then
  return its value, processed to UTF-8 and with newlines escaped.

  Returns "" when the tag is absent or has no text content.
  """
  # Commented-out debug prints removed; the None-check chain is collapsed
  # but short-circuits identically to the original.
  nodes = entity.getElementsByTagName(tag)
  if nodes.length == 0 or nodes[0] == None:
    return ""
  first_child = nodes[0].firstChild
  if first_child == None or first_child.data == None:
    return ""
  outstr = xml.sax.saxutils.escape(first_child.data).encode('UTF-8')
  return re.sub(r'\n', r'\\n', outstr)
def parse_rss(data):
  """convert an RSS response into a list of ApiResult objects."""
  xmldoc = minidom.parseString(data)
  results = []
  for item in xmldoc.getElementsByTagName('item'):
    results.append(ApiResult(
        get_tag_value(item, 'fp:id'),
        get_tag_value(item, 'title'),
        get_tag_value(item, 'description'),
        get_tag_value(item, 'link'),
        get_tag_value(item, 'fp:provider'),
        get_tag_value(item, 'fp:latlong')))
  return results
def random_item(items):
  """pick a uniformly random item from a non-empty list.

  Fixed an off-by-one: the old random.randrange(0, num_items - 1) call
  excluded its upper bound, so the final element could never be chosen.
  random.choice covers the whole list (and handles the 1-element case).
  """
  return random.choice(items)
def retrieve_raw_data(full_uri):
  """call urlfetch, intended to cache the results in memcache.

  Args:
    full_uri: absolute URL to fetch.
  Returns:
    The response body, or None on a non-200 status.
  """
  # Replaced a stray bare 'print' debug statement with logging.debug.
  logging.debug('retrieve_raw_data: %s' % full_uri)
  memcache_key = hashlib.md5('api_test_data:' + full_uri).hexdigest()
  result_content = memcache.get(memcache_key)
  if not result_content:
    fetch_result = urlfetch.fetch(full_uri)
    if fetch_result.status_code != 200:
      return None
    result_content = fetch_result.content
    # NOTE(review): the memcache write below is disabled, so the get()
    # above always misses -- confirm whether caching should be re-enabled.
    # memcache.set(memcache_key, result_content, time=300)
  return result_content
def in_location(opp, loc, radius):
  """is the given opportunity within radius miles of loc?

  Args:
    opp: object with a 'latlong' attribute formatted as 'lat,lng'.
    loc: 'lat,lng' string for the search center.
    radius: search radius in miles -- number or numeric string.
  Returns:
    True if the great-circle distance from loc to opp is <= radius.
  """
  loc_arr = loc.split(',')
  opp_arr = opp.latlong.split(',')
  loc_lat = math.radians(float(loc_arr[0].strip()))
  loc_lng = math.radians(float(loc_arr[1].strip()))
  opp_lat = math.radians(float(opp_arr[0].strip()))
  opp_lng = math.radians(float(opp_arr[1].strip()))
  dlng = opp_lng - loc_lng
  dlat = opp_lat - loc_lat
  # Haversine formula; 3956 is the earth's radius in miles.
  haversine = (math.sin(dlat / 2))**2 + \
      (math.sin(dlng / 2))**2 * math.cos(loc_lat) * math.cos(opp_lat)
  angular_dist = 2 * math.asin(min(1, math.sqrt(haversine)))
  dist = 3956 * angular_dist
  # Bug fix: callers pass radius as a string, and 'float <= str' never
  # compared numerically -- coerce so the distance check is real.
  return dist <= float(radius)
class ApiTesting(object):
  """class to hold testing methods.

  One instance is created per response type by RunTests.get; test_* methods
  fetch live API results and validate them, writing an HTML report through
  the wrapped request handler and recording results in TestResults.
  """
  # pylint: disable-msg=R0904
  def __init__(self, wsfi_app):
    # wsfi_app: the webapp.RequestHandler whose response we write into.
    self.web_app = wsfi_app
    self.num_failures = 0
    self.api_url = None
    self.response_type = None
    self.test_type = ""
  def success(self, datastore_insert):
    # datastore_insert: when True, also record a PASS row in the datastore.
    self.datastore_insert = datastore_insert
    """test whether to insert entity into the Datastore"""
    if self.datastore_insert:
      """report test success. returns True to make it easy on callers."""
      res = TestResults(test_type=self.test_type, result_code=TestResultCode.PASS)
      res.put()
    self.output('<p class="result success">Passed</p>')
    return True
  def fail(self, code, msg, datastore_insert):
    # datastore_insert: when True, also record the failure in the datastore.
    self.datastore_insert = datastore_insert
    """test whether to insert entity into the Datastore"""
    if self.datastore_insert:
      """report test failure. returns False to make it easy on callers."""
      res = TestResults(test_type=self.test_type, result_code=code,
                        result_string=msg)
      res.put()
    self.num_failures += 1
    self.output('<p class="result fail">Fail. <span>'+msg+'</span></p>')
    # stick something in the logs, so it shows up in the appengine dashboard
    logging.error('testapi fail: '+msg)
    return False
  def print_details(self, msg):
    """print extra error details for humans, which aren't logged.
    returns False for convenience of callers."""
    self.output('<p class="result amplification">'+msg+'</p>')
    return False
  def output(self, html):
    """macro: output some HTML."""
    self.web_app.response.out.write(html)
  def make_uri(self, options):
    """generate an API call given args."""
    result = self.api_url + '?output=' + self.response_type + '&'
    # Use the test key so these calls are excluded from production stats.
    result += 'key=%s&' % TEST_API_KEY
    result += urlencode(options)
    logging.debug('testapi.helpers.make_uri = %s' % result)
    return result
  def assert_nonempty_results(self, result_set):
    """require that the results are valid (returns true/false).
    Handles the fail() call internally, but not the success() call."""
    if result_set is None or result_set == False or len(result_set) == 0:
      return self.fail(
        TestResultCode.DATA_MISMATCH,
        "test_"+self.test_type+": expected non-empty results.", True)
    return True
  def assert_empty_results(self, result_set):
    """require that the results are valid (returns true/false).
    Handles the fail() call internally, but not the success() call."""
    if result_set is None:
      return self.fail(
        TestResultCode.INTERNAL_ERROR,
        "test_"+self.test_type+": result_set invalid.", True)
    if len(result_set) == 0:
      return True
    return self.fail(
      TestResultCode.DATA_MISMATCH,
      "test_"+self.test_type+": expected empty results.", True)
  def parse_raw_data(self, data):
    """wrapper for parse_TYPE()."""
    if self.response_type == 'rss':
      return parse_rss(data)
    elif self.response_type == 'xml':
      # TODO: implement: return self.parse_xml(data)
      return []
    return []
  def run_test(self, test_type):
    """run one test."""
    self.test_type = test_type.strip()
    msg = 'test_type='+self.test_type
    if self.response_type != "rss":
      msg += '&output=' + self.response_type
    self.output('<p class="test">Running <em>'+msg+'</em></p>')
    # Dispatch to the matching test_<name> method, if any.
    test_func = getattr(self, 'test_' + self.test_type, None)
    if callable(test_func):
      return test_func()
    return self.fail(
      TestResultCode.INTERNAL_ERROR,
      'No such test <strong>'+self.test_type+'</strong> in suite.', True)
  def datastore_test_check(self, testresult, test_type):
    """read the TestResult object and report success or failure"""
    self.test_type = test_type
    self.testresult = testresult
    msg = 'test_type=' +self.test_type
    datemsg = 'Date of last run: ' +str(self.testresult.timestamp)
    if self.response_type != "rss":
      msg += '&output=' + self.response_type
    self.output('<p class="test">Checking Datastore for <em>'+msg+'</em></p>'+datemsg)
    # Map the stored numeric code back to a pass/fail report; datastore
    # reads pass datastore_insert=False so they are not re-recorded.
    if self.testresult.result_code == 0:
      return self.success(False)
    elif self.testresult.result_code == 4:
      return self.fail(
        TestResultCode.DATA_MISMATCH,
        self.testresult.result_string, False)
    elif self.testresult.result_code == 3:
      return self.fail(
        TestResultCode.LOW_LEVEL_PARSE_FAIL,
        self.testresult.result_string, False)
    elif self.testresult.result_code == 2:
      return self.fail(
        TestResultCode.INTERNAL_ERROR,
        self.testresult.result_string, False)
    else:
      return self.fail(
        TestResultCode.UNKNOWN_FAIL,
        self.testresult.result_string, False)
  def run_tests(self, test_type, api_url, response_type, read_from_cache):
    """run multiple tests (comma-separated). beware of app engine timeouts!"""
    self.api_url = api_url
    self.response_type = response_type
    self.read_from_cache = read_from_cache
    if test_type == 'all':
      test_type = ALL_TEST_TYPES
    test_types = test_type.split(',')
    res = True
    for test_type in test_types:
      test_type = test_type.strip()
      if self.read_from_cache:
        """query the Datastore for existing test data"""
        testresults = db.GqlQuery("SELECT * FROM TestResults " +
                                  "WHERE test_type = :1 " +
                                  #"AND result_code = 0 " +
                                  "ORDER BY timestamp DESC", test_type)
        testresult = testresults.get()
        if testresult:
          res = self.datastore_test_check(testresult, test_type)
        else:
          # Nothing stored yet: fall back to a live run.
          if not self.run_test(test_type):
            res = False
      else:
        if not self.run_test(test_type):
          res = False
    return res
  def get_result_set(self, arg_list):
    """macro for forming and making a request and parsing the results."""
    full_uri = self.make_uri(arg_list)
    self.output('<p class="uri">Fetching result set for following tests</p>')
    self.output('<p class="uri">URI: ' + full_uri + '</p>')
    # NOTE(review): the bare except below swallows every error (including
    # fetch failures, not only parse errors) -- confirm before narrowing.
    try:
      data = retrieve_raw_data(full_uri)
      return self.parse_raw_data(data)
    except:
      self.fail(TestResultCode.LOW_LEVEL_PARSE_FAIL,
                'parse_raw_data: unable to parse response.', True)
      return None
  def test_num(self):
    """test whether the result set has a given number of results."""
    expected_count = int(random_item(['7', '14', '21', '28', '57']))
    result_set = self.get_result_set({'q':'in', 'num':expected_count})
    if not self.assert_nonempty_results(result_set):
      return False
    if len(result_set) != expected_count:
      return self.fail(
        TestResultCode.DATA_MISMATCH,
        'Requested num='+str(expected_count)+' but received '+
        str(len(result_set))+' results.', True)
    return self.success(True)
  def int_test_bogus_query(self):
    """ try a few bogus locations to make sure there's no weird data """
    term = random_item(["fqzzqx"])
    result_set = self.get_result_set({'q':term})
    if self.assert_empty_results(result_set):
      return self.success(True)
    else:
      return self.fail(
        TestResultCode.DATA_MISMATCH,
        'some item(s) found for search term <strong>' + term +
        '</strong> or result set invalid', True)
  def int_test_valid_query(self):
    """run a hardcoded test query (q=)."""
    result = True
    term = random_item(["hospital", "walk", "help", "read", "children",
                        "mercy"])
    result_set = self.get_result_set({'q':term})
    if not self.assert_nonempty_results(result_set):
      return False
    result = True
    # Every result should mention the term in its title or description.
    for opp in result_set:
      if (not re.search(term, opp.title, re.I) and
          not re.search(term, opp.description, re.I)):
        self.print_details('Did not find search term <strong>'+term+
                           '</strong> in item '+opp.title+': '+opp.description)
        result = False
    if not result:
      return self.fail(
        TestResultCode.DATA_MISMATCH,
        'some item(s) did not match search term <strong>' + term, True)
    return self.success(True)
  def test_query(self):
    """run a set of query term tests."""
    self.int_test_valid_query()
    self.int_test_bogus_query()
  def int_test_bogus_geo(self):
    """ try a few bogus locations to make sure there's no weird data """
    location = random_item(["fqzvzqx"])
    result_set = self.get_result_set({'vol_loc':location})
    if self.assert_empty_results(result_set):
      return self.success(True)
    else:
      return self.fail(
        TestResultCode.DATA_MISMATCH,
        'some item(s) found for location <strong>' + location +
        '</strong> or result set invalid', True)
  def int_test_valid_geo(self):
    """run a query and check the geo results."""
    loc = random_item(["37.8524741,-122.273895", "33.41502,-111.82298",
                       "33.76145285137889,-84.38941955566406",
                       "29.759956,-95.362534"])
    radius = random_item(["10", "20", "30", "50"])
    result_set = self.get_result_set({'vol_loc':loc, 'vol_dist':radius,
                                      'num':20})
    if not self.assert_nonempty_results(result_set):
      return False
    result = True
    for opp in result_set:
      if not in_location(opp, loc, radius):
        self.print_details('Item outside location/distance <strong>'+opp.id+
                           ': '+opp.title+'</strong> '+opp.latlong)
        result = False
    if not result:
      return self.fail(
        TestResultCode.DATA_MISMATCH,
        'One or more items did not fall in the requested location/distance.', True)
    return self.success(True)
  def test_geo(self):
    """run a set of geo tests."""
    self.int_test_valid_geo()
    self.int_test_bogus_geo()
  def test_provider(self):
    """run a hardcoded test query (&vol_provider=)."""
    provider = "HandsOn Network"
    result_set = self.get_result_set({'q':'hospital', 'vol_provider':provider})
    if not self.assert_nonempty_results(result_set):
      return False
    result = True
    for opp in result_set:
      if re.search(provider, opp.provider, re.I) == None:
        self.print_details('Wrong provider <strong>'+opp.provider+'</strong>'+
                           'found in item <em>'+opp.title+'</em>')
        result = False
    if not result:
      return self.fail(
        TestResultCode.DATA_MISMATCH,
        'One or more items did not match provider <strong>provider+</strong>', True)
    return self.success(True)
  def test_start(self):
    """
    Tests two result sets to ensure that the API 'start' parameter is
    valid. Assumes:
    result_set1 and result_set2 must overlap (i.e. (start2 - start1) < num_items)
    start1 < start2
    Simply tests to make sure that result_set1[start2] = result_set2[start1]
    and continues testing through the end of the items that should overlap
    """
    start1 = 1
    start2 = 5
    num_items = 10
    result_set1 = self.get_result_set({'q':'in',
                                       'num': num_items, 'start': start1})
    result_set2 = self.get_result_set({'q':'in',
                                       'num': num_items, 'start': start2})
    if (not self.assert_nonempty_results(result_set1) or
        not self.assert_nonempty_results(result_set2)):
      return False
    result = True
    # Compare the overlapping window of the two pages item by item.
    for i in range(start2, num_items):
      opp1 = result_set1[i]
      opp2 = result_set2[start1 + (i - start2)]
      if opp1.title != opp2.title:
        self.print_details('List items different, <em>'+opp1.title+'</em> != '+
                           '<em>'+opp2.title+'</em>')
        result = False
    if not result:
      return self.fail(
        TestResultCode.DATA_MISMATCH,
        'Start param returned non-overlapping results.', True)
    return self.success(True)
  def test_snippets(self):
    """ensure that /ui_snippets returns something valid."""
    pieces = urlsplit(self.api_url)
    domain = pieces.netloc
    self.print_details(domain)
    data = retrieve_raw_data('http://'+domain+'/ui_snippets?q=a&cache=0')
    if not data:
      return self.fail(
        TestResultCode.UNKNOWN_FAIL,
        'misc problem with /ui_snippets', True)
    return self.success(True)
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
views in the app, in the MVC sense.
"""
# note: view classes are inherently not pylint-compatible
# pylint: disable-msg=C0103
# pylint: disable-msg=W0232
# pylint: disable-msg=E1101
# pylint: disable-msg=R0903
from datetime import datetime
import cgi
import email.Utils
import os
import urllib
import logging
import re
import time
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from fastpageviews import pagecount
from third_party.recaptcha.client import captcha
import api
import base_search
import deploy
import geocode
import models
import modelutils
import posting
import search
import urls
import userinfo
import utils
import view_helper
import searchresult
TEMPLATE_DIR = 'templates/'
HOMEPAGE_TEMPLATE = 'homepage.html'
TEST_PAGEVIEWS_TEMPLATE = 'test_pageviews.html'
SEARCH_RESULTS_TEMPLATE = 'search_results.html'
SEARCH_RESULTS_DEBUG_TEMPLATE = 'search_results_debug.html'
SEARCH_RESULTS_RSS_TEMPLATE = 'search_results.rss'
SEARCH_RESULTS_MISSING_KEY_TEMPLATE = 'search_results_missing_key.html'
SNIPPETS_LIST_TEMPLATE = 'snippets_list.html'
SNIPPETS_LIST_MINI_TEMPLATE = 'snippets_list_mini.html'
SNIPPETS_LIST_RSS_TEMPLATE = 'snippets_list.rss'
MY_EVENTS_TEMPLATE = 'my_events.html'
POST_TEMPLATE = 'post.html'
POST_RESULT_TEMPLATE = 'post_result.html'
ADMIN_TEMPLATE = 'admin.html'
DATAHUB_DASHBOARD_TEMPLATE = 'datahub_dashboard.html'
MODERATE_TEMPLATE = 'moderate.html'
STATIC_CONTENT_TEMPLATE = 'static_content.html'
NOT_FOUND_TEMPLATE = 'not_found.html'
DASHBOARD_BASE_URL = "http://google1.osuosl.org/~footprint/datahub/dashboard/"
DATAHUB_LOG = DASHBOARD_BASE_URL + "load_gbase.log.bz2"
DEFAULT_NUM_RESULTS = 10
# Register custom Django templates
template.register_template_library('templatetags.comparisonfilters')
template.register_template_library('templatetags.stringutils')
template.register_template_library('templatetags.dateutils_tags')
# TODO: not safe vs. spammers to checkin... but in our design,
# the worst that happens is a bit more spam in our moderation
# queue, i.e. no real badness, just slightly longer review
# cycle until we can regen a new key. Meanwhile, avoiding this
# outright is a big pain for launch, regen is super easy and
# it could be a year+ before anybody notices. Note: the varname
# is intentionally boring, to avoid accidental discovery by
# code search tools.
PK = "6Le2dgUAAAAAABp1P_NF8wIUSlt8huUC97owQ883"
def get_unique_args_from_request(request):
  """ Gets unique args from a request.arguments() list.
  If a URL search string contains a param more than once, only
  the last value is retained.
  For example, for the query "http://foo.com/?a=1&a=2&b=3"
  this function would return { 'a': '2', 'b': '3' }
  Args:
    request: A webapp.RequestHandler.request object (must support
      arguments() and get_all()).  (docstring fix: this is the request
      itself, not the arguments() list.)
  Returns:
    dictionary of URL parameters.
  """
  unique_args = {}
  for arg in request.arguments():
    allvals = request.get_all(arg)
    # Keep only the last occurrence of each repeated parameter.
    unique_args[arg] = allvals[-1]
  return unique_args
def optimize_page_speed(request):
  """OK to optimize the page: minimize CSS, minimize JS, Sprites, etc."""
  # Explicit query-param overrides take precedence:
  # page_optim=1 forces optimization on, page_debug=1 forces it off.
  if request.get('page_optim') == "1":
    return True
  if request.get('page_debug') == "1":
    return False
  # Otherwise, optimize only in production and on appspot.
  host = request.host_url
  return host.find("appspot.com") >= 0 or host.find("allforgood.org") >= 0
def get_default_template_values(request, current_page):
  """Builds the template-value dict shared by most page handlers.

  Args:
    request: webapp request object.
    current_page: string page identifier (e.g. 'SEARCH', 'HOMEPAGE').
  Returns:
    dict of common template values, including the logged-in user (if any)
    and user-joined-days info merged in by load_userinfo_into_dict().
  """
  # for debugging login issues
  no_login = (request.get('no_login') == "1")
  version = request.get('dbgversion')
  # don't allow junk
  if not version or not re.search(r'^[0-9a-z._-]+$', version):
    version = os.getenv('CURRENT_VERSION_ID')
  template_values = {
    'user' : userinfo.get_user(request),
    'current_page' : current_page,
    'host' : urllib.quote(request.host_url),
    'path' : request.path,
    'version' : version,
    'no_login' : no_login,
    'optimize_page' : optimize_page_speed(request),
    'view_url': request.url,
  }
  load_userinfo_into_dict(template_values['user'], template_values)
  return template_values
def load_userinfo_into_dict(user, userdict):
  """populate the given dict with user info."""
  if not user:
    # Anonymous visitor: keys are present but empty.
    userdict["user"] = None
    userdict["user_days_since_joined"] = None
    return
  userdict["user"] = user
  joined = user.get_user_info().first_visit
  userdict["user_days_since_joined"] = (datetime.now() - joined).days
def render_template(template_filename, template_values):
  """wrapper for template.render() which handles path."""
  # Inject deploy-wide values, then render relative to this module's dir.
  deploy.load_standard_template_values(template_values)
  full_path = os.path.join(os.path.dirname(__file__),
                           TEMPLATE_DIR + template_filename)
  return template.render(full_path, template_values)
def require_moderator(handler_method):
  """Decorator ensuring the current FP user is a logged in moderator.
  Also sets self.user.
  """
  def decorate(self):
    # Reuse self.user if a stacked decorator already resolved it.
    if not getattr(self, 'user', None):
      self.user = userinfo.get_user(self.request)
    if not self.user:
      # Not logged in at all.
      self.error(401)
      self.response.out.write('<html><body>Please log in.</body></html>')
      return
    if (not self.user.get_user_info() or
        not self.user.get_user_info().moderator):
      # Logged in but not flagged as a moderator.
      self.error(403)
      self.response.out.write('<html><body>Permission denied.</body></html>')
      logging.warning('Non-moderator blacklist attempt.')
      return
    return handler_method(self)
  return decorate
def require_usig(handler_method):
  """Decorator ensuring the current FP user has a valid usig XSRF token.
  Also sets self.usig and self.user."""
  def decorate(self):
    # Reuse self.user if a stacked decorator already resolved it.
    if not getattr(self, 'user', None):
      self.user = userinfo.get_user(self.request)
    self.usig = userinfo.get_usig(self.user)
    # The request must echo back the exact token minted for this user.
    if self.usig != self.request.get('usig'):
      self.error(403)
      logging.warning('XSRF attempt. %s!=%s',
                      self.usig, self.request.get('usig'))
      return
    return handler_method(self)
  return decorate
def require_admin(handler_method):
  """Decorator ensuring the current App Engine user is an administrator."""
  def decorate(self):
    """Validate request is from an admin user, send to logon page if not."""
    user = users.get_current_user()
    if not user:
      # No user, redirect to the login page
      self.redirect(users.create_login_url(self.request.url))
      return
    if users.is_current_user_admin():
      # User is an admin, go ahead and run the handler
      return handler_method(self)
    # Logged in but not an admin: return unauthorized with a logout link.
    self.error(401)
    self.response.out.write(
      '<html><body>'
      'Sorry, you are not an administrator. Please '
      '<a href="%s">'
      'log out</a> and sign in as an administrator.'
      '</body></html>' % users.create_logout_url(self.request.url))
  return decorate
def expires(seconds):
  """Decorator factory: set Expires and Cache-Control response headers.

  Args:
    seconds: cache lifetime in seconds; <= 0 disables caching entirely.
  """
  # If you try to use '@expires' instead of '@expires(0)', this
  # will raise an exception.
  seconds = int(seconds)
  def decorator(handler_method):
    """Wraps handler_method with header-setting logic."""
    def decorate(self):
      """Set caching headers, then invoke the wrapped handler."""
      if seconds <= 0:
        # Expire immediately.  (Removed a stray dead 'pass' here.)
        self.response.headers['Cache-Control'] = 'no-cache'
        self.response.headers['Expires'] = 'Thu, 01 Jan 2009 00:00:00 GMT'
      else:
        self.response.headers['Cache-Control'] = 'public'
        self.response.headers['Expires'] = email.Utils.formatdate(
          time.time() + seconds, usegmt=True)
      # The handler method can now re-write these if needed.
      return handler_method(self)
    return decorate
  return decorator
class test_page_views_view(webapp.RequestHandler):
  """testpage for pageviews counter."""
  @expires(0)
  def get(self):
    """HTTP get method: bump the counter for a synthetic page and render it."""
    page_name = "testpage%s" % (self.request.get('pagename'))
    # Increment first so GetStats() reflects this hit.
    views = pagecount.IncrPageCount(page_name, 1)
    stats = pagecount.GetStats()
    stats["pagename"] = page_name
    stats["pageviews"] = views
    self.response.out.write(render_template(TEST_PAGEVIEWS_TEMPLATE, stats))
class home_page_view(webapp.RequestHandler):
  """default homepage for consumer UI."""
  @expires(0) # User specific.
  def get(self):
    """HTTP get method: render the homepage template."""
    tvals = get_default_template_values(self.request, 'HOMEPAGE')
    self.response.out.write(render_template(HOMEPAGE_TEMPLATE, tvals))
class home_page_redir_view(webapp.RequestHandler):
  """handler for /home, which somehow got indexed by google."""
  @expires(0)
  def get(self):
    """HTTP get method: send /home visitors to the root page."""
    self.redirect("/")
class not_found_handler(webapp.RequestHandler):
  """Renders the custom 404 page."""
  def get(self):
    """HTTP get method: set 404 status and render the not-found template."""
    self.error(404)
    template_values = get_default_template_values(self.request, 'STATIC_PAGE')
    self.response.out.write(render_template(NOT_FOUND_TEMPLATE,
                                            template_values))
class consumer_ui_search_redir_view(webapp.RequestHandler):
  """handler for embedded HTML forms, which can't form queries
  with query params to the right of the # (hash)."""
  @expires(0)
  def get(self):
    """HTTP get method: rewrite ?querystring into #querystring and redirect."""
    # replace the path and replace the ? with #
    # down the road, if the consumer UI diverges from the urlparams
    # required by HTML embedders, then this algorithm could become
    # more complex, possibly including real page(s) instead of a
    # simple reuse of the consumer UI.
    dest = urls.URL_CONSUMER_UI_SEARCH + "#" + self.request.query_string
    self.redirect(dest)
class consumer_ui_search_view(webapp.RequestHandler):
  """default homepage for consumer UI."""
  @expires(0) # User specific.
  def get(self):
    """HTTP get method: render the search page with an empty result set."""
    tvals = get_default_template_values(self.request, 'SEARCH')
    tvals.update({'result_set': {}, 'is_main_page': True})
    self.response.out.write(render_template(SEARCH_RESULTS_TEMPLATE, tvals))
class search_view(webapp.RequestHandler):
  """run a search from the API. note various output formats."""
  @expires(1800) # Search results change slowly; cache for half an hour.
  def get(self):
    """HTTP get method: run the search and render in the requested format."""
    unique_args = get_unique_args_from_request(self.request)
    # An API key is required: render the help page and count the miss.
    if "key" not in unique_args:
      tplresult = render_template(SEARCH_RESULTS_MISSING_KEY_TEMPLATE, {})
      self.response.out.write(tplresult)
      pagecount.IncrPageCount("key.missing", 1)
      return
    pagecount.IncrPageCount("key.%s.searches" % unique_args["key"], 1)
    result_set = search.search(unique_args)
    # insert the interest data-- API searches are anonymous, so set the user
    # interests to 'None'. Note: left here to avoid polluting searchresults.py
    # with view_helper.py and social/personalization stuff.
    opp_ids = []
    # perf: only get interest counts for opps actually in the clipped results
    for primary_res in result_set.clipped_results:
      opp_ids += [result.item_id for result in primary_res.merged_list]
    others_interests = view_helper.get_interest_for_opportunities(opp_ids)
    view_helper.annotate_results(None, others_interests, result_set)
    # add-up the interests from the children
    for primary_res in result_set.clipped_results:
      for result in primary_res.merged_list:
        primary_res.interest_count += result.interest_count
    result_set.request_url = self.request.url
    # Pick the output template from the 'output' URL param; most
    # not-yet-implemented formats fall back to the RSS template.
    output = None
    if api.PARAM_OUTPUT in unique_args:
      output = unique_args[api.PARAM_OUTPUT]
    if not output or output == "html":
      if "geocode_responses" not in unique_args:
        unique_args["geocode_responses"] = 1
      tpl = SEARCH_RESULTS_DEBUG_TEMPLATE
    elif output == "rss":
      self.response.headers["Content-Type"] = "application/rss+xml"
      tpl = SEARCH_RESULTS_RSS_TEMPLATE
    elif output == "csv":
      # TODO: implement SEARCH_RESULTS_CSV_TEMPLATE
      tpl = SEARCH_RESULTS_RSS_TEMPLATE
    elif output == "tsv":
      # TODO: implement SEARCH_RESULTS_TSV_TEMPLATE
      tpl = SEARCH_RESULTS_RSS_TEMPLATE
    elif output == "xml":
      # TODO: implement SEARCH_RESULTS_XML_TEMPLATE
      #tpl = SEARCH_RESULTS_XML_TEMPLATE
      tpl = SEARCH_RESULTS_RSS_TEMPLATE
    elif output == "rssdesc":
      # TODO: implement SEARCH_RESULTS_RSSDESC_TEMPLATE
      tpl = SEARCH_RESULTS_RSS_TEMPLATE
    else:
      # TODO: implement SEARCH_RESULTS_ERROR_TEMPLATE
      # TODO: careful about escapification/XSS
      tpl = SEARCH_RESULTS_DEBUG_TEMPLATE
    latlng_string = ""
    if "lat" in result_set.args and "long" in result_set.args:
      latlng_string = "%s,%s" % (result_set.args["lat"],
                                 result_set.args["long"])
      logging.debug("geocode("+result_set.args[api.PARAM_VOL_LOC]+") = "+
                    result_set.args["lat"]+","+result_set.args["long"])
    template_values = get_default_template_values(self.request, 'SEARCH')
    template_values.update({
        'result_set': result_set,
        # TODO: remove this stuff...
        'latlong': latlng_string,
        'keywords': result_set.args[api.PARAM_Q],
        'location': result_set.args[api.PARAM_VOL_LOC],
        'max_distance': result_set.args[api.PARAM_VOL_DIST],
      })
    # pagecount.GetPageCount() is expensive-- only do for debug mode,
    # where this is printed.
    if tpl == SEARCH_RESULTS_DEBUG_TEMPLATE:
      for res in result_set.clipped_results:
        res.merged_clicks = pagecount.GetPageCount(
          pagecount.CLICKS_PREFIX+res.merge_key)
        if res.merged_impressions < 1.0:
          res.merged_ctr = "0"
        else:
          res.merged_ctr = "%.2f" % (
            100.0 * float(res.merged_clicks) / float(res.merged_impressions))
    self.response.out.write(render_template(tpl, template_values))
class ui_snippets_view(webapp.RequestHandler):
  """run a search and return consumer HTML for the results--
  this awful hack exists for latency reasons: it's super slow to
  parse things on the client."""
  @expires(0) # User specific.
  def get(self):
    """HTTP get method: run the search and render a snippets-list template."""
    unique_args = get_unique_args_from_request(self.request)
    result_set = search.search(unique_args)
    result_set.request_url = self.request.url
    template_values = get_default_template_values(self.request, 'SEARCH')
    # Retrieve the user-specific information for the search result set.
    user = template_values['user']
    if user:
      template_values['moderator'] = user.get_user_info().moderator
      result_set = view_helper.get_annotated_results(user, result_set)
      view_data = view_helper.get_friends_data_for_snippets(user)
    else:
      # Anonymous visitor: no moderator powers, no friend data.
      template_values['moderator'] = False
      view_data = {
        'friends': [],
        'friends_by_event_id_js': '{}',
      }
    # Echo the location back to the template only if it's a human-entered
    # string (not raw lat/long coordinates).
    loc = unique_args.get(api.PARAM_VOL_LOC, None)
    if loc and not geocode.is_latlong(loc) and not geocode.is_latlongzoom(loc):
      template_values['query_param_loc'] = loc
    else:
      template_values['query_param_loc'] = None
    template_values.update({
        'result_set': result_set,
        'has_results' : (result_set.num_merged_results > 0), # For django.
        'last_result_index' :
          result_set.clip_start_index + len(result_set.clipped_results),
        'display_nextpage_link' : result_set.has_more_results,
        'friends' : view_data['friends'],
        'friends_by_event_id_js': view_data['friends_by_event_id_js'],
        'query_param_q' : unique_args.get(api.PARAM_Q, None),
      })
    if self.request.get('minimal_snippets_list'):
      # Minimal results list for homepage.
      result_set.clipped_results.sort(cmp=searchresult.compare_result_dates)
      self.response.out.write(render_template(SNIPPETS_LIST_MINI_TEMPLATE,
                                              template_values))
    else:
      self.response.out.write(render_template(SNIPPETS_LIST_TEMPLATE,
                                              template_values))
class ui_my_snippets_view(webapp.RequestHandler):
  """The current spec for the My Events view (also known as "Profile")
  defines the following filters:
  * Filter on my own events
  * Filter on my own + my friends's events
  * Filter on various relative time periods
  * Filter on events that are still open (not past their completion dates)
  Furthermore the UI is spec'd such that each event displays a truncated list
  of friend names, along with a total count of friends.
  In order to collect that info, we seem to be stuck with O(n2) because
  I need to know *all* the events that *all* of my friends are interested in:
  1. Get the list of all events that I like or am doing.
  2. Get the list of all my friends.
  3. For each of my friends, get the list of all events that that friend likes
  or is doing.
  4. For each of the events found in step (3), associate the list of all
  interested users with that event.
  """
  @expires(0) # User specific.
  def get(self):
    """HTTP get method."""
    template_values = get_default_template_values(self.request, 'MY_EVENTS')
    user_info = template_values['user']
    unique_args = get_unique_args_from_request(self.request)
    if user_info:
      # Get the list of all events that I like or am doing.
      # This is a dict of event id keys and interest flag values (right now
      # we only support Liked).
      # (fix: renamed local from 'dict', which shadowed the builtin)
      interests_data = view_helper.get_user_interests(user_info, True)
      my_interests = interests_data['interests']
      ordered_event_ids = interests_data['ordered_event_ids']
      # Fetch the event details for the events I like, so they can be
      # displayed in the snippets template.
      my_events_gbase_result_set = base_search.get_from_ids(ordered_event_ids)
      for result in my_events_gbase_result_set.results:
        result.interest = my_interests.get(result.item_id, 0)
      search.normalize_query_values(unique_args)
      start = unique_args[api.PARAM_START]
      my_events_gbase_result_set.clip_start_index = start
      num = unique_args[api.PARAM_NUM]
      # Handle clipping.
      my_events_gbase_result_set.clip_results(start, num)
      # Get general interest numbers (i.e., not filtered to friends).
      overall_stats_for_my_interests = \
        view_helper.get_interest_for_opportunities(my_interests)
      view_helper.annotate_results(my_interests,
                                   overall_stats_for_my_interests,
                                   my_events_gbase_result_set)
      friend_data = view_helper.get_friends_data_for_snippets(user_info)
      template_values.update({
        'result_set': my_events_gbase_result_set,
        'has_results' : len(my_events_gbase_result_set.clipped_results) > 0,
        'last_result_index':
          my_events_gbase_result_set.clip_start_index + \
          len(my_events_gbase_result_set.clipped_results),
        'display_nextpage_link' : my_events_gbase_result_set.has_more_results,
        'friends' : friend_data['friends'],
        'friends_by_event_id_js': friend_data['friends_by_event_id_js'],
        'like_count': len(my_interests),
        })
    else:
      template_values.update({ 'has_results' : False, })
    template_values['query_param_q'] = unique_args.get(api.PARAM_Q, None)
    loc = unique_args.get(api.PARAM_VOL_LOC, None)
    # fix: guard loc against None before calling the geocode helpers, and
    # always set the key (consistent with ui_snippets_view above).
    if loc and not geocode.is_latlong(loc) and not geocode.is_latlongzoom(loc):
      template_values['query_param_loc'] = loc
    else:
      template_values['query_param_loc'] = None
    self.response.out.write(render_template(SNIPPETS_LIST_TEMPLATE,
                                            template_values))
class my_events_view(webapp.RequestHandler):
  """Shows events that you and your friends like or are doing."""
  @expires(0) # User specific.
  def get(self):
    """HTTP get method."""
    template_values = get_default_template_values(self.request, 'MY_EVENTS')
    if not template_values['user']:
      # Don't bother rendering this page if not authenticated; the
      # template sees only the redirect flag.
      template_values = {
        'current_page': 'MY_EVENTS',
        'redirect_to_home': True,
      }
    self.response.out.write(render_template(MY_EVENTS_TEMPLATE,
                                            template_values))
class post_view(webapp.RequestHandler):
  """user posting flow."""
  @expires(0)
  def post(self):
    """HTTP post method-- shares one code path with get()."""
    return self.get()
  @expires(0)
  def get(self):
    """HTTP get method.

    Renders the posting form if no captcha answer was submitted;
    otherwise verifies the captcha and creates the posting, then
    renders the result page.
    """
    user_info = userinfo.get_user(self.request)
    # synthesize GET method url from either GET or POST submission
    geturl = self.request.path + "?"
    for arg in self.request.arguments():
      geturl += urllib.quote_plus(arg) + "=" + \
          urllib.quote_plus(self.request.get(arg)) + "&"
    template_values = {
      'current_page' : 'POST',
      'geturl' : geturl,
      }
    load_userinfo_into_dict(user_info, template_values)
    recaptcha_challenge_field = self.request.get('recaptcha_challenge_field')
    if not recaptcha_challenge_field:
      # No captcha answer submitted yet: just show the posting form.
      self.response.out.write(render_template(POST_TEMPLATE, template_values))
      return
    recaptcha_response_field = self.request.get('recaptcha_response_field')
    resp = captcha.submit(recaptcha_challenge_field, recaptcha_response_field,
                          PK, self.request.remote_addr)
    vals = {}
    computed_vals = {}
    # "test" is accepted as a captcha response to support automated tests.
    # (fix: reuse recaptcha_response_field rather than re-fetching the same
    # request param; also removed a dead 'resp = None' initializer.)
    if (resp and resp.is_valid) or recaptcha_response_field == "test":
      vals["user_ipaddr"] = self.request.remote_addr
      load_userinfo_into_dict(user_info, vals)
      for arg in self.request.arguments():
        vals[arg] = self.request.get(arg)
      respcode, item_id, content = posting.create_from_args(vals, computed_vals)
      # TODO: is there a way to reference a dict-value in appengine+django ?
      for key in computed_vals:
        template_values["val_"+str(key)] = str(computed_vals[key])
      template_values["respcode"] = str(respcode)
      template_values["id"] = str(item_id)
      template_values["content"] = str(content)
    else:
      template_values["respcode"] = "401"
      template_values["id"] = ""
      template_values["content"] = "captcha error, e.g. response didn't match"
    template_values["vals"] = vals
    for key in vals:
      keystr = "val_"+str(key)
      if keystr in template_values:
        # should never happen-- throwing a 500 avoids silent failures
        self.response.set_status(500)
        self.response.out.write("internal error: duplicate template key")
        logging.error("internal error: duplicate template key: "+keystr)
        return
      template_values[keystr] = str(vals[key])
    self.response.out.write(render_template(POST_RESULT_TEMPLATE,
                                            template_values))
class admin_view(webapp.RequestHandler):
  """admin UI."""
  @require_admin
  @expires(0)
  def get(self):
    """HTTP get method: dispatch on ?action= and render the admin page."""
    # XSRF check: usig = signature of the user's login cookie.
    # Note: This is the logged in app engine user and uses
    # an internal implementation detail of appengine.
    usig = utils.signature(userinfo.get_cookie('ACSID') or
                           userinfo.get_cookie('dev_appserver_login'))
    template_values = {
      'logout_link': users.create_logout_url('/'),
      'msg': '',
      'action': '',
      'usig': usig,
      'version' : os.getenv('CURRENT_VERSION_ID'),
    }
    # Dispatch on the requested admin action; default to the main menu.
    action = self.request.get('action')
    if not action:
      action = "mainmenu"
    template_values['action'] = action
    if action == "mainmenu":
      template_values['msg'] = ""
    elif action == "flush_memcache":
      memcache.flush_all()
      template_values['msg'] = "memcached flushed"
    elif action == 'moderators':
      self.admin_moderator(template_values)
    logging.debug("admin_view: %s" % template_values['msg'])
    self.response.out.write(render_template(ADMIN_TEMPLATE,
                                            template_values))
  def admin_moderator(self, template_values):
    """View for adding/deleting moderators.

    Builds the moderator-management HTML into template_values['msg'].
    """
    # TODO: Use the template!
    message = []
    message.append('<h2>Moderator Management</h2>')
    # Current moderators, plus pending requests (non-moderators who have
    # filled in a request email).
    moderator_query = models.UserInfo.gql('WHERE moderator = TRUE')
    request_query = models.UserInfo.gql('WHERE moderator = FALSE and ' +
                                        'moderator_request_email > \'\'')
    message.append('<form method="POST">'
      '<input type="hidden" name="usig" value="%s">'
      '<input type="hidden" name="action" value="moderators">' %
      template_values['usig'])
    message.append('Existing moderators'
      '<table><tr><td>+</td><td>-</td><td>UID</td><td>Email</td></tr>')
    for moderator in moderator_query:
      keyname = moderator.key().name() or ''
      desc = moderator.moderator_request_desc or ''
      email = moderator.moderator_request_email or ''
      message.append('<tr><td> </td><td>'
        '<input type="checkbox" name="disable" value="%s"></td><td>%s</td>'
        '<td><span title="%s">%s</span></td></tr>' %
        (cgi.escape(keyname, True), cgi.escape(keyname),
         cgi.escape(desc, True), cgi.escape(email)))
    message.append('</table>Requests<table>'
      '<tr><td>+</td><td>-</td>'
      '<td>UID</td><td>Email</td></tr>')
    for request in request_query:
      keyname = request.key().name() or ''
      desc = request.moderator_request_desc or ''
      email = request.moderator_request_email or ''
      message.append('<tr><td>'
        '<input type="checkbox" name="enable" value="%s"></td>'
        '<td> </td>'
        '<td>%s</td><td><span title="%s">%s</span></td></tr>' %
        (cgi.escape(keyname, True), cgi.escape(keyname, True),
         cgi.escape(desc, True), cgi.escape(email, True)))
    message.append('</table>'
      '<input type="submit" />'
      '</form>')
    message.append('<hr>')
    template_values['msg'] = ''.join(message)
  @require_admin
  def post(self):
    """HTTP post method: apply moderator enable/disable checkboxes."""
    if self.request.get('action') != 'moderators':
      self.error(400)
      return
    # Verify the usig XSRF token minted in get().
    usig = utils.signature(userinfo.get_cookie('ACSID') or
                           userinfo.get_cookie('dev_appserver_login'))
    if self.request.get('usig') != usig:
      self.error(400)
      logging.warning('XSRF attempt. %s!=%s', usig, self.request.get('usig'))
      return
    keys_to_enable = self.request.POST.getall('enable')
    keys_to_disable = self.request.POST.getall('disable')
    now = datetime.isoformat(datetime.now())
    admin = users.get_current_user().email()
    # Enable the requested users, recording an audit note on each.
    users_to_enable = models.UserInfo.get_by_key_name(keys_to_enable)
    for user in users_to_enable:
      user.moderator = True
      if not user.moderator_request_admin_notes:
        user.moderator_request_admin_notes = ''
      user.moderator_request_admin_notes += '%s: Enabled by %s.\n' % \
        (now, admin)
    db.put(users_to_enable)
    # Same for disables.
    users_to_disable = models.UserInfo.get_by_key_name(keys_to_disable)
    for user in users_to_disable:
      user.moderator = False
      if not user.moderator_request_admin_notes:
        user.moderator_request_admin_notes = ''
      user.moderator_request_admin_notes += '%s: Disabled by %s.\n' % \
        (now, admin)
    db.put(users_to_disable)
    self.response.out.write(
      '<div style="color: green">Enabled %s and '
      'disabled %s moderators.</div>' %
      (len(users_to_enable), len(users_to_disable)))
    # The zx param busts caches on the continue link.
    self.response.out.write('<a href="%s?action=moderators&zx=%d">'
      'Continue</a>' % (self.request.path_url, datetime.now().microsecond))
class redirect_view(webapp.RequestHandler):
  """Process redirects. Present an interstital if the url is not signed."""
  @expires(0) # Used for counting.
  def get(self):
    """HTTP get method: count the click and redirect (or show interstitial)."""
    # destination url
    url = self.request.get('q')
    if not url or (not url.startswith('http:') and
                   not url.startswith('https:')):
      self.error(400)
      return
    # id is optional -- for tracking clicks on individual items
    # NOTE(review): 'id' shadows the builtin; harmless here but worth renaming.
    id = self.request.get('id')
    if not id:
      id = ""
    sig = self.request.get('sig')
    # Only correctly-signed URLs redirect silently.
    expected_sig = utils.signature(url+id)
    logging.debug('url: %s s: %s xs: %s' % (url, sig, expected_sig))
    if sig == expected_sig:
      if id:
        # note: testapi calls don't test clicks...
        clicks = pagecount.IncrPageCount(pagecount.CLICKS_PREFIX + id, 1)
        # note: clicks are relatively rare-- we can trivially afford the
        # cost of CTR computation, and it helps development.
        views = pagecount.GetPageCount(pagecount.VIEWS_PREFIX + id)
        logging.debug("click: merge_key=%s clicks=%d views=%d ctr=%.1f%%" %
                      (id, clicks, views, float(clicks)/float(views+0.1)))
      self.redirect(url)
      return
    # Unsigned/mis-signed URL: show an interstitial instead of redirecting.
    # TODO: Use a proper template so this looks nicer.
    response = ('<h1>Redirect</h1>' +
                'This page is sending you to <a href="%s">%s</a><p />' %
                (cgi.escape(url, True), cgi.escape(url, True)))
    # TODO: Something more clever than go(-1), which doesn't work on new
    # windows, etc. Maybe check for 'referer' or send users to '/'.
    response += ('If you do not want to visit that page, you can ' +
                 '<a href="javascript:history.go(-1)">go back</a>.')
    self.response.out.write(response)
class moderate_view(webapp.RequestHandler):
  """fast UI for voting/moderating on listings."""
  @require_moderator
  @expires(0)
  def get(self):
    """HTTP get method."""
    return self.moderate_postings(False)
  @require_moderator
  @require_usig
  def post(self):
    """HTTP post method."""
    return self.moderate_postings(True)
  def moderate_postings(self, is_post):
    """Combined request handler. Only POSTs can modify state.

    Args:
      is_post: True when invoked from post(), enabling state changes.
    """
    action = self.request.get('action')
    if action == "test":
      posting.createTestDatabase()
    now = datetime.now()
    nowstr = now.strftime("%Y-%m-%d %H:%M:%S")
    if is_post:
      # Reject stale forms: only apply changes submitted within an hour.
      ts = self.request.get('ts', nowstr)
      dt = datetime.strptime(ts, "%Y-%m-%d %H:%M:%S")
      delta = now - dt
      if delta.seconds < 3600:
        logging.debug("processing changes...")
        vals = {}
        for arg in self.request.arguments():
          vals[arg] = self.request.get(arg)
        posting.process(vals)
    num = self.request.get('num', "20")
    reslist = posting.query(num=int(num))
    def compare_quality_scores(s1, s2):
      """compare two quality scores for the purposes of sorting."""
      diff = s2.quality_score - s1.quality_score
      if (diff > 0):
        return 1
      if (diff < 0):
        return -1
      return 0
    reslist.sort(cmp=compare_quality_scores)
    for i, res in enumerate(reslist):
      res.idx = i+1
      # Bugfix: compare the description's *length*, not the string itself.
      # In Python 2, 'str > int' is always True, so every description was
      # unconditionally truncated.
      if len(res.description) > 100:
        res.description = res.description[0:97]+"..."
    template_values = get_default_template_values(self.request, 'MODERATE')
    template_values.update({
        'num' : str(num),
        'ts' : str(nowstr),
        'result_set' : reslist,
        'usig' : userinfo.get_usig(self.user)
      })
    self.response.out.write(render_template(MODERATE_TEMPLATE, template_values))
class moderate_blacklist_view(webapp.RequestHandler):
  """Handle moderating blacklist entries."""
  @require_moderator
  @expires(0)
  def get(self):
    """HTTP get method for blacklist actions.

    Shows a confirmation form; the actual state change happens in post().
    """
    action = self.request.get('action')
    if action not in ['blacklist', 'unblacklist']:
      self.error(400)
      return
    key = self.request.get('key')
    if not key:
      self.error(400)
      self.response.out.write("<html><body>sorry: key required</body></html>")
      return
    def generate_blacklist_form(action, key):
      """Return an HTML form for the blacklist action."""
      # TODO: This should obviously be in a template.
      usig = userinfo.get_usig(self.user)
      return ('<form method="POST" action="%s">'
              '<input type="hidden" name="action" value="%s">'
              '<input type="hidden" name="usig" value="%s">'
              '<input type="hidden" name="key" value="%s">'
              '<input type="submit" value="I am sure">'
              '</form>' %
              (self.request.path_url, action, usig, key))
    text = 'Internal error.'
    # Look up the key's current blacklist state to pick the right page.
    opp_stats = modelutils.get_by_ids(models.VolunteerOpportunityStats,
                                      [key])
    key_blacklisted = key in opp_stats and opp_stats[key].blacklisted
    if action == "blacklist" and not key_blacklisted:
      text = ('Please confirm blacklisting of key %s: %s' %
              (key, generate_blacklist_form('blacklist', key)))
    elif action == 'unblacklist' and not key_blacklisted:
      text = 'Key %s is not currently blacklisted.' % key
    else:
      text = ('key %s is already blacklisted.<br>'
              'Would you like to restore it?%s' %
              (key, generate_blacklist_form('unblacklist', key)))
    # TODO: Switch this to its own template!
    template_values = {
      'user' : self.user,
      'path' : self.request.path,
      'static_content' : text,
    }
    self.response.out.write(render_template(STATIC_CONTENT_TEMPLATE,
                                            template_values))
  @require_moderator
  @require_usig
  @expires(0)
  def post(self):
    """Handle edits to the blacklist from HTTP POST."""
    action = self.request.get('action')
    if action not in ['blacklist', 'unblacklist']:
      self.error(400)
      return
    key = self.request.get('key')
    if not key:
      self.error(400)
      self.response.out.write("<html><body>sorry: key required</body></html>")
      return
    if self.request.get('action') == 'blacklist':
      if not models.VolunteerOpportunityStats.set_blacklisted(key, 1):
        text = 'Internal failure trying to add key %s to blacklist.' % key
      else:
        # TODO: better workflow, e.g. email the deleted key to an address
        # along with an url to undelete it?
        # Or just show it on the moderate/action=unblacklist page.
        undel_url = '%s?action=unblacklist&key=%s' % (self.request.path_url,
                                                      key)
        text = ('deleted listing with key %s.<br/>'
                'To undo, click <a href="%s">here</a>'
                ' (you may want to save this URL).' %
                (key, undel_url))
    else:
      if not models.VolunteerOpportunityStats.set_blacklisted(key, 0):
        text = 'Internal failure trying to remove key %s from blacklist.' % key
      else:
        text = "un-deleted listing with key "+key
    # TODO: Switch this to its own template!
    template_values = {
      'user' : self.user,
      'path' : self.request.path,
      'static_content' : text,
    }
    self.response.out.write(render_template(STATIC_CONTENT_TEMPLATE,
                                            template_values))
class action_view(webapp.RequestHandler):
  """vote/tag/etc on a listing. TODO: rename to something more specific."""
  @expires(0)
  def post(self):
    """HTTP POST method: record a 'star' (like) action for the current user."""
    # Only 'star' actions are supported so far.
    if self.request.get('type') != 'star':
      self.error(400) # Bad request
      return
    user = userinfo.get_user(self.request)
    opp_id = self.request.get('oid')
    base_url = self.request.get('base_url')
    new_value = self.request.get('i')
    if not user:
      logging.warning('No user.')
      self.error(401) # Unauthorized
      return
    if not opp_id or not base_url or not new_value:
      logging.warning('bad param')
      self.error(400) # Bad request
      return
    new_value = int(new_value)
    if new_value != 0 and new_value != 1:
      self.error(400) # Bad request
      return
    # XSRF defense: require the XMLHttpRequest marker header, which
    # a cross-site form post cannot set.
    xsrf_header_found = False
    for h, v in self.request.headers.iteritems():
      if h.lower() == 'x-requested-with' and v == 'XMLHttpRequest':
        xsrf_header_found = True
        break
    if not xsrf_header_found:
      self.error(400)
      logging.warning('Attempted XSRF.')
      return
    user_entity = user.get_user_info()
    # Find or create this user's interest record for the opportunity.
    user_interest = models.UserInterest.get_or_insert(
      models.UserInterest.make_key_name(user_entity, opp_id),
      user=user_entity, opp_id=opp_id, liked_last_modified=datetime.now())
    if not user_interest:
      self.error(500) # Server error.
      return
    # Populate VolunteerOpportunity table with (opp_id,base_url)
    # TODO(paul): Populate this more cleanly and securely, not from URL params.
    key = models.VolunteerOpportunity.DATASTORE_PREFIX + opp_id
    info = models.VolunteerOpportunity.get_or_insert(key)
    if info.base_url != base_url:
      info.base_url = base_url
      info.last_base_url_update = datetime.now()
      info.base_url_failure_count = 0
      info.put()
    # pylint: disable-msg=W0612
    (unused_new_entity, deltas) = \
      modelutils.set_entity_attributes(user_interest,
        { models.USER_INTEREST_LIKED: new_value },
        None)
    if deltas is not None: # Explicit check against None.
      # Roll the change into the opportunity's aggregate counts.
      success = models.VolunteerOpportunityStats.increment(opp_id, deltas)
      if success:
        self.response.out.write('ok')
        return
    self.error(500) # Server error.
class static_content(webapp.RequestHandler):
  """Handles static content like privacy policy and 'About Us'

  The static content files are checked in SVN under /frontend/html.
  We want to be able to update these files without having to push the
  entire website. The code here fetches the content directly from SVN,
  memcaches it, and serves that. So once a static content file is
  submitted into SVN, it will go live on the site automatically (as soon
  as memcache entry expires) without requiring a full site push.
  """
  STATIC_CONTENT_MEMCACHE_TIME = 60 * 60  # One hour.
  STATIC_CONTENT_MEMCACHE_KEY = 'static_content:'

  @expires(0)  # User specific. Maybe we should remove that so it's cacheable.
  def get(self):
    """HTTP get method.

    Serves the SVN-hosted file mapped from self.request.path, via memcache.
    Responds 404 when the path is unknown or the remote fetch fails.
    """
    # Bug fix: an unknown path used to raise KeyError (an HTTP 500);
    # respond 404 instead.
    relative_path = urls.STATIC_CONTENT_FILES.get(self.request.path)
    if relative_path is None:
      self.error(404)
      return
    remote_url = urls.STATIC_CONTENT_LOCATION + relative_path
    text = memcache.get(self.STATIC_CONTENT_MEMCACHE_KEY + remote_url)
    if not text:
      # Content is not in memcache. Fetch from remote location.
      # We have to append ?zx= to URL to avoid urlfetch's cache.
      result = urlfetch.fetch("%s?zx=%d" % (remote_url,
                                            datetime.now().microsecond))
      if result.status_code == 200:
        text = result.content
        memcache.set(self.STATIC_CONTENT_MEMCACHE_KEY + remote_url,
                     text,
                     self.STATIC_CONTENT_MEMCACHE_TIME)
    if not text:
      # Fetch failed and nothing cached: report not-found.
      self.error(404)
      return
    template_values = get_default_template_values(self.request, 'STATIC_PAGE')
    template_values['static_content'] = text
    self.response.out.write(render_template(STATIC_CONTENT_TEMPLATE,
                                            template_values))
class datahub_dashboard_view(webapp.RequestHandler):
  """stats by provider, on a hidden URL (underlying data is a hidden URL)."""

  @expires(0)
  def get(self):
    """shutup pylint"""
    # Renders per-provider load statistics as Google Visualization
    # ImageSparkLine charts, built by parsing the datahub STATUS log.
    template_values = {
      'msg': '',
      'action': '',
      'version' : os.getenv('CURRENT_VERSION_ID'),
    }
    url = self.request.get('datahub_log')
    if not url or url == "":
      url = DATAHUB_LOG
    fetch_result = urlfetch.fetch(url)
    if fetch_result.status_code != 200:
      # Note: execution continues below even on error; the parse loops
      # simply find no matching lines in the error body.
      template_values['msg'] = \
          "error fetching dashboard data: code %d" % fetch_result.status_code
    # Transparently decompress bzip2-compressed logs.
    if re.search(r'[.]bz2$', url):
      import bz2
      fetch_result.content = bz2.decompress(fetch_result.content)
    lines = fetch_result.content.split("\n")
    # typical line
    # 2009-04-26 18:07:16.295996:STATUS:extraordinaries done parsing: output
    # 7 organizations and 7 opportunities (13202 bytes): 0 minutes.
    # Groups: 1=date, 2=time, 3=provider, 4=orgs, 5=opps, 6=bytes, 7=minutes.
    statusrx = re.compile("(\d+-\d+-\d+) (\d+:\d+:\d+)[.]\d+:STATUS:(.+?) "+
                          "done parsing: output (\d+) organizations and "+
                          "(\d+) opportunities .(\d+) bytes.: (\d+) minutes")
    def parse_date(datestr, timestr):
      """uses day granularity now that we have a few weeks of data.
      At N=10 providers, 5 values, 12 bytes each, 600B per record.
      daily is reasonable for a year, hourly is not."""
      #match = re.search(r'(\d+):', timestr)
      #hour = int(match.group(1))
      #return datestr + str(4*int(hour / 4)) + ":00"
      return datestr
    js_data = ""
    # First pass over the log: discover the distinct dates and providers.
    known_dates = {}
    date_strings = []
    known_providers = {}
    provider_names = []
    for line in lines:
      match = re.search(statusrx, line)
      if match:
        hour = parse_date(match.group(1), match.group(2))
        known_dates[hour] = 0
        known_providers[match.group(3)] = 0
        #js_data += "// hour="+hour+" provider="+match.group(3)+"\n"
    # Assign each provider a stable column index (sorted by name) and
    # create one empty per-provider record list.
    template_values['provider_data'] = provider_data = []
    sorted_providers = sorted(known_providers.keys())
    for i, provider in enumerate(sorted_providers):
      known_providers[provider] = i
      provider_data.append([])
      provider_names.append(provider)
      #js_data += "// provider_names["+str(i)+"]="+provider_names[i]+"\n"
    # Assign each date a row index and pre-fill provider_data with one
    # empty record per (provider, date) cell.
    sorted_dates = sorted(known_dates.keys())
    for i, hour in enumerate(sorted_dates):
      for j, provider in enumerate(sorted_providers):
        provider_data[j].append({})
      known_dates[hour] = i
      date_strings.append(hour)
      #js_data += "// date_strings["+str(i)+"]="+date_strings[i]+"\n"
    def commas(num):
      """Insert thousands separators into a number string.

      Intended for non-negative integer strings (its only callers pass
      str(int)); not safe for decimals.
      """
      num = str(num)
      while True:
        newnum, count = re.subn(r'(\d)(\d\d\d)(,|[.]|$)', r'\1,\2', num)
        if count == 0:
          break
        num = newnum
      return num
    # Second pass: fill provider_data[provider_idx][date_idx] with the
    # parsed stats, and remember each provider's latest load timestamp.
    max_date = {}
    for line in lines:
      match = re.search(statusrx, line)
      if match:
        hour = parse_date(match.group(1), match.group(2))
        date_idx = known_dates[hour]
        provider = match.group(3)
        provider_idx = known_providers[provider]
        # Strip the seconds (':SS') from the timestamp for display.
        max_date[provider_idx] = re.sub(r':\d\d$', '',
                                        match.group(1) + " " + match.group(2))
        rec = provider_data[provider_idx][date_idx]
        rec['organizations'] = int(match.group(4))
        rec['listings'] = int(match.group(5))
        rec['kbytes'] = int(float(match.group(6))/1024.0)
        rec['loadtimes'] = int(match.group(7))
    # JS helper shorthands emitted once, used by all the chart snippets.
    js_data += "function sv(row,col,val) {data.setValue(row,col,val);}\n"
    js_data += "function ac(typ,key) {data.addColumn(typ,key);}\n"
    js_data += "function acn(key) {data.addColumn('number',key);}\n"
    # provider names are implemented as chart labels, so they line up
    # with the charts-- otherwise it just doesn't work right.
    js_data += "data = new google.visualization.DataTable();\n"
    js_data += "data.addRows(1);"
    for provider_idx, provider in enumerate(sorted_providers):
      js_data += "\nacn('"+provider+"');"
      js_data += "sv(0,"+str(provider_idx)+",0);"
    js_data += "data.addRows(1);"
    js_data += "\nacn('totals');"
    js_data += "sv(0,"+str(len(sorted_providers))+",0);"
    js_data += "\n"
    js_data += "var chart = new google.visualization.ImageSparkLine("
    js_data += "  document.getElementById('provider_names'));\n"
    js_data += "chart.draw(data,{width:160,height:50,showAxisLines:false,"
    js_data += "  showValueLabels:false,labelPosition:'right'});\n"
    # provider last loaded times are implemented as chart labels, so
    # they line up with the charts-- otherwise it just doesn't work.
    js_data += "data = new google.visualization.DataTable();\n"
    js_data += "data.addRows(1);"
    maxdate = ""
    for provider_idx, provider in enumerate(sorted_providers):
      js_data += "\nacn('"+max_date[provider_idx]+"');"
      js_data += "sv(0,"+str(provider_idx)+",0);"
      if maxdate < max_date[provider_idx]:
        maxdate = max_date[provider_idx]
    js_data += "data.addRows(1);"
    js_data += "\nacn('"+maxdate+"');"
    js_data += "sv(0,"+str(len(sorted_providers))+",0);"
    js_data += "\n"
    js_data += "var chart = new google.visualization.ImageSparkLine("
    js_data += "  document.getElementById('lastloaded'));\n"
    js_data += "chart.draw(data,{width:150,height:50,showAxisLines:false,"
    js_data += "  showValueLabels:false,labelPosition:'right'});\n"
    # One sparkline table per statistic: a column per provider (labeled
    # with the current value) plus a totals column; a row per date.
    totals = {}
    for key in ['organizations', 'listings', 'kbytes', 'loadtimes']:
      totals[key] = 0
      js_data += "data = new google.visualization.DataTable();\n"
      js_data += "data.addRows("+str(len(sorted_dates))+");\n"
      colnum = 0
      for provider_idx, provider in enumerate(sorted_providers):
        colstr = ""
        try:
          # print the current number next to the graph
          colstr = "\nacn('"+commas(str(provider_data[provider_idx][-1][key]))+"');"
          totals[key] += provider_data[provider_idx][-1][key]
        except:
          # Missing latest record for this provider/stat: label as zero.
          colstr = "\nacn('0');"
        for date_idx, hour in enumerate(sorted_dates):
          try:
            rec = provider_data[provider_idx][date_idx]
            val = "sv("+str(date_idx)+","+str(colnum)+","+str(rec[key])+");"
          except:
            # No data point for this provider/date: leave the cell unset.
            val = ""
          colstr += val
        colnum += 1
        js_data += colstr
      js_data += "data.addRows(1);"
      js_data += "\nacn('"+commas(str(totals[key]))+"');"
      js_data += "sv(0,"+str(len(sorted_providers))+",0);"
      js_data += "\n"
      js_data += "var chart = new google.visualization.ImageSparkLine("
      js_data += "  document.getElementById('"+key+"_chart'));\n"
      js_data += "chart.draw(data,{width:200,height:50,showAxisLines:false,"
      js_data += "  showValueLabels:false,labelPosition:'right'});\n"
    template_values['datahub_dashboard_js_data'] = js_data
    logging.debug("datahub_dashboard_view: %s" % template_values['msg'])
    self.response.out.write(render_template(DATAHUB_DASHBOARD_TEMPLATE,
                                            template_values))
| Python |
#!/usr/bin/python2.5
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Datastore models."""
from google.appengine.api import memcache
from google.appengine.ext import db
import modelutils
class Error(Exception):
  """Base class for all exceptions raised by this module."""
class BadAccountType(Error):
  """Raised when an account type is not facebook, friendconnect, or test."""
# Models
class UserInfo(db.Model):
  """Basic user statistics/preferences data."""
  # Key is accounttype:user_id (accounttype is one of KNOWN_TYPES below).
  first_visit = db.DateTimeProperty(auto_now_add=True)
  last_edit = db.DateTimeProperty(auto_now=True)
  moderator = db.BooleanProperty(default=False)
  moderator_request_email = db.StringProperty()
  moderator_request_desc = db.TextProperty()
  moderator_request_admin_notes = db.StringProperty(multiline=True)

  def account_type(self):
    """Returns one of (FRIENDCONNECT, FACEBOOK, TEST): the key-name prefix."""
    key_name = self.key().name()
    return key_name.split(':', 1)[0]

  def user_id(self):
    """User id: the key-name portion after the first ':'."""
    key_name = self.key().name()
    return key_name.split(':', 1)[1]

  # Known types of accounts. Type must not start with a number.
  FRIENDCONNECT = 'friendconnect'
  FACEBOOK = 'facebook'
  TEST = 'test'
  KNOWN_TYPES = (FRIENDCONNECT, FACEBOOK, TEST)

  @classmethod
  def get_or_insert_user(cls, account_type, user_id):
    """Gets existing or creates a new user.

    Similar to get_or_insert, increments UserStats if appropriate.

    Args:
      account_type: Type of account used (one of KNOWN_TYPES).
      user_id: address within that system.

    Returns:
      UserInfo for this user.

    Raises:
      BadAccountType: if the account_type is unknown.
      Various datastore exceptions.
    """
    if account_type not in cls.KNOWN_TYPES:
      raise BadAccountType()
    key_name = '%s:%s' % (account_type, user_id)

    def txn():
      """Transaction to get or insert user."""
      entity = cls.get_by_key_name(key_name)
      created_entity = False
      if entity is None:
        entity = cls(key_name=key_name)
        entity.put()
        created_entity = True
      return (entity, created_entity)

    # Bug fix: removed a redundant non-transactional get_by_key_name()
    # here whose result was immediately overwritten by the transaction
    # below (a wasted datastore round trip).
    (user_info, created_entity) = db.run_in_transaction(txn)
    if created_entity:
      # Only count each user once, on first creation.
      UserStats.increment(account_type, user_id)
    return user_info
class UserStats(db.Model):
  """Stats about how many users we have."""
  # Per-shard user count; totals are computed by summing all shards.
  count = db.IntegerProperty(default=0)

  @classmethod
  def increment(cls, account_type, user_id):
    """Sharded counter. User ID is only for sharding."""
    def txn():
      """Transaction to increment account_type's stats."""
      # We want << 1000 shards.
      # This cheesy shard mechanism allows us some amount of way to see how
      # many users of each type we have too.
      shard_name = '%s:%s' % (account_type, user_id[:2])
      counter = cls.get_by_key_name(shard_name)
      if counter is None:
        counter = cls(key_name=shard_name)
      counter.count += 1
      counter.put()
    db.run_in_transaction(txn)

  @staticmethod
  def get_count():
    """Returns total number of users (sum over all shards)."""
    return sum(counter.count for counter in UserStats.all())
class UserInterest(db.Model):
  """Our record of a user's actions related to an opportunity."""
  # Key is ('id:%s#%s' % (the stable ID from base, user key name))
  # stable ID is probably not the same ID provided in the feed from providers.
  DATASTORE_PREFIX = 'id:'
  user = db.ReferenceProperty(UserInfo, collection_name='interests')
  opp_id = db.StringProperty()
  liked_last_modified = db.DateTimeProperty()
  # The interest types (liked, will_attend, etc) must exist with the
  # same property names in UserInterest and VolunteerOpportunityStats,
  # and be in sync with USER_INTEREST_ATTRIBUTES at the end of this file.
  liked = db.IntegerProperty(default=0)
  will_attend = db.IntegerProperty(default=0)
  flagged = db.IntegerProperty(default=0)

  @classmethod
  def make_key_name(cls, user_entity, opp_id):
    """Generate key name for a given user_entity/opp_id pair.

    NOTE(review): DATASTORE_PREFIX already ends with ':' and the format
    string inserts another one, so this actually yields
    'id::<opp_id>#<user_key_name>' -- not the 'id:<opp_id>#<user>' form
    documented above.  Existing entities were written with the
    double-colon form, so do not change this format string without a
    data migration.
    """
    return '%s:%s#%s' % (cls.DATASTORE_PREFIX, opp_id, user_entity.key().name())
class VolunteerOpportunityStats(db.Model):
  """Basic statistics about opportunities."""
  # The __key__ is 'id:' + volunteer_opportunity_id
  DATASTORE_PREFIX = 'id:'
  MEMCACHE_PREFIX = 'VolunteerOpportunityStats:'
  MEMCACHE_TIME = 60000  # seconds
  last_edit = db.DateTimeProperty(auto_now=True)
  # The interest types (liked, will_attend, etc) must exist with the
  # same property names in UserInterest and VolunteerOpportunityStats,
  # and be in sync with USER_INTEREST_ATTRIBUTES at the end of this file.
  liked = db.IntegerProperty(default=0)
  will_attend = db.IntegerProperty(default=0)
  flagged = db.IntegerProperty(default=0)
  # Blacklist is controlled by the moderators only, it is not a statistic.
  blacklisted = db.IntegerProperty(default=0)

  @classmethod
  def increment(cls, volunteer_opportunity_id, relative_attributes,
                absolute_attributes=None):
    """Helper to increment volunteer opportunity stats.

    Example:
      VolunteerOpportunityStats.increment(opp_id,
          { USER_INTEREST_LIKED: 1, USER_INTEREST_WILL_ATTEND: 1 })

    Args:
      volunteer_opportunity_id: ID of opportunity.
      relative_attributes: Dictionary of attr_name:value pairs to set as
        relative to current value.
      absolute_attributes: Dictionary of attr_name:value pairs to set as
        absolute values.

    Returns:
      Success boolean
    """
    entity = VolunteerOpportunityStats.get_or_insert(
        cls.DATASTORE_PREFIX + volunteer_opportunity_id)
    if not entity:
      return False
    # Only the updated entity is needed here; the computed deltas are
    # intentionally discarded (hence the unused_ prefix).
    (new_entity, unused_deltas) = \
        modelutils.set_entity_attributes(entity, absolute_attributes,
                                         relative_attributes)
    # Refresh memcache so subsequent reads see the updated counters
    # without a datastore hit.
    memcache.set(cls.MEMCACHE_PREFIX + volunteer_opportunity_id, new_entity,
                 time=cls.MEMCACHE_TIME)
    return True

  @classmethod
  def set_blacklisted(cls, volunteer_opportunity_id, value):
    """Helper to set volunteer opportunity value and update memcache."""
    # A wrapper for 'increment'--it's overkill, but manages memcache for us.
    return cls.increment(volunteer_opportunity_id, {}, {'blacklisted' : value})

  @classmethod
  def add_default_entities_to_memcache(cls, ids):
    """Add blank entities to memcache so get_by_ids quickly returns them."""
    # memcache.add_multi only stores keys that are not already cached, so
    # real cached stats are never overwritten by these blanks.
    entities = {}
    for key in ids:
      entities[key] = cls(key_name=cls.DATASTORE_PREFIX + key)
    memcache.add_multi(entities, time=cls.MEMCACHE_TIME,
                       key_prefix=cls.MEMCACHE_PREFIX)
class VolunteerOpportunity(db.Model):
  """Basic information about opportunities.

  Separate from VolunteerOpportunityStats because these entries need not be
  operated on transactionally since there's no counts.
  """
  # The __key__ is 'id:' + volunteer_opportunity_id
  DATASTORE_PREFIX = 'id:'
  MEMCACHE_PREFIX = 'VolunteerOpportunity:'
  MEMCACHE_TIME = 60000  # seconds
  # Information about the opportunity
  # URL to the Google Base entry
  base_url = db.StringProperty()
  # When we last update the Base URL.
  last_base_url_update = db.DateTimeProperty()
  # Incremented (possibly incorrectly to avoid transactions) when we try
  # to load the data from base but fail. Also the last date/time seen.
  base_url_failure_count = db.IntegerProperty(default=0)
  last_base_url_update_failure = db.DateTimeProperty()
  # TODO(paul): added_to_calendar, added_to_facebook_profile, etc
# Names of the per-user interest counters.  These strings must match the
# property names declared on both UserInterest and VolunteerOpportunityStats,
# since attribute updates are driven by these names.
USER_INTEREST_LIKED = 'liked'
USER_INTEREST_WILL_ATTEND = 'will_attend'
USER_INTEREST_FLAGGED = 'flagged'

# All interest attribute names, for code that iterates over every counter.
USER_INTEREST_ATTRIBUTES = (
  USER_INTEREST_LIKED,
  USER_INTEREST_WILL_ATTEND,
  USER_INTEREST_FLAGGED,
)
| Python |
#! /usr/bin/env python
#
# pyfacebook - Python bindings for the Facebook API
#
# Copyright (c) 2008, Samuel Cormier-Iijima
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Python bindings for the Facebook API (pyfacebook - http://code.google.com/p/pyfacebook)
PyFacebook is a client library that wraps the Facebook API.
For more information, see
Home Page: http://code.google.com/p/pyfacebook
Developer Wiki: http://wiki.developers.facebook.com/index.php/Python
Facebook IRC Channel: #facebook on irc.freenode.net
PyFacebook can use simplejson if it is installed, which
is much faster than XML and also uses less bandwidth. Go to
http://undefined.org/python/#simplejson to download it, or do
apt-get install python-simplejson on a Debian-like system.
"""
import md5
import sys
import time
import struct
import urllib
import urllib2
import httplib
import hashlib
import binascii
import urlparse
import mimetypes
# try to use simplejson first, otherwise fallback to XML
RESPONSE_FORMAT = 'JSON'
try:
import json as simplejson
except ImportError:
try:
import simplejson
except ImportError:
try:
from django.utils import simplejson
except ImportError:
try:
import jsonlib as simplejson
simplejson.loads
except (ImportError, AttributeError):
from xml.dom import minidom
RESPONSE_FORMAT = 'XML'
# support Google App Engine. GAE does not have a working urllib.urlopen.
try:
    from google.appengine.api import urlfetch

    def urlread(url, data=None, headers=None):
        """Fetch url via App Engine urlfetch; POST when data is given.

        Returns the response body on HTTP 200, raises urllib2.URLError
        otherwise.
        """
        if data is not None:
            if headers is None:
                headers = {"Content-type": "application/x-www-form-urlencoded"}
            method = urlfetch.POST
        else:
            if headers is None:
                headers = {}
            method = urlfetch.GET

        result = urlfetch.fetch(url, method=method,
                                payload=data, headers=headers)

        if result.status_code == 200:
            return result.content
        else:
            raise urllib2.URLError("fetch error url=%s, code=%d" % (url, result.status_code))

except ImportError:
    def urlread(url, data=None, headers=None):
        """Fetch url with urllib2; mirrors the GAE signature above.

        Bug fix: this fallback previously lacked the 'headers' parameter,
        so callers passing headers= (e.g. PhotosProxy.upload) raised
        TypeError outside of GAE.
        """
        req = urllib2.Request(url, data=data, headers=headers or {})
        res = urllib2.urlopen(req)
        return res.read()
# Public API of this module.
__all__ = ['Facebook']

VERSION = '0.1'

# REST endpoints used for Facebook API calls (plain and SSL).
FACEBOOK_URL = 'http://api.facebook.com/restserver.php'
FACEBOOK_SECURE_URL = 'https://api.facebook.com/restserver.php'
class json(object): pass
# simple IDL for the Facebook API.
# METHODS maps namespace -> method name -> parameter list, where each
# parameter is a (name, type, options) tuple; options may contain
# 'optional' and/or a ('default', value) pair.  __generate_proxies() turns
# this table into Proxy subclasses with one generated method per entry.
METHODS = {
    'application': {
        'getPublicInfo': [
            ('application_id', int, ['optional']),
            ('application_api_key', str, ['optional']),
            # Bug fix: the parameter name had a trailing space
            # ('application_canvas_name '), sending a bad arg name.
            ('application_canvas_name', str, ['optional']),
        ],
    },

    # admin methods
    'admin': {
        'getAllocation': [
            ('integration_point_name', str, []),
        ],
    },

    # feed methods
    'feed': {
        'publishStoryToUser': [
            ('title', str, []),
            ('body', str, ['optional']),
            ('image_1', str, ['optional']),
            ('image_1_link', str, ['optional']),
            ('image_2', str, ['optional']),
            ('image_2_link', str, ['optional']),
            ('image_3', str, ['optional']),
            ('image_3_link', str, ['optional']),
            ('image_4', str, ['optional']),
            ('image_4_link', str, ['optional']),
            ('priority', int, ['optional']),
        ],

        'publishActionOfUser': [
            ('title', str, []),
            ('body', str, ['optional']),
            ('image_1', str, ['optional']),
            ('image_1_link', str, ['optional']),
            ('image_2', str, ['optional']),
            ('image_2_link', str, ['optional']),
            ('image_3', str, ['optional']),
            ('image_3_link', str, ['optional']),
            ('image_4', str, ['optional']),
            ('image_4_link', str, ['optional']),
            ('priority', int, ['optional']),
        ],

        'publishTemplatizedAction': [
            ('title_template', str, []),
            ('page_actor_id', int, ['optional']),
            ('title_data', json, ['optional']),
            ('body_template', str, ['optional']),
            ('body_data', json, ['optional']),
            ('body_general', str, ['optional']),
            ('image_1', str, ['optional']),
            ('image_1_link', str, ['optional']),
            ('image_2', str, ['optional']),
            ('image_2_link', str, ['optional']),
            ('image_3', str, ['optional']),
            ('image_3_link', str, ['optional']),
            ('image_4', str, ['optional']),
            ('image_4_link', str, ['optional']),
            ('target_ids', list, ['optional']),
        ],

        'registerTemplateBundle': [
            ('one_line_story_templates', json, []),
            ('short_story_templates', json, ['optional']),
            ('full_story_template', json, ['optional']),
            ('action_links', json, ['optional']),
        ],

        'deactivateTemplateBundleByID': [
            ('template_bundle_id', int, []),
        ],

        'getRegisteredTemplateBundles': [],

        'getRegisteredTemplateBundleByID': [
            ('template_bundle_id', str, []),
        ],

        'publishUserAction': [
            ('template_bundle_id', int, []),
            ('template_data', json, ['optional']),
            ('target_ids', list, ['optional']),
            ('body_general', str, ['optional']),
        ],
    },

    # fql methods
    'fql': {
        'query': [
            ('query', str, []),
        ],
    },

    # friends methods
    'friends': {
        'areFriends': [
            ('uids1', list, []),
            ('uids2', list, []),
        ],

        'get': [
            ('flid', int, ['optional']),
        ],

        'getLists': [],

        'getAppUsers': [],
    },

    # notifications methods
    'notifications': {
        'get': [],

        'send': [
            ('to_ids', list, []),
            ('notification', str, []),
            ('email', str, ['optional']),
            ('type', str, ['optional']),
        ],

        'sendRequest': [
            ('to_ids', list, []),
            ('type', str, []),
            ('content', str, []),
            ('image', str, []),
            ('invite', bool, []),
        ],

        'sendEmail': [
            ('recipients', list, []),
            ('subject', str, []),
            ('text', str, ['optional']),
            ('fbml', str, ['optional']),
        ]
    },

    # profile methods
    'profile': {
        'setFBML': [
            ('markup', str, ['optional']),
            ('uid', int, ['optional']),
            ('profile', str, ['optional']),
            ('profile_action', str, ['optional']),
            ('mobile_fbml', str, ['optional']),
            ('profile_main', str, ['optional']),
        ],

        'getFBML': [
            ('uid', int, ['optional']),
            ('type', int, ['optional']),
        ],

        'setInfo': [
            ('title', str, []),
            ('type', int, []),
            ('info_fields', json, []),
            ('uid', int, []),
        ],

        'getInfo': [
            ('uid', int, []),
        ],

        'setInfoOptions': [
            ('field', str, []),
            ('options', json, []),
        ],

        'getInfoOptions': [
            ('field', str, []),
        ],
    },

    # users methods
    'users': {
        'getInfo': [
            ('uids', list, []),
            ('fields', list, [('default', ['name'])]),
        ],

        'getStandardInfo': [
            ('uids', list, []),
            ('fields', list, [('default', ['uid'])]),
        ],

        'getLoggedInUser': [],

        'isAppAdded': [],

        'hasAppPermission': [
            ('ext_perm', str, []),
            ('uid', int, ['optional']),
        ],

        'setStatus': [
            ('status', str, []),
            ('clear', bool, []),
            ('status_includes_verb', bool, ['optional']),
            ('uid', int, ['optional']),
        ],
    },

    # events methods
    'events': {
        'get': [
            ('uid', int, ['optional']),
            ('eids', list, ['optional']),
            ('start_time', int, ['optional']),
            ('end_time', int, ['optional']),
            ('rsvp_status', str, ['optional']),
        ],

        'getMembers': [
            ('eid', int, []),
        ],

        'create': [
            ('event_info', json, []),
        ],
    },

    # update methods
    'update': {
        'decodeIDs': [
            ('ids', list, []),
        ],
    },

    # groups methods
    'groups': {
        'get': [
            ('uid', int, ['optional']),
            ('gids', list, ['optional']),
        ],

        'getMembers': [
            ('gid', int, []),
        ],
    },

    # marketplace methods
    'marketplace': {
        'createListing': [
            ('listing_id', int, []),
            ('show_on_profile', bool, []),
            ('listing_attrs', str, []),
        ],

        'getCategories': [],

        'getListings': [
            ('listing_ids', list, []),
            ('uids', list, []),
        ],

        'getSubCategories': [
            ('category', str, []),
        ],

        'removeListing': [
            ('listing_id', int, []),
            ('status', str, []),
        ],

        'search': [
            ('category', str, ['optional']),
            ('subcategory', str, ['optional']),
            ('query', str, ['optional']),
        ],
    },

    # pages methods
    'pages': {
        'getInfo': [
            ('page_ids', list, ['optional']),
            ('uid', int, ['optional']),
        ],

        'isAdmin': [
            ('page_id', int, []),
        ],

        'isAppAdded': [
            ('page_id', int, []),
        ],

        'isFan': [
            ('page_id', int, []),
            ('uid', int, []),
        ],
    },

    # photos methods
    'photos': {
        'addTag': [
            ('pid', int, []),
            ('tag_uid', int, [('default', 0)]),
            ('tag_text', str, [('default', '')]),
            ('x', float, [('default', 50)]),
            ('y', float, [('default', 50)]),
            ('tags', str, ['optional']),
        ],

        'createAlbum': [
            ('name', str, []),
            ('location', str, ['optional']),
            ('description', str, ['optional']),
        ],

        'get': [
            ('subj_id', int, ['optional']),
            ('aid', int, ['optional']),
            ('pids', list, ['optional']),
        ],

        'getAlbums': [
            ('uid', int, ['optional']),
            ('aids', list, ['optional']),
        ],

        'getTags': [
            ('pids', list, []),
        ],
    },

    # fbml methods
    'fbml': {
        'refreshImgSrc': [
            ('url', str, []),
        ],

        'refreshRefUrl': [
            ('url', str, []),
        ],

        'setRefHandle': [
            ('handle', str, []),
            ('fbml', str, []),
        ],
    },

    # SMS Methods
    'sms': {
        'canSend': [
            ('uid', int, []),
        ],

        'send': [
            ('uid', int, []),
            ('message', str, []),
            ('session_id', int, []),
            ('req_session', bool, []),
        ],
    },

    'data': {
        'getCookies': [
            ('uid', int, []),
            ('string', str, []),
        ],

        'setCookie': [
            ('uid', int, []),
            ('name', str, []),
            ('value', str, []),
            ('expires', int, ['optional']),
            ('path', str, ['optional']),
        ],
    },

    # connect methods
    'connect': {
        'registerUsers': [
            ('accounts', json, []),
        ],

        'unregisterUsers': [
            ('email_hashes', json, []),
        ],

        'getUnconnectedFriendsCount': [
        ],
    },
}
class Proxy(object):
    """Represents a "namespace" of Facebook API calls."""

    def __init__(self, client, name):
        # client: the Facebook object that performs the actual request;
        # name: the namespace string prefixed onto every method call.
        self._client = client
        self._name = name

    def __call__(self, method=None, args=None, add_session_args=True):
        # for Django templates: calling the proxy with no method returns
        # the proxy itself, so attribute chains keep resolving.
        if method is None:
            return self
        # Mutates args in place, adding session_key etc. when requested.
        if add_session_args:
            self._client._add_session_args(args)
        return self._client('%s.%s' % (self._name, method), args)
# generate the Facebook proxies
def __generate_proxies():
    """Build one Proxy subclass per METHODS namespace and install each in
    globals() as e.g. FriendsProxy, PhotosProxy, ...

    For every METHODS entry, the source of a wrapper function is assembled
    as a list of statement strings, exec'd, and collected into the class
    dict.  NOTE: this exec/eval pattern relies on Python 2 function-local
    exec semantics.
    """
    for namespace in METHODS:
        methods = {}

        for method in METHODS[namespace]:
            params = ['self']
            body = ['args = {}']

            for param_name, param_type, param_options in METHODS[namespace][method]:
                param = param_name

                for option in param_options:
                    if isinstance(option, tuple) and option[0] == 'default':
                        if param_type == list:
                            # Mutable default: default to None in the
                            # signature and materialize inside the body.
                            param = '%s=None' % param_name
                            body.append('if %s is None: %s = %s' % (param_name, param_name, repr(option[1])))
                        else:
                            param = '%s=%s' % (param_name, repr(option[1]))

                if param_type == json:
                    # we only jsonify the argument if it's a list or a dict, for compatibility
                    body.append('if isinstance(%s, list) or isinstance(%s, dict): %s = simplejson.dumps(%s)' % ((param_name,) * 4))

                if 'optional' in param_options:
                    param = '%s=None' % param_name
                    body.append('if %s is not None: args[\'%s\'] = %s' % (param_name, param_name, param_name))
                else:
                    body.append('args[\'%s\'] = %s' % (param_name, param_name))

                params.append(param)

            # simple docstring to refer them to Facebook API docs
            body.insert(0, '"""Facebook API call. See http://developers.facebook.com/documentation.php?v=1.0&method=%s.%s"""' % (namespace, method))
            body.insert(0, 'def %s(%s):' % (method, ', '.join(params)))
            body.append('return self(\'%s\', args)' % method)

            # Join with newline+space so each statement is indented one
            # level under the generated def, then bind the new function.
            exec('\n '.join(body))
            methods[method] = eval(method)

        proxy = type('%sProxy' % namespace.title(), (Proxy, ), methods)
        globals()[proxy.__name__] = proxy

__generate_proxies()
class FacebookError(Exception):
    """Raised for error responses received from the Facebook API."""

    def __init__(self, code, msg, args=None):
        # Facebook's numeric error code and human-readable message.
        self.code = code
        self.msg = msg
        # Optional extra payload; note this rebinds Exception.args.
        self.args = args

    def __str__(self):
        return 'Error ' + str(self.code) + ': ' + str(self.msg)
class AuthProxy(Proxy):
    """Special proxy for facebook.auth."""

    def getSession(self):
        """Facebook API call. See http://developers.facebook.com/documentation.php?v=1.0&method=auth.getSession"""
        args = {}
        try:
            args['auth_token'] = self._client.auth_token
        except AttributeError:
            # createToken() (or the login flow) must populate auth_token
            # before a session can be requested.
            raise RuntimeError('Client does not have auth_token set.')
        result = self._client('%s.getSession' % self._name, args)
        # Cache the session credentials on the client for subsequent calls.
        self._client.session_key = result['session_key']
        self._client.uid = result['uid']
        self._client.secret = result.get('secret')
        self._client.session_key_expires = result['expires']
        return result

    def createToken(self):
        """Facebook API call. See http://developers.facebook.com/documentation.php?v=1.0&method=auth.createToken"""
        token = self._client('%s.createToken' % self._name)
        # Remember the token so getSession() can redeem it.
        self._client.auth_token = token
        return token
class FriendsProxy(FriendsProxy):
    """Special proxy for facebook.friends.

    Note: deliberately subclasses -- and rebinds the name of -- the
    FriendsProxy class that __generate_proxies() installed in globals(),
    so this definition transparently extends the generated proxy.
    """

    def get(self, **kwargs):
        """Facebook API call. See http://developers.facebook.com/documentation.php?v=1.0&method=friends.get"""
        # Serve the client's cached friend list unless a specific friend
        # list id (flid) was requested.
        if not kwargs.get('flid') and self._client._friends:
            return self._client._friends
        return super(FriendsProxy, self).get(**kwargs)
class PhotosProxy(PhotosProxy):
    """Special proxy for facebook.photos.

    Subclasses (and rebinds) the generated PhotosProxy to add multipart
    photo uploading, which the generic proxy machinery cannot express.
    """

    def upload(self, image, aid=None, caption=None, size=(604, 1024), filename=None):
        """Facebook API call. See http://developers.facebook.com/documentation.php?v=1.0&method=photos.upload

        image -- path to an image file, or raw image bytes (in which case
            filename must also be given).
        aid -- optional album id to upload into.
        caption -- optional photo caption.
        size -- an optional size (width, height) to resize the image to before uploading. Resizes by default
            to Facebook's maximum display width of 604.
        filename -- when given, 'image' is treated as binary data and this
            value is used as the upload filename.
        """
        args = {}

        if aid is not None:
            args['aid'] = aid

        if caption is not None:
            args['caption'] = caption

        args = self._client._build_post_args(
            'facebook.photos.upload', self._client._add_session_args(args))

        try:
            import cStringIO as StringIO
        except ImportError:
            import StringIO

        # check for a filename specified...if the user is passing binary data in
        # image then a filename will be specified
        if filename is None:
            try:
                import Image
            except ImportError:
                # No PIL available: upload the file bytes unresized.
                data = StringIO.StringIO(open(image, 'rb').read())
            else:
                img = Image.open(image)
                if size:
                    img.thumbnail(size, Image.ANTIALIAS)
                data = StringIO.StringIO()
                img.save(data, img.format)
        else:
            # there was a filename specified, which indicates that image was not
            # the path to an image file but rather the binary data of a file
            data = StringIO.StringIO(image)
            image = filename

        content_type, body = self.__encode_multipart_formdata(
            list(args.iteritems()), [(image, data)])
        urlinfo = urlparse.urlsplit(self._client.facebook_url)
        try:
            h = httplib.HTTP(urlinfo[1])
            h.putrequest('POST', urlinfo[2])
            h.putheader('Content-Type', content_type)
            h.putheader('Content-Length', str(len(body)))
            h.putheader('MIME-Version', '1.0')
            h.putheader('User-Agent', 'PyFacebook Client Library')
            h.endheaders()
            h.send(body)

            reply = h.getreply()
            if reply[0] != 200:
                raise Exception('Error uploading photo: Facebook returned HTTP %s (%s)' % (reply[0], reply[1]))
            response = h.file.read()
        except:
            # sending the photo via httplib failed; perhaps we are running
            # on GAE (no raw sockets), so retry through urlfetch/urlread.
            try:
                from google.appengine.api import urlfetch
                try:
                    response = urlread(url=self._client.facebook_url,
                                       data=body,
                                       headers={'POST': urlinfo[2],
                                                'Content-Type': content_type,
                                                'MIME-Version': '1.0'})
                except urllib2.URLError:
                    # Bug fix: the original message interpolated 'response',
                    # which is unbound when urlread raises (NameError masked
                    # the real failure).
                    raise Exception('Error uploading photo: '
                                    'Facebook rejected the upload request')
            except ImportError:
                # could not import from google.appengine.api, so we are not
                # running in GAE
                raise Exception('Error uploading photo.')
        return self._client._parse_response(response, 'facebook.photos.upload')

    def __encode_multipart_formdata(self, fields, files):
        """Encodes a multipart/form-data message to upload an image."""
        boundary = '-------tHISiStheMulTIFoRMbOUNDaRY'
        crlf = '\r\n'
        l = []

        for (key, value) in fields:
            l.append('--' + boundary)
            l.append('Content-Disposition: form-data; name="%s"' % str(key))
            l.append('')
            l.append(str(value))
        for (filename, value) in files:
            l.append('--' + boundary)
            l.append('Content-Disposition: form-data; filename="%s"' % (str(filename), ))
            l.append('Content-Type: %s' % self.__get_content_type(filename))
            l.append('')
            l.append(value.getvalue())
        l.append('--' + boundary + '--')
        l.append('')
        body = crlf.join(l)
        content_type = 'multipart/form-data; boundary=%s' % boundary
        return content_type, body

    def __get_content_type(self, filename):
        """Returns a guess at the MIME type of the file from the filename.

        Bug fix: the original wrapped guess_type()[0] in str(), so a None
        guess became the truthy string 'None' -- the octet-stream fallback
        was unreachable and 'None' was sent as the Content-Type.
        """
        return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
class Facebook(object):
"""
Provides access to the Facebook API.
Instance Variables:
added
True if the user has added this application.
api_key
Your API key, as set in the constructor.
app_name
Your application's name, i.e. the APP_NAME in http://apps.facebook.com/APP_NAME/ if
this is for an internal web application. Optional, but useful for automatic redirects
to canvas pages.
auth_token
The auth token that Facebook gives you, either with facebook.auth.createToken,
or through a GET parameter.
callback_path
The path of the callback set in the Facebook app settings. If your callback is set
to http://www.example.com/facebook/callback/, this should be '/facebook/callback/'.
Optional, but useful for automatic redirects back to the same page after login.
desktop
True if this is a desktop app, False otherwise. Used for determining how to
authenticate.
facebook_url
The url to use for Facebook requests.
facebook_secure_url
The url to use for secure Facebook requests.
in_canvas
True if the current request is for a canvas page.
internal
True if this Facebook object is for an internal application (one that can be added on Facebook)
page_id
Set to the page_id of the current page (if any)
secret
Secret that is used after getSession for desktop apps.
secret_key
Your application's secret key, as set in the constructor.
session_key
The current session key. Set automatically by auth.getSession, but can be set
manually for doing infinite sessions.
session_key_expires
The UNIX time of when this session key expires, or 0 if it never expires.
uid
After a session is created, you can get the user's UID with this variable. Set
automatically by auth.getSession.
----------------------------------------------------------------------
"""
def __init__(self, api_key, secret_key, auth_token=None, app_name=None, callback_path=None, internal=None, proxy=None, facebook_url=None, facebook_secure_url=None):
    """
    Initializes a new Facebook object which provides wrappers for the Facebook API.
    If this is a desktop application, the next couple of steps you might want to take are:
    facebook.auth.createToken() # create an auth token
    facebook.login() # show a browser window
    wait_login() # somehow wait for the user to log in
    facebook.auth.getSession() # get a session key
    For web apps, if you are passed an auth_token from Facebook, pass that in as a named parameter.
    Then call:
    facebook.auth.getSession()
    """
    self.api_key = api_key
    self.secret_key = secret_key
    self.session_key = None
    self.session_key_expires = None
    self.auth_token = auth_token
    self.secret = None
    self.uid = None
    self.page_id = None
    self.in_canvas = False
    self.added = False
    self.app_name = app_name
    self.callback_path = callback_path
    self.internal = internal
    self._friends = None
    self.proxy = proxy
    self.facebook_url = FACEBOOK_URL if facebook_url is None else facebook_url
    self.facebook_secure_url = FACEBOOK_SECURE_URL if facebook_secure_url is None else facebook_secure_url
    # Attach one generated proxy instance per API namespace (self.photos,
    # self.friends, ...).  Look the class up in module globals directly
    # instead of eval()ing a constructed expression -- same objects, no
    # string evaluation.
    for namespace in METHODS:
        proxy_class = globals()['%sProxy' % namespace.title()]
        self.__dict__[namespace] = proxy_class(self, 'facebook.%s' % namespace)
    self.auth = AuthProxy(self, 'facebook.auth')
def _hash_args(self, args, secret=None):
    """Hashes arguments by joining key=value pairs, appending a secret, and then taking the MD5 hex digest."""
    # @author: houyr
    # fix for UnicodeEncodeError
    # Pairs are concatenated in sorted-key order (Facebook's signature
    # scheme requires a canonical ordering); unicode keys and values are
    # UTF-8 encoded before hashing.
    hasher = md5.new(''.join(['%s=%s' % (isinstance(x, unicode) and x.encode("utf-8") or x, isinstance(args[x], unicode) and args[x].encode("utf-8") or args[x]) for x in sorted(args.keys())]))
    # Secret precedence: explicit argument > session secret (set for
    # desktop apps after auth.getSession) > application secret key.
    if secret:
        hasher.update(secret)
    elif self.secret:
        hasher.update(self.secret)
    else:
        hasher.update(self.secret_key)
    return hasher.hexdigest()
def _parse_response_item(self, node):
    """Parses an XML response node from Facebook.

    Dispatches on the node's shape:
      - document whose root carries list="true" -> {root name: [items]}
      - element carrying list="true"            -> [items]
      - element with element children           -> dict of children
      - leaf node                               -> concatenated text
    """
    if node.nodeType == node.DOCUMENT_NODE and \
        node.childNodes[0].hasAttributes() and \
        node.childNodes[0].hasAttribute('list') and \
        node.childNodes[0].getAttribute('list') == "true":
        return {node.childNodes[0].nodeName: self._parse_response_list(node.childNodes[0])}
    elif node.nodeType == node.ELEMENT_NODE and \
        node.hasAttributes() and \
        node.hasAttribute('list') and \
        node.getAttribute('list')=="true":
        return self._parse_response_list(node)
    elif len(filter(lambda x: x.nodeType == x.ELEMENT_NODE, node.childNodes)) > 0:
        return self._parse_response_dict(node)
    else:
        # NOTE(review): the generator variable deliberately shadows the
        # parameter 'node' in the original; kept byte-identical here.
        return ''.join(node.data for node in node.childNodes if node.nodeType == node.TEXT_NODE)
def _parse_response_dict(self, node):
    """Parse an XML dictionary response node into a plain dict.

    Each element child becomes a key; an 'id' attribute on the node
    itself is folded in as well.
    """
    parsed = {}
    for child in node.childNodes:
        if child.nodeType == child.ELEMENT_NODE:
            parsed[child.nodeName] = self._parse_response_item(child)
    if (node.nodeType == node.ELEMENT_NODE and node.hasAttributes()
            and node.hasAttribute('id')):
        parsed['id'] = node.getAttribute('id')
    return parsed
def _parse_response_list(self, node):
    """Parse an XML list response node into a Python list."""
    return [self._parse_response_item(child)
            for child in node.childNodes
            if child.nodeType == child.ELEMENT_NODE]
def _check_error(self, response):
    """Raise FacebookError if *response* is an API error payload."""
    # Error payloads are plain dicts carrying an 'error_code' key.
    if type(response) is dict and 'error_code' in response:
        raise FacebookError(response['error_code'], response['error_msg'], response['request_args'])
def _build_post_args(self, method, args=None):
    """Adds to args parameters that are necessary for every call to the API.

    Note: mutates and returns *args* in place.  Lists become
    comma-separated strings, unicode is UTF-8 encoded and booleans become
    'true'/'false', as the REST server expects.  The 'sig' parameter must
    be computed last, over all other parameters.
    """
    if args is None:
        args = {}
    # args.items() returns a list copy in Python 2, so reassigning values
    # while iterating is safe here.
    for arg in args.items():
        if type(arg[1]) == list:
            args[arg[0]] = ','.join(str(a) for a in arg[1])
        elif type(arg[1]) == unicode:
            args[arg[0]] = arg[1].encode("UTF-8")
        elif type(arg[1]) == bool:
            args[arg[0]] = str(arg[1]).lower()
    args['method'] = method
    args['api_key'] = self.api_key
    args['v'] = '1.0'
    args['format'] = RESPONSE_FORMAT
    args['sig'] = self._hash_args(args)
    return args
def _add_session_args(self, args=None):
    """Attach 'session_key' and 'call_id' to *args* for session-scoped calls.

    Mutates and returns *args*.  When no session key is set the args are
    returned untouched -- some calls no longer require a session.
    """
    args = {} if args is None else args
    if self.session_key:
        args['session_key'] = self.session_key
        # call_id must be strictly increasing; milliseconds since epoch.
        args['call_id'] = str(int(time.time() * 1000))
    return args
def _parse_response(self, response, method, format=None):
    """Parses the response according to the given (optional) format, which should be either 'JSON' or 'XML'.

    Raises FacebookError for API error payloads and RuntimeError for an
    unknown format.
    """
    if not format:
        format = RESPONSE_FORMAT
    if format == 'JSON':
        result = simplejson.loads(response)
        self._check_error(result)
    elif format == 'XML':
        dom = minidom.parseString(response)
        result = self._parse_response_item(dom)
        dom.unlink()
        if 'error_response' in result:
            self._check_error(result['error_response'])
        # The XML payload is wrapped in e.g. <users_getInfo_response>;
        # method[9:] strips the leading 'facebook.' prefix of the method
        # name to build that wrapper key.
        result = result[method[9:].replace('.', '_') + '_response']
    else:
        raise RuntimeError('Invalid format specified.')
    return result
def hash_email(self, email):
    """
    Hash an email address in a format suitable for Facebook Connect.

    The result is '<crc32>_<md5>' of the normalized (lowercased,
    stripped) address.  binascii.crc32 returns a signed int here; the
    pack('i')/unpack('I') round trip reinterprets it as unsigned, which
    is the representation Facebook expects.
    """
    email = email.lower().strip()
    return "%s_%s" % (
        struct.unpack("I", struct.pack("i", binascii.crc32(email)))[0],
        hashlib.md5(email).hexdigest(),
    )
def unicode_urlencode(self, params):
    """
    @author: houyr
    A unicode aware version of urllib.urlencode: UTF-8 encodes unicode
    values before delegating to urllib.urlencode.
    """
    pairs = params.items() if isinstance(params, dict) else params
    encoded_pairs = [(key, isinstance(value, unicode) and value.encode('utf-8') or value)
                     for key, value in pairs]
    return urllib.urlencode(encoded_pairs)
def __call__(self, method=None, args=None, secure=False):
    """Make a call to Facebook's REST server.

    Calling with method=None returns self (Django template traversal).
    Set secure=True to POST to the HTTPS endpoint instead.
    """
    # for Django templates, if this object is called without any arguments
    # return the object itself
    if method is None:
        return self
    # @author: houyr
    # fix for bug of UnicodeEncodeError
    post_data = self.unicode_urlencode(self._build_post_args(method, args))
    if self.proxy:
        # Route the request through the configured HTTP proxy.
        proxy_handler = urllib2.ProxyHandler(self.proxy)
        opener = urllib2.build_opener(proxy_handler)
        if secure:
            response = opener.open(self.facebook_secure_url, post_data).read()
        else:
            response = opener.open(self.facebook_url, post_data).read()
    else:
        # urlread is the module-level helper (urlfetch on GAE, urllib2
        # elsewhere).
        if secure:
            response = urlread(self.facebook_secure_url, post_data)
        else:
            response = urlread(self.facebook_url, post_data)
    return self._parse_response(response, method)
# URL helpers
def get_url(self, page, **args):
    """
    Returns one of the Facebook URLs (www.facebook.com/SOMEPAGE.php).
    Named arguments are passed as GET query string parameters.
    """
    query_string = urllib.urlencode(args)
    return 'http://www.facebook.com/%s.php?%s' % (page, query_string)
def get_app_url(self, path=''):
    """
    Returns the URL for this app's canvas page, according to app_name.
    """
    canvas_root = 'http://apps.facebook.com/%s/' % self.app_name
    return canvas_root + path
def get_add_url(self, next=None):
    """
    Returns the URL that the user should be redirected to in order to add
    (install) the application.
    """
    query = {'api_key': self.api_key, 'v': '1.0'}
    if next is not None:
        query['next'] = next
    return self.get_url('install', **query)
def get_authorize_url(self, next=None, next_cancel=None):
    """
    Returns the URL that the user should be redirected to in order to
    authorize certain actions for application.

    next / next_cancel -- redirect targets for accept / cancel.
    """
    query = {'api_key': self.api_key, 'v': '1.0'}
    for key, value in (('next', next), ('next_cancel', next_cancel)):
        if value is not None:
            query[key] = value
    return self.get_url('authorize', **query)
def get_login_url(self, next=None, popup=False, canvas=True):
    """
    Returns the URL that the user should be redirected to in order to login.

    next -- the URL that Facebook should redirect to after login
    """
    query = {'api_key': self.api_key, 'v': '1.0'}
    if next is not None:
        query['next'] = next
    # Flags are only added when literally True, matching Facebook's
    # expected presence/absence semantics.
    if canvas is True:
        query['canvas'] = 1
    if popup is True:
        query['popup'] = 1
    if self.auth_token is not None:
        query['auth_token'] = self.auth_token
    return self.get_url('login', **query)
def login(self, popup=False):
    """Open the user's default web browser at the Facebook login page."""
    import webbrowser
    login_url = self.get_login_url(popup=popup)
    webbrowser.open(login_url)
def get_ext_perm_url(self, ext_perm, next=None, popup=False):
    """
    Returns the URL that the user should be redirected to in order to grant
    an extended permission.

    ext_perm -- the name of the extended permission to request
    next -- the URL that Facebook should redirect to after login
    """
    query = {'ext_perm': ext_perm, 'api_key': self.api_key, 'v': '1.0'}
    if next is not None:
        query['next'] = next
    if popup is True:
        query['popup'] = 1
    return self.get_url('authorize', **query)
def request_extended_permission(self, ext_perm, popup=False):
    """Open a web browser asking the user to grant *ext_perm*."""
    import webbrowser
    perm_url = self.get_ext_perm_url(ext_perm, popup=popup)
    webbrowser.open(perm_url)
def check_session(self, request):
    """
    Checks the given Django HttpRequest for Facebook parameters such as
    POST variables or an auth token. If the session is valid, returns True
    and this object can now be used to access the Facebook API. Otherwise,
    it returns False, and the application should take the appropriate action
    (either log the user in or have him add the application).
    """
    self.in_canvas = (request.POST.get('fb_sig_in_canvas') == '1')
    # Already holding a validated session from an earlier call.
    if self.session_key and (self.uid or self.page_id):
        return True
    if request.method == 'POST':
        params = self.validate_signature(request.POST)
    else:
        if 'installed' in request.GET:
            self.added = True
        if 'fb_page_id' in request.GET:
            self.page_id = request.GET['fb_page_id']
        if 'auth_token' in request.GET:
            # Web-app login flow: trade the auth token for a session
            # immediately; on failure, discard the bad token.
            self.auth_token = request.GET['auth_token']
            try:
                self.auth.getSession()
            except FacebookError, e:
                self.auth_token = None
                return False
            return True
        params = self.validate_signature(request.GET)
    if not params:
        # first check if we are in django - to check cookies
        if hasattr(request, 'COOKIES'):
            params = self.validate_cookie_signature(request.COOKIES)
        else:
            # if not, then we might be on GoogleAppEngine, check their request object cookies
            if hasattr(request,'cookies'):
                params = self.validate_cookie_signature(request.cookies)
    if not params:
        return False
    # Copy the validated request parameters onto this client instance.
    if params.get('in_canvas') == '1':
        self.in_canvas = True
    if params.get('added') == '1':
        self.added = True
    if params.get('expires'):
        self.session_key_expires = int(params['expires'])
    if 'friends' in params:
        if params['friends']:
            self._friends = params['friends'].split(',')
        else:
            self._friends = []
    # A session must identify either a user or a page, otherwise it is
    # rejected.  'profile_session_key' appears for profile-tab requests.
    if 'session_key' in params:
        self.session_key = params['session_key']
        if 'user' in params:
            self.uid = params['user']
        elif 'page_id' in params:
            self.page_id = params['page_id']
        else:
            return False
    elif 'profile_session_key' in params:
        self.session_key = params['profile_session_key']
        if 'profile_user' in params:
            self.uid = params['profile_user']
        else:
            return False
    else:
        return False
    return True
def validate_signature(self, post, prefix='fb_sig', timeout=None):
    """
    Validate parameters passed to an internal Facebook app from Facebook.

    Returns the prefix-stripped parameter dict when the signature matches,
    otherwise None.
    """
    candidate = post.copy()
    if prefix not in candidate:
        return None
    expected_sig = candidate.pop(prefix)
    time_key = '%s_time' % prefix
    # Reject stale requests when a timeout (seconds) was requested.
    if timeout and time_key in post and time.time() - float(post[time_key]) > timeout:
        return None
    strip_len = len(prefix) + 1
    stripped = dict((key[strip_len:], value)
                    for key, value in candidate.items()
                    if key.startswith(prefix))
    if self._hash_args(stripped) == expected_sig:
        return stripped
    return None
def validate_cookie_signature(self, cookies):
    """
    Validate parameters passed by cookies, namely facebookconnect or js api.

    Returns the stripped parameter dict on success, False on a signature
    mismatch, or None when no Facebook cookies are present at all.
    """
    if self.api_key not in cookies.keys():
        return None
    api_prefix = self.api_key + "_"
    signed_keys = [k for k in sorted(cookies.keys()) if k.startswith(api_prefix)]
    params = dict((k.replace(api_prefix, ""), cookies[k]) for k in signed_keys)
    # The signature is md5 over concatenated key=value pairs (no
    # separator) followed by the application secret.
    payload = ''.join(['%s=%s' % (k.replace(api_prefix, ""), cookies[k]) for k in signed_keys])
    hasher = md5.new(payload)
    hasher.update(self.secret_key)
    if hasher.hexdigest() == cookies[self.api_key]:
        return params
    return False
######## Note: This code was not written by facebook
def check_connect_session(self, request):
    """
    For use in a facebook Connect application running in Google App Engine
    Takes a Google App Engine Request
    http://code.google.com/appengine/docs/webapp/requestclass.html
    and determines if the current user has a valid session
    """
    # Session state lives entirely in cookies for Connect apps.
    cookie_params = self.validate_cookie(request.cookies)
    if not cookie_params:
        return False
    if cookie_params.get('expires'):
        self.session_key_expires = int(cookie_params['expires'])
    # Both a session key and a user id are required for a usable session.
    if 'session_key' not in cookie_params or 'user' not in cookie_params:
        return False
    self.session_key = cookie_params['session_key']
    self.uid = cookie_params['user']
    return True
def validate_cookie(self, cookies):
    """
    Validates parameters passed to a Facebook connect app through cookies.

    Returns the api_key-stripped parameter dict when the hash matches,
    otherwise None.
    """
    # The cookie named exactly after the api_key holds the signature.
    if self.api_key not in cookies:
        return None
    key_prefix = self.api_key + "_"
    stripped = dict((name[len(key_prefix):], value)
                    for name, value in cookies.items()
                    if name.startswith(key_prefix))
    if self._hash_args(stripped) == cookies[self.api_key]:
        return stripped
    return None
if __name__ == '__main__':
    # sample desktop application
    # Fill in your application's credentials before running this demo.
    api_key = ''
    secret_key = ''
    facebook = Facebook(api_key, secret_key)
    facebook.auth.createToken()
    # Show login window
    # Set popup=True if you want login without navigational elements
    facebook.login()
    # Login to the window, then press enter
    print 'After logging in, press enter...'
    raw_input()
    # Exchange the auth token for a session; populates session_key/uid.
    facebook.auth.getSession()
    print 'Session Key: ', facebook.session_key
    print 'Your UID: ', facebook.uid
    # Fetch and display some profile fields for the logged-in user.
    info = facebook.users.getInfo([facebook.uid], ['name', 'birthday', 'affiliations', 'sex'])[0]
    print 'Your Name: ', info['name']
    print 'Your Birthday: ', info['birthday']
    print 'Your Gender: ', info['sex']
    # Demonstrate the friends and photos namespaces.
    friends = facebook.friends.get()
    friends = facebook.users.getInfo(friends[0:5], ['name', 'birthday', 'relationship_status'])
    for friend in friends:
        print friend['name'], 'has a birthday on', friend['birthday'], 'and is', friend['relationship_status']
    arefriends = facebook.friends.areFriends([friends[0]['uid']], [friends[1]['uid']])
    photos = facebook.photos.getAlbums(facebook.uid)
| Python |
#! /usr/bin/env python
#
# pyfacebook - Python bindings for the Facebook API
#
# Copyright (c) 2008, Samuel Cormier-Iijima
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Python bindings for the Facebook API (pyfacebook - http://code.google.com/p/pyfacebook)
PyFacebook is a client library that wraps the Facebook API.
For more information, see
Home Page: http://code.google.com/p/pyfacebook
Developer Wiki: http://wiki.developers.facebook.com/index.php/Python
Facebook IRC Channel: #facebook on irc.freenode.net
PyFacebook can use simplejson if it is installed, which
is much faster than XML and also uses less bandwith. Go to
http://undefined.org/python/#simplejson to download it, or do
apt-get install python-simplejson on a Debian-like system.
"""
import md5
import sys
import time
import struct
import urllib
import urllib2
import httplib
import hashlib
import binascii
import urlparse
import mimetypes
# try to use simplejson first, otherwise fallback to XML
# Probe JSON backends in preference order (stdlib json, simplejson,
# Django's bundled copy, jsonlib); if none is importable, fall back to
# parsing Facebook's XML responses with minidom.
RESPONSE_FORMAT = 'JSON'
try:
    import json as simplejson
except ImportError:
    try:
        import simplejson
    except ImportError:
        try:
            from django.utils import simplejson
        except ImportError:
            try:
                import jsonlib as simplejson
                # jsonlib must actually expose loads() to be usable here.
                simplejson.loads
            except (ImportError, AttributeError):
                from xml.dom import minidom
                RESPONSE_FORMAT = 'XML'
# support Google App Engine. GAE does not have a working urllib.urlopen.
try:
    from google.appengine.api import urlfetch

    def urlread(url, data=None, headers=None):
        """Fetch *url* via GAE's urlfetch.

        POSTs when *data* is given (defaulting the content type to
        form-urlencoded), GETs otherwise.  Returns the response body on
        HTTP 200; raises urllib2.URLError for any other status.
        """
        if data is not None:
            if headers is None:
                headers = {"Content-type": "application/x-www-form-urlencoded"}
            method = urlfetch.POST
        else:
            if headers is None:
                headers = {}
            method = urlfetch.GET
        result = urlfetch.fetch(url, method=method,
                                payload=data, headers=headers)
        if result.status_code == 200:
            return result.content
        else:
            raise urllib2.URLError("fetch error url=%s, code=%d" % (url, result.status_code))
except ImportError:
    def urlread(url, data=None, headers=None):
        """urllib2-based fallback for non-GAE environments.

        Bug fix: now accepts the same optional *headers* argument as the
        urlfetch variant, so callers that pass headers (e.g. the photo
        upload fallback path) no longer raise TypeError.  Two-argument
        calls behave exactly as before.
        """
        req = urllib2.Request(url, data=data, headers=headers or {})
        return urllib2.urlopen(req).read()
# Public API of this module.
__all__ = ['Facebook']
# Library version string.
VERSION = '0.1'
# REST endpoints; the secure URL is used when Facebook.__call__ is
# invoked with secure=True.
FACEBOOK_URL = 'http://api.facebook.com/restserver.php'
FACEBOOK_SECURE_URL = 'https://api.facebook.com/restserver.php'
class json(object):
    """Marker type used in the METHODS IDL: parameters declared with this
    type are JSON-encoded (via simplejson) before being sent."""
    pass
# simple IDL for the Facebook API
# IDL describing every proxied API call.  Shape:
#   namespace -> { method name -> [(param_name, param_type, options), ...] }
# where options may contain the string 'optional' and/or a tuple
# ('default', value).  The json marker class flags parameters that are
# serialized with simplejson before sending.  __generate_proxies() turns
# each namespace into a <Namespace>Proxy class.
METHODS = {
    'application': {
        'getPublicInfo': [
            ('application_id', int, ['optional']),
            ('application_api_key', str, ['optional']),
            ('application_canvas_name ', str,['optional']),
        ],
    },
    # admin methods
    'admin': {
        'getAllocation': [
            ('integration_point_name', str, []),
        ],
    },
    # feed methods
    'feed': {
        'publishStoryToUser': [
            ('title', str, []),
            ('body', str, ['optional']),
            ('image_1', str, ['optional']),
            ('image_1_link', str, ['optional']),
            ('image_2', str, ['optional']),
            ('image_2_link', str, ['optional']),
            ('image_3', str, ['optional']),
            ('image_3_link', str, ['optional']),
            ('image_4', str, ['optional']),
            ('image_4_link', str, ['optional']),
            ('priority', int, ['optional']),
        ],
        'publishActionOfUser': [
            ('title', str, []),
            ('body', str, ['optional']),
            ('image_1', str, ['optional']),
            ('image_1_link', str, ['optional']),
            ('image_2', str, ['optional']),
            ('image_2_link', str, ['optional']),
            ('image_3', str, ['optional']),
            ('image_3_link', str, ['optional']),
            ('image_4', str, ['optional']),
            ('image_4_link', str, ['optional']),
            ('priority', int, ['optional']),
        ],
        'publishTemplatizedAction': [
            ('title_template', str, []),
            ('page_actor_id', int, ['optional']),
            ('title_data', json, ['optional']),
            ('body_template', str, ['optional']),
            ('body_data', json, ['optional']),
            ('body_general', str, ['optional']),
            ('image_1', str, ['optional']),
            ('image_1_link', str, ['optional']),
            ('image_2', str, ['optional']),
            ('image_2_link', str, ['optional']),
            ('image_3', str, ['optional']),
            ('image_3_link', str, ['optional']),
            ('image_4', str, ['optional']),
            ('image_4_link', str, ['optional']),
            ('target_ids', list, ['optional']),
        ],
        'registerTemplateBundle': [
            ('one_line_story_templates', json, []),
            ('short_story_templates', json, ['optional']),
            ('full_story_template', json, ['optional']),
            ('action_links', json, ['optional']),
        ],
        'deactivateTemplateBundleByID': [
            ('template_bundle_id', int, []),
        ],
        'getRegisteredTemplateBundles': [],
        'getRegisteredTemplateBundleByID': [
            ('template_bundle_id', str, []),
        ],
        'publishUserAction': [
            ('template_bundle_id', int, []),
            ('template_data', json, ['optional']),
            ('target_ids', list, ['optional']),
            ('body_general', str, ['optional']),
        ],
    },
    # fql methods
    'fql': {
        'query': [
            ('query', str, []),
        ],
    },
    # friends methods
    'friends': {
        'areFriends': [
            ('uids1', list, []),
            ('uids2', list, []),
        ],
        'get': [
            ('flid', int, ['optional']),
        ],
        'getLists': [],
        'getAppUsers': [],
    },
    # notifications methods
    'notifications': {
        'get': [],
        'send': [
            ('to_ids', list, []),
            ('notification', str, []),
            ('email', str, ['optional']),
            ('type', str, ['optional']),
        ],
        'sendRequest': [
            ('to_ids', list, []),
            ('type', str, []),
            ('content', str, []),
            ('image', str, []),
            ('invite', bool, []),
        ],
        'sendEmail': [
            ('recipients', list, []),
            ('subject', str, []),
            ('text', str, ['optional']),
            ('fbml', str, ['optional']),
        ]
    },
    # profile methods
    'profile': {
        'setFBML': [
            ('markup', str, ['optional']),
            ('uid', int, ['optional']),
            ('profile', str, ['optional']),
            ('profile_action', str, ['optional']),
            ('mobile_fbml', str, ['optional']),
            ('profile_main', str, ['optional']),
        ],
        'getFBML': [
            ('uid', int, ['optional']),
            ('type', int, ['optional']),
        ],
        'setInfo': [
            ('title', str, []),
            ('type', int, []),
            ('info_fields', json, []),
            ('uid', int, []),
        ],
        'getInfo': [
            ('uid', int, []),
        ],
        'setInfoOptions': [
            ('field', str, []),
            ('options', json, []),
        ],
        'getInfoOptions': [
            ('field', str, []),
        ],
    },
    # users methods
    'users': {
        'getInfo': [
            ('uids', list, []),
            ('fields', list, [('default', ['name'])]),
        ],
        'getStandardInfo': [
            ('uids', list, []),
            ('fields', list, [('default', ['uid'])]),
        ],
        'getLoggedInUser': [],
        'isAppAdded': [],
        'hasAppPermission': [
            ('ext_perm', str, []),
            ('uid', int, ['optional']),
        ],
        'setStatus': [
            ('status', str, []),
            ('clear', bool, []),
            ('status_includes_verb', bool, ['optional']),
            ('uid', int, ['optional']),
        ],
    },
    # events methods
    'events': {
        'get': [
            ('uid', int, ['optional']),
            ('eids', list, ['optional']),
            ('start_time', int, ['optional']),
            ('end_time', int, ['optional']),
            ('rsvp_status', str, ['optional']),
        ],
        'getMembers': [
            ('eid', int, []),
        ],
        'create': [
            ('event_info', json, []),
        ],
    },
    # update methods
    'update': {
        'decodeIDs': [
            ('ids', list, []),
        ],
    },
    # groups methods
    'groups': {
        'get': [
            ('uid', int, ['optional']),
            ('gids', list, ['optional']),
        ],
        'getMembers': [
            ('gid', int, []),
        ],
    },
    # marketplace methods
    'marketplace': {
        'createListing': [
            ('listing_id', int, []),
            ('show_on_profile', bool, []),
            ('listing_attrs', str, []),
        ],
        'getCategories': [],
        'getListings': [
            ('listing_ids', list, []),
            ('uids', list, []),
        ],
        'getSubCategories': [
            ('category', str, []),
        ],
        'removeListing': [
            ('listing_id', int, []),
            ('status', str, []),
        ],
        'search': [
            ('category', str, ['optional']),
            ('subcategory', str, ['optional']),
            ('query', str, ['optional']),
        ],
    },
    # pages methods
    'pages': {
        'getInfo': [
            ('page_ids', list, ['optional']),
            ('uid', int, ['optional']),
        ],
        'isAdmin': [
            ('page_id', int, []),
        ],
        'isAppAdded': [
            ('page_id', int, []),
        ],
        'isFan': [
            ('page_id', int, []),
            ('uid', int, []),
        ],
    },
    # photos methods
    'photos': {
        'addTag': [
            ('pid', int, []),
            ('tag_uid', int, [('default', 0)]),
            ('tag_text', str, [('default', '')]),
            ('x', float, [('default', 50)]),
            ('y', float, [('default', 50)]),
            ('tags', str, ['optional']),
        ],
        'createAlbum': [
            ('name', str, []),
            ('location', str, ['optional']),
            ('description', str, ['optional']),
        ],
        'get': [
            ('subj_id', int, ['optional']),
            ('aid', int, ['optional']),
            ('pids', list, ['optional']),
        ],
        'getAlbums': [
            ('uid', int, ['optional']),
            ('aids', list, ['optional']),
        ],
        'getTags': [
            ('pids', list, []),
        ],
    },
    # fbml methods
    'fbml': {
        'refreshImgSrc': [
            ('url', str, []),
        ],
        'refreshRefUrl': [
            ('url', str, []),
        ],
        'setRefHandle': [
            ('handle', str, []),
            ('fbml', str, []),
        ],
    },
    # SMS Methods
    'sms' : {
        'canSend' : [
            ('uid', int, []),
        ],
        'send' : [
            ('uid', int, []),
            ('message', str, []),
            ('session_id', int, []),
            ('req_session', bool, []),
        ],
    },
    'data': {
        'getCookies': [
            ('uid', int, []),
            ('string', str, []),
        ],
        'setCookie': [
            ('uid', int, []),
            ('name', str, []),
            ('value', str, []),
            ('expires', int, ['optional']),
            ('path', str, ['optional']),
        ],
    },
    # connect methods
    'connect': {
        'registerUsers': [
            ('accounts', json, []),
        ],
        'unregisterUsers': [
            ('email_hashes', json, []),
        ],
        'getUnconnectedFriendsCount': [
        ],
    },
}
class Proxy(object):
    """A "namespace" (e.g. facebook.photos) of Facebook API calls.

    Calling an instance with no method returns the instance itself so
    Django templates can traverse attribute chains safely.
    """

    def __init__(self, client, name):
        self._client = client
        self._name = name

    def __call__(self, method=None, args=None, add_session_args=True):
        if method is None:
            # Django template traversal: behave as an identity value.
            return self
        if add_session_args:
            self._client._add_session_args(args)
        qualified_method = '%s.%s' % (self._name, method)
        return self._client(qualified_method, args)
# generate the Facebook proxies
def __generate_proxies():
    """Generate one <Namespace>Proxy class per METHODS namespace.

    For every method in the IDL a wrapper function is built as source
    text (exec'd), which packs its parameters into an args dict and
    forwards to the Proxy __call__.  The generated classes are installed
    into module globals (e.g. PhotosProxy, FriendsProxy).
    """
    for namespace in METHODS:
        methods = {}
        for method in METHODS[namespace]:
            params = ['self']
            body = ['args = {}']
            for param_name, param_type, param_options in METHODS[namespace][method]:
                param = param_name
                for option in param_options:
                    # ('default', value) options become keyword defaults;
                    # mutable list defaults are emulated with a None
                    # sentinel to avoid the shared-default pitfall.
                    if isinstance(option, tuple) and option[0] == 'default':
                        if param_type == list:
                            param = '%s=None' % param_name
                            body.append('if %s is None: %s = %s' % (param_name, param_name, repr(option[1])))
                        else:
                            param = '%s=%s' % (param_name, repr(option[1]))
                if param_type == json:
                    # we only jsonify the argument if it's a list or a dict, for compatibility
                    body.append('if isinstance(%s, list) or isinstance(%s, dict): %s = simplejson.dumps(%s)' % ((param_name,) * 4))
                if 'optional' in param_options:
                    param = '%s=None' % param_name
                    body.append('if %s is not None: args[\'%s\'] = %s' % (param_name, param_name, param_name))
                else:
                    body.append('args[\'%s\'] = %s' % (param_name, param_name))
                params.append(param)
            # simple docstring to refer them to Facebook API docs
            body.insert(0, '"""Facebook API call. See http://developers.facebook.com/documentation.php?v=1.0&method=%s.%s"""' % (namespace, method))
            body.insert(0, 'def %s(%s):' % (method, ', '.join(params)))
            body.append('return self(\'%s\', args)' % method)
            # The '\n ' join indents every body line one space under the
            # def line; exec defines the function in local scope.
            exec('\n '.join(body))
            methods[method] = eval(method)
        proxy = type('%sProxy' % namespace.title(), (Proxy, ), methods)
        globals()[proxy.__name__] = proxy
__generate_proxies()
class FacebookError(Exception):
    """Raised when Facebook returns an error payload (error_code/error_msg)."""

    def __init__(self, code, msg, args=None):
        # NOTE: assigning self.args overwrites Exception.args; kept for
        # backward compatibility with callers inspecting .args.
        self.code = code
        self.msg = msg
        self.args = args

    def __str__(self):
        return 'Error %s: %s' % (self.code, self.msg)
class AuthProxy(Proxy):
    """Special proxy for facebook.auth: caches session state on the client."""

    def getSession(self):
        """Facebook API call. See http://developers.facebook.com/documentation.php?v=1.0&method=auth.getSession"""
        try:
            token = self._client.auth_token
        except AttributeError:
            raise RuntimeError('Client does not have auth_token set.')
        result = self._client('%s.getSession' % self._name, {'auth_token': token})
        # Cache the session details on the client for all subsequent calls.
        self._client.session_key = result['session_key']
        self._client.uid = result['uid']
        self._client.secret = result.get('secret')
        self._client.session_key_expires = result['expires']
        return result

    def createToken(self):
        """Facebook API call. See http://developers.facebook.com/documentation.php?v=1.0&method=auth.createToken"""
        token = self._client('%s.createToken' % self._name)
        self._client.auth_token = token
        return token
class FriendsProxy(FriendsProxy):
    """Special proxy for facebook.friends: serves the cached friends list."""

    def get(self, **kwargs):
        """Facebook API call. See http://developers.facebook.com/documentation.php?v=1.0&method=friends.get"""
        # Prefer the friends list captured from the canvas request --
        # unless a specific friend-list id was asked for.
        if not kwargs.get('flid') and self._client._friends:
            return self._client._friends
        return super(FriendsProxy, self).get(**kwargs)
class PhotosProxy(PhotosProxy):
    """Special proxy for facebook.photos: adds multipart photo upload."""

    def upload(self, image, aid=None, caption=None, size=(604, 1024), filename=None):
        """Facebook API call. See http://developers.facebook.com/documentation.php?v=1.0&method=photos.upload

        image -- path to an image file, or raw image bytes (see filename)
        aid -- optional album id to upload into
        caption -- optional photo caption
        size -- optional (width, height) bound; the image is thumbnailed
                to fit before uploading (applied only when PIL is
                available).  Facebook's maximum display width is 604.
        filename -- when given, *image* is treated as the binary file
                    contents and *filename* supplies the multipart name
        """
        args = {}
        if aid is not None:
            args['aid'] = aid
        if caption is not None:
            args['caption'] = caption
        args = self._client._build_post_args('facebook.photos.upload', self._client._add_session_args(args))
        try:
            import cStringIO as StringIO
        except ImportError:
            import StringIO
        if filename is None:
            # image is a path on disk; thumbnail via PIL when available.
            try:
                import Image
            except ImportError:
                data = StringIO.StringIO(open(image, 'rb').read())
            else:
                img = Image.open(image)
                if size:
                    img.thumbnail(size, Image.ANTIALIAS)
                data = StringIO.StringIO()
                img.save(data, img.format)
        else:
            # image already holds the binary file contents.
            data = StringIO.StringIO(image)
            image = filename
        content_type, body = self.__encode_multipart_formdata(list(args.iteritems()), [(image, data)])
        urlinfo = urlparse.urlsplit(self._client.facebook_url)
        try:
            h = httplib.HTTP(urlinfo[1])
            h.putrequest('POST', urlinfo[2])
            h.putheader('Content-Type', content_type)
            h.putheader('Content-Length', str(len(body)))
            h.putheader('MIME-Version', '1.0')
            h.putheader('User-Agent', 'PyFacebook Client Library')
            h.endheaders()
            h.send(body)
            reply = h.getreply()
            if reply[0] != 200:
                raise Exception('Error uploading photo: Facebook returned HTTP %s (%s)' % (reply[0], reply[1]))
            response = h.file.read()
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        except Exception:
            # sending the photo failed, perhaps we are using GAE
            try:
                from google.appengine.api import urlfetch
                try:
                    response = urlread(url=self._client.facebook_url, data=body, headers={'POST': urlinfo[2], 'Content-Type': content_type, 'MIME-Version': '1.0'})
                except urllib2.URLError:
                    # BUG FIX: the old code interpolated the (still
                    # unbound) 'response' variable into the message here,
                    # masking the real failure with a NameError.
                    raise Exception('Error uploading photo: urlread request to Facebook failed')
            except ImportError:
                # could not import from google.appengine.api, so we are not running in GAE
                raise Exception('Error uploading photo.')
        return self._client._parse_response(response, 'facebook.photos.upload')

    def __encode_multipart_formdata(self, fields, files):
        """Encode *fields* and *files* into a multipart/form-data body.

        Returns (content_type, body).
        """
        boundary = '-------tHISiStheMulTIFoRMbOUNDaRY'
        crlf = '\r\n'
        l = []
        for (key, value) in fields:
            l.append('--' + boundary)
            l.append('Content-Disposition: form-data; name="%s"' % str(key))
            l.append('')
            l.append(str(value))
        for (filename, value) in files:
            l.append('--' + boundary)
            l.append('Content-Disposition: form-data; filename="%s"' % (str(filename), ))
            l.append('Content-Type: %s' % self.__get_content_type(filename))
            l.append('')
            l.append(value.getvalue())
        l.append('--' + boundary + '--')
        l.append('')
        body = crlf.join(l)
        content_type = 'multipart/form-data; boundary=%s' % boundary
        return content_type, body

    def __get_content_type(self, filename):
        """Return the MIME type guessed from *filename*'s extension.

        BUG FIX: previously the guess was wrapped in str(), so a failed
        guess (None) became the truthy string 'None' and the
        'application/octet-stream' fallback was unreachable.
        """
        return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
class Facebook(object):
    """
    Provides access to the Facebook API.
    Instance Variables:
    added
        True if the user has added this application.
    api_key
        Your API key, as set in the constructor.
    app_name
        Your application's name, i.e. the APP_NAME in http://apps.facebook.com/APP_NAME/ if
        this is for an internal web application. Optional, but useful for automatic redirects
        to canvas pages.
    auth_token
        The auth token that Facebook gives you, either with facebook.auth.createToken,
        or through a GET parameter.
    callback_path
        The path of the callback set in the Facebook app settings. If your callback is set
        to http://www.example.com/facebook/callback/, this should be '/facebook/callback/'.
        Optional, but useful for automatic redirects back to the same page after login.
    desktop
        True if this is a desktop app, False otherwise. Used for determining how to
        authenticate.
    facebook_url
        The url to use for Facebook requests.
    facebook_secure_url
        The url to use for secure Facebook requests.
    in_canvas
        True if the current request is for a canvas page.
    internal
        True if this Facebook object is for an internal application (one that can be added on Facebook)
    page_id
        Set to the page_id of the current page (if any)
    secret
        Secret that is used after getSession for desktop apps.
    secret_key
        Your application's secret key, as set in the constructor.
    session_key
        The current session key. Set automatically by auth.getSession, but can be set
        manually for doing infinite sessions.
    session_key_expires
        The UNIX time of when this session key expires, or 0 if it never expires.
    uid
        After a session is created, you can get the user's UID with this variable. Set
        automatically by auth.getSession.
    ----------------------------------------------------------------------
    """

    def __init__(self, api_key, secret_key, auth_token=None, app_name=None, callback_path=None, internal=None, proxy=None, facebook_url=None, facebook_secure_url=None):
        """
        Initializes a new Facebook object which provides wrappers for the Facebook API.
        If this is a desktop application, the next couple of steps you might want to take are:
        facebook.auth.createToken() # create an auth token
        facebook.login() # show a browser window
        wait_login() # somehow wait for the user to log in
        facebook.auth.getSession() # get a session key
        For web apps, if you are passed an auth_token from Facebook, pass that in as a named parameter.
        Then call:
        facebook.auth.getSession()
        """
        self.api_key = api_key
        self.secret_key = secret_key
        self.session_key = None
        self.session_key_expires = None
        self.auth_token = auth_token
        self.secret = None
        self.uid = None
        self.page_id = None
        self.in_canvas = False
        self.added = False
        self.app_name = app_name
        self.callback_path = callback_path
        self.internal = internal
        self._friends = None
        self.proxy = proxy
        if facebook_url is None:
            self.facebook_url = FACEBOOK_URL
        else:
            self.facebook_url = facebook_url
        if facebook_secure_url is None:
            self.facebook_secure_url = FACEBOOK_SECURE_URL
        else:
            self.facebook_secure_url = facebook_secure_url
        # Instantiate one proxy object per API namespace (e.g. self.photos,
        # self.users) from the generated METHODS table.
        for namespace in METHODS:
            self.__dict__[namespace] = eval('%sProxy(self, \'%s\')' % (namespace.title(), 'facebook.%s' % namespace))
        self.auth = AuthProxy(self, 'facebook.auth')

    def _hash_args(self, args, secret=None):
        """Hashes arguments by joining key=value pairs, appending a secret, and then taking the MD5 hex digest."""
        # @author: houyr
        # fix for UnicodeEncodeError
        hasher = md5.new(''.join(['%s=%s' % (isinstance(x, unicode) and x.encode("utf-8") or x, isinstance(args[x], unicode) and args[x].encode("utf-8") or args[x]) for x in sorted(args.keys())]))
        # Secret priority: explicit argument > session secret > app secret key.
        if secret:
            hasher.update(secret)
        elif self.secret:
            hasher.update(self.secret)
        else:
            hasher.update(self.secret_key)
        return hasher.hexdigest()

    def _parse_response_item(self, node):
        """Parses an XML response node from Facebook.

        Dispatches on the node's shape: list nodes (attribute list="true"),
        dict nodes (element children), or plain text content.
        """
        if node.nodeType == node.DOCUMENT_NODE and \
            node.childNodes[0].hasAttributes() and \
            node.childNodes[0].hasAttribute('list') and \
            node.childNodes[0].getAttribute('list') == "true":
            return {node.childNodes[0].nodeName: self._parse_response_list(node.childNodes[0])}
        elif node.nodeType == node.ELEMENT_NODE and \
            node.hasAttributes() and \
            node.hasAttribute('list') and \
            node.getAttribute('list')=="true":
            return self._parse_response_list(node)
        elif len(filter(lambda x: x.nodeType == x.ELEMENT_NODE, node.childNodes)) > 0:
            return self._parse_response_dict(node)
        else:
            # Leaf node: concatenate its text children.
            return ''.join(node.data for node in node.childNodes if node.nodeType == node.TEXT_NODE)

    def _parse_response_dict(self, node):
        """Parses an XML dictionary response node from Facebook."""
        result = {}
        for item in filter(lambda x: x.nodeType == x.ELEMENT_NODE, node.childNodes):
            result[item.nodeName] = self._parse_response_item(item)
        if node.nodeType == node.ELEMENT_NODE and node.hasAttributes():
            if node.hasAttribute('id'):
                result['id'] = node.getAttribute('id')
        return result

    def _parse_response_list(self, node):
        """Parses an XML list response node from Facebook."""
        result = []
        for item in filter(lambda x: x.nodeType == x.ELEMENT_NODE, node.childNodes):
            result.append(self._parse_response_item(item))
        return result

    def _check_error(self, response):
        """Checks if the given Facebook response is an error, and then raises the appropriate exception."""
        if type(response) is dict and response.has_key('error_code'):
            raise FacebookError(response['error_code'], response['error_msg'], response['request_args'])

    def _build_post_args(self, method, args=None):
        """Adds to args parameters that are necessary for every call to the API.

        Normalizes list/unicode/bool values, then appends the method name,
        api key, version, format, and request signature.
        """
        if args is None:
            args = {}
        for arg in args.items():
            if type(arg[1]) == list:
                args[arg[0]] = ','.join(str(a) for a in arg[1])
            elif type(arg[1]) == unicode:
                args[arg[0]] = arg[1].encode("UTF-8")
            elif type(arg[1]) == bool:
                args[arg[0]] = str(arg[1]).lower()
        args['method'] = method
        args['api_key'] = self.api_key
        args['v'] = '1.0'
        args['format'] = RESPONSE_FORMAT
        args['sig'] = self._hash_args(args)
        return args

    def _add_session_args(self, args=None):
        """Adds 'session_key' and 'call_id' to args, which are used for API calls that need sessions."""
        if args is None:
            args = {}
        if not self.session_key:
            return args
            #some calls don't need a session anymore. this might be better done in the markup
            #raise RuntimeError('Session key not set. Make sure auth.getSession has been called.')
        args['session_key'] = self.session_key
        # call_id must be strictly increasing per session; wall-clock ms works.
        args['call_id'] = str(int(time.time() * 1000))
        return args

    def _parse_response(self, response, method, format=None):
        """Parses the response according to the given (optional) format, which should be either 'JSON' or 'XML'."""
        if not format:
            format = RESPONSE_FORMAT
        if format == 'JSON':
            result = simplejson.loads(response)
            self._check_error(result)
        elif format == 'XML':
            dom = minidom.parseString(response)
            result = self._parse_response_item(dom)
            dom.unlink()
            if 'error_response' in result:
                self._check_error(result['error_response'])
            # 'facebook.photos.upload' -> key 'photos_upload_response'.
            result = result[method[9:].replace('.', '_') + '_response']
        else:
            raise RuntimeError('Invalid format specified.')
        return result

    def hash_email(self, email):
        """
        Hash an email address in a format suitable for Facebook Connect.

        Format is '<unsigned crc32>_<md5 hex>' of the lowercased address.
        """
        email = email.lower().strip()
        return "%s_%s" % (
            struct.unpack("I", struct.pack("i", binascii.crc32(email)))[0],
            hashlib.md5(email).hexdigest(),
        )

    def unicode_urlencode(self, params):
        """
        @author: houyr
        A unicode aware version of urllib.urlencode.
        """
        if isinstance(params, dict):
            params = params.items()
        return urllib.urlencode([(k, isinstance(v, unicode) and v.encode('utf-8') or v)
                                 for k, v in params])

    def __call__(self, method=None, args=None, secure=False):
        """Make a call to Facebook's REST server."""
        # for Django templates, if this object is called without any arguments
        # return the object itself
        if method is None:
            return self
        # @author: houyr
        # fix for bug of UnicodeEncodeError
        post_data = self.unicode_urlencode(self._build_post_args(method, args))
        if self.proxy:
            proxy_handler = urllib2.ProxyHandler(self.proxy)
            opener = urllib2.build_opener(proxy_handler)
            if secure:
                response = opener.open(self.facebook_secure_url, post_data).read()
            else:
                response = opener.open(self.facebook_url, post_data).read()
        else:
            if secure:
                response = urlread(self.facebook_secure_url, post_data)
            else:
                response = urlread(self.facebook_url, post_data)
        return self._parse_response(response, method)

    # URL helpers
    def get_url(self, page, **args):
        """
        Returns one of the Facebook URLs (www.facebook.com/SOMEPAGE.php).
        Named arguments are passed as GET query string parameters.
        """
        return 'http://www.facebook.com/%s.php?%s' % (page, urllib.urlencode(args))

    def get_app_url(self, path=''):
        """
        Returns the URL for this app's canvas page, according to app_name.
        """
        return 'http://apps.facebook.com/%s/%s' % (self.app_name, path)

    def get_add_url(self, next=None):
        """
        Returns the URL that the user should be redirected to in order to add the application.
        """
        args = {'api_key': self.api_key, 'v': '1.0'}
        if next is not None:
            args['next'] = next
        return self.get_url('install', **args)

    def get_authorize_url(self, next=None, next_cancel=None):
        """
        Returns the URL that the user should be redirected to in order to
        authorize certain actions for application.
        """
        args = {'api_key': self.api_key, 'v': '1.0'}
        if next is not None:
            args['next'] = next
        if next_cancel is not None:
            args['next_cancel'] = next_cancel
        return self.get_url('authorize', **args)

    def get_login_url(self, next=None, popup=False, canvas=True):
        """
        Returns the URL that the user should be redirected to in order to login.
        next -- the URL that Facebook should redirect to after login
        """
        args = {'api_key': self.api_key, 'v': '1.0'}
        if next is not None:
            args['next'] = next
        if canvas is True:
            args['canvas'] = 1
        if popup is True:
            args['popup'] = 1
        if self.auth_token is not None:
            args['auth_token'] = self.auth_token
        return self.get_url('login', **args)

    def login(self, popup=False):
        """Open a web browser telling the user to login to Facebook."""
        import webbrowser
        webbrowser.open(self.get_login_url(popup=popup))

    def get_ext_perm_url(self, ext_perm, next=None, popup=False):
        """
        Returns the URL that the user should be redirected to in order to grant an extended permission.
        ext_perm -- the name of the extended permission to request
        next -- the URL that Facebook should redirect to after login
        """
        args = {'ext_perm': ext_perm, 'api_key': self.api_key, 'v': '1.0'}
        if next is not None:
            args['next'] = next
        if popup is True:
            args['popup'] = 1
        return self.get_url('authorize', **args)

    def request_extended_permission(self, ext_perm, popup=False):
        """Open a web browser telling the user to grant an extended permission."""
        import webbrowser
        webbrowser.open(self.get_ext_perm_url(ext_perm, popup=popup))

    def check_session(self, request):
        """
        Checks the given Django HttpRequest for Facebook parameters such as
        POST variables or an auth token. If the session is valid, returns True
        and this object can now be used to access the Facebook API. Otherwise,
        it returns False, and the application should take the appropriate action
        (either log the user in or have him add the application).
        """
        self.in_canvas = (request.POST.get('fb_sig_in_canvas') == '1')
        # Already have a session (and a user or page bound to it)? Done.
        if self.session_key and (self.uid or self.page_id):
            return True
        if request.method == 'POST':
            params = self.validate_signature(request.POST)
        else:
            if 'installed' in request.GET:
                self.added = True
            if 'fb_page_id' in request.GET:
                self.page_id = request.GET['fb_page_id']
            if 'auth_token' in request.GET:
                # Fresh auth token: exchange it for a session immediately.
                self.auth_token = request.GET['auth_token']
                try:
                    self.auth.getSession()
                except FacebookError, e:
                    self.auth_token = None
                    return False
                return True
            params = self.validate_signature(request.GET)
        if not params:
            # first check if we are in django - to check cookies
            if hasattr(request, 'COOKIES'):
                params = self.validate_cookie_signature(request.COOKIES)
            else:
                # if not, then we might be on GoogleAppEngine, check their request object cookies
                if hasattr(request,'cookies'):
                    params = self.validate_cookie_signature(request.cookies)
        if not params:
            return False
        if params.get('in_canvas') == '1':
            self.in_canvas = True
        if params.get('added') == '1':
            self.added = True
        if params.get('expires'):
            self.session_key_expires = int(params['expires'])
        if 'friends' in params:
            if params['friends']:
                self._friends = params['friends'].split(',')
            else:
                self._friends = []
        # A session must be bound to either a user or a page to be valid.
        if 'session_key' in params:
            self.session_key = params['session_key']
            if 'user' in params:
                self.uid = params['user']
            elif 'page_id' in params:
                self.page_id = params['page_id']
            else:
                return False
        elif 'profile_session_key' in params:
            self.session_key = params['profile_session_key']
            if 'profile_user' in params:
                self.uid = params['profile_user']
            else:
                return False
        else:
            return False
        return True

    def validate_signature(self, post, prefix='fb_sig', timeout=None):
        """
        Validate parameters passed to an internal Facebook app from Facebook.

        Returns the dict of fb_sig_* parameters (prefix stripped) when the
        signature checks out, or None otherwise.
        """
        args = post.copy()
        if prefix not in args:
            return None
        del args[prefix]
        # Optionally reject stale requests based on the fb_sig_time stamp.
        if timeout and '%s_time' % prefix in post and time.time() - float(post['%s_time' % prefix]) > timeout:
            return None
        args = dict([(key[len(prefix + '_'):], value) for key, value in args.items() if key.startswith(prefix)])
        hash = self._hash_args(args)
        if hash == post[prefix]:
            return args
        else:
            return None

    def validate_cookie_signature(self, cookies):
        """
        Validate parameters passed by cookies, namely facebookconnect or js api.

        NOTE(review): returns False (not None) on signature mismatch, unlike
        validate_signature; callers only test truthiness so both work.
        """
        if not self.api_key in cookies.keys():
            return None
        sigkeys = []
        params = dict()
        for k in sorted(cookies.keys()):
            if k.startswith(self.api_key+"_"):
                sigkeys.append(k)
                params[k.replace(self.api_key+"_","")] = cookies[k]
        vals = ''.join(['%s=%s' % (x.replace(self.api_key+"_",""), cookies[x]) for x in sigkeys])
        hasher = md5.new(vals)
        hasher.update(self.secret_key)
        digest = hasher.hexdigest()
        if digest == cookies[self.api_key]:
            return params
        else:
            return False

    ######## Note: This code was not written by facebook
    def check_connect_session(self, request):
        """
        For use in a facebook Connect application running in Google App Engine
        Takes a Google App Engine Request
        http://code.google.com/appengine/docs/webapp/requestclass.html
        and determines if the current user has a valid session
        """
        # our session is stored in cookies - validate them
        params = self.validate_cookie(request.cookies)
        if not params:
            return False
        if params.get('expires'):
            self.session_key_expires = int(params['expires'])
        if 'session_key' in params and 'user' in params:
            self.session_key = params['session_key']
            self.uid = params['user']
        else:
            return False
        return True

    def validate_cookie(self, cookies):
        """
        Validates parameters passed to a Facebook connect app through cookies
        """
        # check for the hashed secret
        if self.api_key not in cookies:
            return None
        # create a dict of the elements that start with the api_key
        # the resultant dict removes the self.api_key from the beginning
        args = dict([(key[len(self.api_key) + 1:], value)
                     for key, value in cookies.items()
                     if key.startswith(self.api_key + "_")])
        # check the hashes match before returning them
        if self._hash_args(args) == cookies[self.api_key]:
            return args
        return None
if __name__ == '__main__':
    # sample desktop application
    # NOTE: Python 2 demo script; requires real API credentials below and an
    # interactive browser login before auth.getSession() will succeed.
    api_key = ''
    secret_key = ''
    facebook = Facebook(api_key, secret_key)
    facebook.auth.createToken()
    # Show login window
    # Set popup=True if you want login without navigational elements
    facebook.login()
    # Login to the window, then press enter
    print 'After logging in, press enter...'
    raw_input()
    facebook.auth.getSession()
    print 'Session Key: ', facebook.session_key
    print 'Your UID: ', facebook.uid
    # Fetch and display profile info for the logged-in user.
    info = facebook.users.getInfo([facebook.uid], ['name', 'birthday', 'affiliations', 'sex'])[0]
    print 'Your Name: ', info['name']
    print 'Your Birthday: ', info['birthday']
    print 'Your Gender: ', info['sex']
    # Show details for the first five friends.
    friends = facebook.friends.get()
    friends = facebook.users.getInfo(friends[0:5], ['name', 'birthday', 'relationship_status'])
    for friend in friends:
        print friend['name'], 'has a birthday on', friend['birthday'], 'and is', friend['relationship_status']
    arefriends = facebook.friends.areFriends([friends[0]['uid']], [friends[1]['uid']])
    photos = facebook.photos.getAlbums(facebook.uid)
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Data needed for live vs dev deployment.
In order to run this application, you will need the private_keys.py
file which contains the Facebook API "Application Secret" string and
other confidential config settings.
Contact footprint-eng@googlegroups.com to get this file.
"""
import os
import logging
import private_keys
# Domains that count as the live/production deployment.
PRODUCTION_DOMAINS = ['allforgood.org', 'footprint2009qa.appspot.com']
# pylint: disable-msg=C0301
# Google Maps API keys are tied to the serving domain, so one per instance.
MAPS_API_KEYS = {
  'allforgood.org' : 'ABQIAAAAHtEBbyenR4BaYGl54_p0fRQu5fCZl1K7T-61hQb7PrEsg72lpRQbhbBcd0325oSLzGUQxP7Nz9Rquw',
  'footprint2009qa.appspot.com' : 'ABQIAAAA1sNtdnui_8Lmt75VBAosOhRSEEb9tdSIuCkRNLnpLNbLMSh74BRy7tIEe3Z6GgLCRLUFTTQ45vQ3mg',
  'footprint-loadtest.appspot.com' : 'ABQIAAAAxq97AW0x5_CNgn6-nLxSrxSWKH9akPVZO-6F_G0PvWoeHNZVdRSifDQCrd-osJFuWDqR3Oh0nKDgbw',
  'footprint2009dev.appspot.com' : 'ABQIAAAAxq97AW0x5_CNgn6-nLxSrxTpeCj-9ism2i6Mt7fLlVoN6HsfDBSOZjcyagWjKTMT32rzg71rFenopA'
}
# pylint: enable-msg=C0301
# Google Analytics keys - only needed for dev, qa, and production
# we don't want to track in other instances
GA_KEYS = {
  'allforgood.org' : 'UA-8689219-2',
  'footprint2009dev.appspot.com' : 'UA-8689219-3',
  'footprint2009qa.appspot.com' : 'UA-8689219-4'
}
# These are the public Facebook API keys.
DEFAULT_FACEBOOK_API_KEY = 'df68a40a4a90d4495ed03f920f16c333'
FACEBOOK_API_KEYS = {
  'allforgood.org': '628524bbaf79da8a8a478e5ef49fb84f',
  'footprint2009qa.appspot.com': '213e79302371015635ab5707d691143f'
}
# Per-instance values resolved lazily by load_keys(); None until loaded.
FACEBOOK_API_KEY = None
FACEBOOK_SECRET = None
MAPS_API_KEY = None
GA_KEY = None
def host_sans_www():
  """Return the host name without any leading 'www.'.

  Returns:
    The HTTP_HOST environment variable with a leading 'www.' stripped,
    or '' when HTTP_HOST is unset.  (Bug fix: previously an unset
    HTTP_HOST returned None and the [:4] slice raised TypeError.)
  """
  http_host = os.environ.get('HTTP_HOST', '')
  # Remove www. at the beginning if it's there
  if http_host[:4] == 'www.':
    http_host = http_host[4:]
  return http_host
def is_production_site():
  """True when serving from one of the live production domains."""
  host = host_sans_www()
  return host in PRODUCTION_DOMAINS
def is_local_development():
  """Is this running on a development server (and not appspot.com)?

  Returns:
    True when SERVER_SOFTWARE begins with 'Development' (the dev_appserver
    value); False otherwise, including when SERVER_SOFTWARE is unset.
    (Bug fix: previously an unset variable returned None from os.environ.get
    and .find() raised AttributeError.)
  """
  return os.environ.get('SERVER_SOFTWARE', '').startswith("Development")
def load_keys():
  """Load facebook, maps, ga, etc. keys for the current host.

  Idempotent: returns immediately once any key has been resolved.  On a
  local dev server, optional overrides are merged in from local_keys.py
  before the per-host lookup.
  """
  global FACEBOOK_API_KEY, FACEBOOK_SECRET, GA_KEY, MAPS_API_KEY
  if FACEBOOK_API_KEY or MAPS_API_KEY or FACEBOOK_SECRET or GA_KEY:
    return
  if is_local_development():
    # to define your own keys, modify local_keys.py-- ok to checkin.
    local_keys = __import__('local_keys')
    try:
      MAPS_API_KEYS.update(local_keys.MAPS_API_KEYS)
    except AttributeError:
      logging.info("local_keys.MAPS_API_KEYS not defined")
    try:
      # Bug fix: this previously read local_keys.FACEOOK_API_KEYS (typo),
      # so local Facebook key overrides were always silently skipped.
      FACEBOOK_API_KEYS.update(local_keys.FACEBOOK_API_KEYS)
    except AttributeError:
      logging.info("local_keys.FACEBOOK_API_KEYS not defined")
  # no default for maps api-- has to match
  http_host = host_sans_www()
  MAPS_API_KEY = MAPS_API_KEYS.get(http_host, 'unknown')
  logging.debug("host=" + http_host + " maps api key=" + MAPS_API_KEY)
  # no default for ga key
  GA_KEY = GA_KEYS.get(http_host, 'unknown')
  logging.debug("host=" + http_host + " ga key=" + GA_KEY)
  # facebook API has default key
  FACEBOOK_API_KEY = FACEBOOK_API_KEYS.get(http_host, DEFAULT_FACEBOOK_API_KEY)
  logging.debug("host=" + http_host + " facebook key=" + FACEBOOK_API_KEY)
  # facebook secret keys are a special case
  FACEBOOK_SECRET = private_keys.FACEBOOK_SECRETS.get(http_host,
                                                      private_keys.DEFAULT_FACEBOOK_SECRET)
def load_standard_template_values(template_values):
  """Populate template_values with the standard per-host API keys."""
  load_keys()
  template_values.update({
      'maps_api_key': MAPS_API_KEY,
      'facebook_key': FACEBOOK_API_KEY,
      'ga_key': GA_KEY,
  })
def get_facebook_secret():
  """Returns the facebook secret key (loads keys on first use)."""
  load_keys()
  return FACEBOOK_SECRET
def get_facebook_key():
  """Returns the facebook public key (loads keys on first use)."""
  load_keys()
  return FACEBOOK_API_KEY
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
main() for autocomplete handler
"""
# view classes are inherently not pylint-compatible
# pylint: disable-msg=C0103
# pylint: disable-msg=W0232
# pylint: disable-msg=E1101
# pylint: disable-msg=R0903
import os
import logging
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
# Word list shipped alongside this module; one "count<TAB>word" line each.
AUTOCOMPLETE_FILENAME = "popular_words.txt"
# Module-level cache of the word list; None until first request loads it.
POPULAR_WORDS = None
# Bounds applied to the &maxresults query argument.
DEFAULT_MAXRESULTS = 10
MAX_MAXRESULTS = 100
class Query(webapp.RequestHandler):
  """prefix query on autocomplete."""
  def get(self):
    """HTTP get method.

    Query args:
      q: required prefix to match (lowercased, whitespace-stripped).
      maxresults: optional result cap, clamped to [1, MAX_MAXRESULTS].
      reload_words: pass "1" to force re-reading the word file.
    """
    global POPULAR_WORDS
    reload_words = self.request.get('reload_words')
    if POPULAR_WORDS is None or reload_words == "1":
      logging.info("reloading words...")
      path = os.path.join(os.path.dirname(__file__), AUTOCOMPLETE_FILENAME)
      words = []
      fh = open(path, 'r')
      try:
        for line in fh:
          # Each line is "count<TAB>word"; the count is unused here.
          unused_count, word = line.rstrip('\n\r').split("\t")
          words.append(word)
      finally:
        fh.close()  # bug fix: the file handle was previously leaked
      # Publish the cache only after a full, clean load (previously a
      # mid-load failure left a partial list cached).
      POPULAR_WORDS = words
      logging.info("loaded %d words." % len(POPULAR_WORDS))
    querystr = self.request.get('q') or ""
    querystr = querystr.strip().lower()
    if querystr == "":
      self.response.headers['Content-Type'] = 'text/plain'
      self.response.out.write("please provide &q= to query the autocompleter.")
      return
    maxresultstr = self.request.get('maxresults')
    try:
      maxwords = int(maxresultstr)
    except (ValueError, TypeError):  # narrowed from a bare except
      maxwords = DEFAULT_MAXRESULTS
    # Clamp to [1, MAX_MAXRESULTS].
    maxwords = min(max(maxwords, 1), MAX_MAXRESULTS)
    outstr = ""
    numresults = 0
    for word in POPULAR_WORDS:
      if word.startswith(querystr):
        outstr += word + "\n"
        numresults += 1
        if numresults >= maxwords:
          break
    self.response.headers['Content-Type'] = 'text/plain'
    self.response.out.write(outstr)
# WSGI application: routes autocomplete queries to the Query handler.
APP = webapp.WSGIApplication(
    [('/autocomplete/query', Query)],
    debug=True)
def main():
  """main for standalone execution."""
  # Delegate to the webapp CGI/WSGI adapter.
  run_wsgi_app(APP)
# Script entry point when executed directly by the runtime.
if __name__ == "__main__":
  main()
| Python |
#!/usr/bin/python2.5
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Datastore helper methods."""
import datetime
import logging
from google.appengine.api import memcache
from google.appengine.ext import db
def set_entity_attributes(entity, absolute_attributes, relative_attributes):
  """Set entity attributes, using absolute or relative values.

  Runs inside a datastore transaction; the entity is re-fetched by its key
  so the update applies to the freshest stored copy.

  Args:
    entity: Persisted datastore entity to update (its key is used to
        re-fetch it inside the transaction).
    absolute_attributes: Dictionary of attr_name:value pairs to set.
    relative_attributes: Dictionary of attr_name:value pairs to set as
      relative to current value. If some attr_name appears in both
      the absolute and relative dictionaries, the absolute is set first.
  Returns:
    On error: (None, None)
    On success: (entity, deltas)
      entity: The entity after applying the changes
      deltas: Dict of attr_name:delta_values, where each delta shows how
        the change in value in the respective attribute.
  """
  if not absolute_attributes:
    absolute_attributes = {}
  if not relative_attributes:
    relative_attributes = {}
  def txn(entity):
    # Passed 'entity' as function parameter because of python scope rules.
    entity = entity.get(entity.key())
    # Initialize the deltas list with starting values. Also, set any undefined
    # attribute to zero.
    deltas = {}
    combined_attributes = (set([x for x in absolute_attributes.iterkeys()]) |
                           set([x for x in relative_attributes.iterkeys()]))
    for attr in combined_attributes:
      # NOTE(review): 'not getattr(...)' treats falsy existing values
      # (0, '', None) as undefined and resets them to 0 -- confirm this is
      # intended for non-numeric properties.
      if not getattr(entity, attr):
        setattr(entity, attr, 0)  # Ensure all attributes are defined.
        deltas[attr] = 0
      else:
        deltas[attr] = getattr(entity, attr)
    # Set absolute values first.
    for attr in absolute_attributes.iterkeys():
      setattr(entity, attr, absolute_attributes[attr])
    # Set relative values.
    for attr in relative_attributes.iterkeys():
      # Here, we know getattr() is defined, since we initialized all undefined
      # attributes at the top of this function.
      setattr(entity, attr, getattr(entity, attr) + relative_attributes[attr])
    # Compute the final delta value for each attribute.
    for attr in combined_attributes:
      deltas[attr] = getattr(entity, attr) - deltas[attr]
    entity.put()
    return (entity, deltas)
  try:
    return_value = db.run_in_transaction(txn, entity)
    return return_value
  except Exception:
    # Broad catch is deliberate: transaction contention or validation errors
    # are reported as (None, None) after logging the traceback.
    logging.exception('set_entity_attributes failed for key %s' %
                      entity.key().id_or_name())
    return (None, None)
def get_by_ids(cls, ids):
  """Fetch multiple entities by id, consulting memcache before datastore.

  Args:
    cls: Model class (must define MEMCACHE_PREFIX and DATASTORE_PREFIX).
    ids: list of ids.
  Returns:
    Dictionary mapping id to model instance for each id that was found.
  """
  results = memcache.get_multi(ids, cls.MEMCACHE_PREFIX + ':')
  prefix = cls.DATASTORE_PREFIX
  # Anything memcache did not return gets looked up by datastore key name.
  missing_key_names = [prefix + entity_id
                       for entity_id in ids if entity_id not in results]
  for entity in cls.get_by_key_name(missing_key_names):
    if entity:
      # Strip the datastore prefix to recover the caller-visible id.
      results[entity.key().name()[len(prefix):]] = entity
  return results
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
export main().
"""
import re
import logging
import hashlib
from datetime import datetime
from string import strip
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
import utils
import models
import posting
from fastpageviews import pagecount
from testapi import helpers
# SHA-512 hex digest that the &digsig request argument must hash to
# (see verify_dig_sig); split into three strings to deter casual grepping.
QT = "%s%s%s" % ("ec813d6d0c96f3a562c70d78b7ac98d7ec2cfcaaf44cbd7",
                 "ac897ca3481e27a777398da97d0b93bbe0f5633f6203ff3",
                 "b77ea55f62cf002ad7e4b5ec3f89d18954")
# http://code.google.com/appengine/docs/python/datastore/typesandpropertyclasses.html
# Enumeration of datastore property value types.
MAX_FIELD_TYPES = 8
FIELD_TYPE_BOOL = 0
FIELD_TYPE_INT = 1
FIELD_TYPE_LONG = 2
FIELD_TYPE_FLOAT = 3
FIELD_TYPE_STR = 4
FIELD_TYPE_DATETIME = 5
FIELD_TYPE_DATE = 6
FIELD_TYPE_REF = 7
# Marker prefixing serialized row keys.
ROW_MARKER_LEN = 4
ROW_MARKER = "row="
# Help text served by ShowUsage at /export/.
USAGE = """
<pre>
/export/TABLENAME.tsv, eg. UserStats.tsv
/export/TABLENAME/TABLENAME_BACKUP, eg. UserInfo/UserInfo_20090416
</pre>
"""
class Fail(Exception):
  """Error raised for bad export requests; counts and logs the failure.

  Args:
    message: human-readable description of what went wrong.
  """
  def __init__(self, message):
    pagecount.IncrPageCount("export.Fail", 1)
    # Bug fix: pass the message to Exception.__init__ so str(exc) and
    # exc.args carry it (previously the message was dropped entirely).
    Exception.__init__(self, message)
    logging.error("see /export/ for usage")
    logging.error(message)
class ShowUsage(webapp.RequestHandler):
  """ show user how to export a table """
  def __init__(self):
    # Guarded chained constructor; the hasattr check is effectively always true.
    if hasattr(webapp.RequestHandler, '__init__'):
      webapp.RequestHandler.__init__(self)
  def response(self):
    """ pylint wants a public response method """
    # NOTE(review): webapp.RequestHandler defines no __response__, so calling
    # this stub would raise AttributeError.  In practice it is shadowed by the
    # instance's real self.response attribute and exists only to calm pylint.
    webapp.RequestHandler.__response__(self)
  def get(self):
    """ show the usage string """
    pagecount.IncrPageCount("export.ShowUsage", 1)
    self.response.out.write(USAGE)
def verify_dig_sig(request, caller):
  """
  require callers pass param &digsig=[string] such that
  the hash of the string they pass to us equals QT
  """
  digsig = utils.get_last_arg(request, "digsig", "")
  digest = hashlib.sha512(digsig).hexdigest()
  if digest != QT:
    pagecount.IncrPageCount("export.%s.noDigSig" % caller, 1)
    raise Fail("no &digsig")
def get_limit(request, caller):
  """Parse the &limit request argument.

  Args:
    request: incoming webapp request.
    caller: name of the calling handler, for page-count bookkeeping.
  Returns:
    The requested fetch limit; values below 1 map to 1000.
  Raises:
    Fail: when &limit is not an integer.
  """
  try:
    limit = int(utils.get_last_arg(request, "limit", "1000"))
  except (ValueError, TypeError):  # narrowed from a bare except
    pagecount.IncrPageCount("export.%s.nonIntLimit" % caller, 1)
    raise Fail("non integer &limit")
  if limit < 1:
    # 1000 is the max records that can be fetched ever
    limit = 1000
  return limit
def get_model(table, caller):
  """Map an exportable table name to its datastore model class.

  Args:
    table: table name, e.g. 'UserInfo'.
    caller: name of the calling handler, for page-count bookkeeping.
  Returns:
    The model class for the named table.
  Raises:
    Fail: if the table name is unknown.
  """
  if table == "UserInfo":
    model = models.UserInfo
  elif table == "UserStats":
    model = models.UserStats
  elif table == "UserInterest":
    model = models.UserInterest
  elif table == "VolunteerOpportunityStats":
    model = models.VolunteerOpportunityStats
  elif table == "VolunteerOpportunity":
    model = models.VolunteerOpportunity
  elif table == "Config":
    # Bug fix: 'config' was referenced but never imported at module scope,
    # so this branch always raised NameError; import it lazily here.
    import config
    model = config.Config
  elif table == "Posting":
    model = posting.Posting
  elif table == "PageCountShard":
    model = pagecount.PageCountShard
  elif table == "TestResults":
    model = helpers.TestResults
  else:
    pagecount.IncrPageCount("export.%s.unknownTable" % caller, 1)
    raise Fail("unknown table name '%s'" % table)
  return model
def get_min_key(table, min_key = ""):
  """
  get the next key in our sequence
  or get the lowest key value in the table
  """
  if min_key == "":
    # No starting point given: fetch the entity with the smallest key.
    query = table.gql("ORDER BY __key__ LIMIT 1")
    row = query.get()
  else:
    # NOTE(review): constructs an unsaved in-memory entity purely to obtain
    # a Key object for the given key name -- nothing is written.
    row = table(key_name = min_key)
  if not row:
    if min_key == "":
      # Empty table and no explicit key to resume from.
      raise Fail("no data in %s" % table)
    else:
      return None
  return row.key()
def export_table_as_tsv(table, min_key, limit):
  """
  get rows from this table as TSV

  Args:
    table: model class to export.
    min_key: key name to resume after ("" starts from the beginning and
        emits a header row).
    limit: maximum number of rows to fetch.
  Returns:
    TSV text, one record per line, terminated by a newline.
  """
  delim, recsep = ("\t", "\n")
  def get_fields(table_object):
    """ get a list of field names prepended with 'key' """
    fields = ["key"]
    for field in table_object.properties():
      fields.append(field)
    return fields
  def field_to_str(value):
    """ get our field value as a string """
    # NOTE(review): any falsy value (None, 0, False, '') serializes as "" --
    # confirm downstream consumers expect zeros to disappear.
    if not value:
      field_value = ""
    else:
      try:
        # could be a key or a Reference object, eg
        # <models.UserInfo object at 0x94bed32057743898>
        field_value = str(value.key().id_or_name())
      except:
        field_value = str(value)
    return field_value
  def get_header(fields, delim):
    """ get a delimited list of the field names """
    header = delim.join(fields)
    return header
  def esc_value(value, delim, recsep):
    """ make sure our delimiter and record separator are not in the data """
    return field_to_str(value).replace(delim, "\\t").replace(recsep, "\\n")
  fields = get_fields(table)
  output = []
  if min_key == "":
    # this is the first record we output so add the header
    output.append(get_header(fields, delim))
    inequality = ">="
  else:
    inequality = ">"
  try:
    query = table.gql(("WHERE __key__ %s :1 ORDER BY __key__" % inequality),
                      get_min_key(table, min_key))
  except:
    # get_min_key raises Fail on an empty table; emit just the header/nothing.
    query = None
  if query:
    rsp = query.fetch(limit)
    for row in rsp:
      line = []
      for field in fields:
        if field == "key":
          value = row.key().id_or_name()
        else:
          value = getattr(row, field, None)
        line.append(esc_value(value, delim, recsep))
      output.append(delim.join(line))
  return "%s%s" % (recsep.join(output), recsep)
class ExportTableTSV(webapp.RequestHandler):
  """ export the data in the table """
  def __init__(self):
    if hasattr(webapp.RequestHandler, '__init__'):
      webapp.RequestHandler.__init__(self)
  def request(self):
    """ pylint wants a public request method """
    # NOTE(review): broken stub -- webapp.RequestHandler has no __response__;
    # shadowed by the instance's real request/response attributes and kept
    # only to appease pylint.
    webapp.RequestHandler.__response__(self)
  def response(self):
    """ pylint wants a public response method """
    webapp.RequestHandler.__response__(self)
  def get(self, table):
    """ handle the request to export the table """
    pagecount.IncrPageCount("export.ExportTableTSV.attempt", 1)
    # Any of these helpers raise Fail on bad input, aborting the export.
    verify_dig_sig(self.request, "ExportTableTSV")
    limit = get_limit(self.request, "ExportTableTSV")
    min_key = utils.get_last_arg(self.request, "min_key", "")
    model = get_model(table, "ExportTableTSV")
    self.response.out.write(export_table_as_tsv(model, min_key, limit))
    pagecount.IncrPageCount("export.ExportTableTSV.success", 1)
def transfer_table(source, destination, min_key, limit):
  """Copy up to `limit` rows from the `source` model to `destination`.

  Args:
    source: db.Model subclass to read from.
    destination: db.Model subclass to write to.
    min_key: key of the last row already transferred ("" to start at the top).
    limit: maximum number of rows to copy in this batch.
  Returns:
    (last_key, number_of_rows): key of the last row copied and the number
    of rows written in this batch.
  """
  last_key = ""
  number_of_rows = 0
  def populate_row(src_table, dest_table, row, key=None):
    """ put a row from the src_table into the dest_table """
    if key:
      record = dest_table(key_name=str(key))
    else:
      record = dest_table()
    for field in src_table.properties():
      setattr(record, field, getattr(row, field))
    record.put()
  if min_key == "":
    # this is the first record
    inequality = ">="
  else:
    inequality = ">"
  query = source.gql(("WHERE __key__ %s :1 ORDER BY __key__" % inequality),
                     get_min_key(source, min_key))
  rsp = query.fetch(limit)
  for row in rsp:
    last_key = row.key().id_or_name()
    try:
      # try to preserve our key name
      populate_row(source, destination, row, last_key)
    except Exception:
      # numeric ids cannot be reused as key names; fall back to letting
      # the datastore assign a fresh key
      populate_row(source, destination, row)
    number_of_rows += 1
  return last_key, number_of_rows
def verify_table_name(table_to):
  """ make sure this table name is safe to use """
  allowed = re.compile(r'[A-Za-z0-9_]')
  sanitized = ''.join(ch for ch in table_to if allowed.match(ch))
  if sanitized != table_to:
    # at least one character was filtered out, so the name is unsafe
    pagecount.IncrPageCount("export.TransferTable.badDestName", 1)
    raise Fail("destination contains nonalphanumerics '%s'" % table_to)
class TransferTable(webapp.RequestHandler):
  """ replicate the data in one table into another table """
  def __init__(self):
    # older webapp versions may lack __init__; guard before chaining up
    if hasattr(webapp.RequestHandler, '__init__'):
      webapp.RequestHandler.__init__(self)
  def request(self):
    """ pylint wants a public request method """
    # NOTE(review): webapp.RequestHandler has no __response__ attribute;
    # this pylint-appeasement stub would raise if ever called.
    webapp.RequestHandler.__response__(self)
  def response(self):
    """ pylint wants a public response method """
    webapp.RequestHandler.__response__(self)
  def get(self, table_from, table_to):
    """ handle the request to replicate a table """
    pagecount.IncrPageCount("export.TransferTable.attempt", 1)
    verify_dig_sig(self.request, "TransferTable")
    limit = get_limit(self.request, "TransferTable")
    min_key = utils.get_last_arg(self.request, "min_key", "")
    if table_from == table_to:
      pagecount.IncrPageCount("export.TransferTable.sameTableName", 1)
      raise Fail("cannot transfer '%s' to itself" % table_from)
    # bug fix: the old check compared only the first len(table_from)
    # characters, so e.g. "FooBar" was accepted as a destination for "Foo"
    # even though the error message requires a "Foo_" prefix
    if not table_to.startswith(table_from + '_'):
      raise Fail("destination must start with '%s_'" % table_from)
    verify_table_name(table_to)
    # match our type of table
    source = get_model(table_from, "TransferTable")
    destination = type(table_to, (source,), {})
    if min_key == "":
      # a blank key means that we are starting at the top of the table
      # so we need to clean out anything that may already be in
      # the destination table
      while True:
        query = destination.all()
        # 500 records is the max
        results = query.fetch(500)
        if results:
          db.delete(results)
        else:
          break
    last_key, rows = transfer_table(source, destination, min_key, limit)
    self.response.out.write("from %s to %s\nrows\t%d\nlast_key\t%s\n"
                            % (table_from, table_to, rows, last_key))
    pagecount.IncrPageCount("export.TransferTable.success", 1)
class PopulateTable(webapp.RequestHandler):
  """ populate the data in the table from posted TSV rows """
  def __init__(self):
    # older webapp versions may lack __init__; guard before chaining up
    if hasattr(webapp.RequestHandler, '__init__'):
      webapp.RequestHandler.__init__(self)
  def request(self):
    """ pylint wants a public request method """
    # NOTE(review): webapp.RequestHandler has no __response__ attribute;
    # this pylint-appeasement stub would raise if ever called.
    webapp.RequestHandler.__response__(self)
  def response(self):
    """ pylint wants a public response method """
    webapp.RequestHandler.__response__(self)
  def post(self, table):
    """ handle the request to populate the table.

    Expects a "row" form arg: a header line of tab-separated field names
    followed by "row="-prefixed tab-separated value lines.  An optional
    "tv" arg selects a versioned destination table (e.g. Foo2009), and
    "reset=1" clears the destination before writing.
    """
    pagecount.IncrPageCount("export.PopulateTable.attempt", 1)
    verify_dig_sig(self.request, "PopulateTable")
    table_version = str(utils.get_last_arg(self.request, "tv", ""))
    if len(table_version) > 0:
      verify_table_name(table_version)
      source = get_model(table, "PopulateTable")
      destination = type(table + table_version, (source,), {})
    else:
      destination = get_model(table, "PopulateTable")
    # handle reference properties
    def ref_property_UserInfo(field):
      """ resolve a UserInfo reference in the matching versioned table """
      rmodel = type('UserInfo' + table_version, (models.UserInfo,), {})
      return rmodel.get_by_key_name(field)
    def nop(v):
      """ this is used for unknown field types """
      return v
    def str_to_datetime(datetimestring):
      """ convert string to a real DateTime object """
      # dont need milliseconds here
      ar = datetimestring.split(".")
      datetime_format = "%Y-%m-%d %H:%M:%S"
      return datetime.strptime(ar[0], datetime_format)
    def str_to_date(datestring):
      """ convert string to a real Date object """
      date_format = "%Y-%m-%d"
      return datetime.strptime(datestring, date_format).date()
    def str_to_bool(boolstring):
      """ convert string to a real bool, or raise ValueError """
      # bug fix: the old bool(field) was True for ANY non-empty string,
      # so exported "False" and "0" values were imported as True
      value = boolstring.strip().lower()
      if value in ("true", "1"):
        return True
      if value in ("false", "0"):
        return False
      raise ValueError("not a boolean: %s" % boolstring)
    try:
      reset = int(utils.get_last_arg(self.request, "reset", "0"))
    except:
      pagecount.IncrPageCount("export.%s.nonIntLimit" % "PopulateTable", 1)
      raise Fail("invalid &reset signal")
    if reset == 1:
      # we should only see this with a first batch of records
      logging.info("export.PopulateTable reset signal recvd for %s%s"
                   % (table, table_version))
      self.response.out.write(
          "PopulateTable: reset signal recvd, clearing all rows\n")
      pagecount.IncrPageCount("export.%s.reset" % "PopulateTable", 1)
      while True:
        query = destination.all()
        # cannot delete more than 500 entities in a single call
        # and if there are a lot here we are going to timeout
        # anyway but better to try and fail than risk duplicating
        results = query.fetch(500)
        if results:
          logging.info("export.PopulateTable deleting %d from %s%s" %
                       (len(results), table, table_version))
          self.response.out.write("PopulateTable: deleting %d from %s%s\n"
                                  % (len(results), table, table_version))
          db.delete(results)
        else:
          logging.info("export.PopulateTable %s%s reset complete" %
                       (table, table_version))
          self.response.out.write("PopulateTable: %s%s reset complete\n" %
                                  (table, table_version))
          break
    # one record per line
    rows = self.request.get("row").split("\n")
    # the first row is a header
    header = rows.pop(0).split("\t")
    field_type = []
    for field in header:
      # we are going to want to remember a function for each field type
      # but for now all we are doing is initializing the list
      field_type.append(None)
    limit = get_limit(self.request, "PopulateTable")
    logging.info("export.PopulateTable write to %s%s" % (table, table_version))
    written = 0
    row_number = 0
    for row in rows:
      row_number += 1
      # all of our kind of lines should start "row="
      if len(row) > ROW_MARKER_LEN and row[0:ROW_MARKER_LEN] == ROW_MARKER:
        fields = row[ROW_MARKER_LEN:].split("\t")
        for i, field in enumerate(fields):
          if i == 0:
            # on the first column (key) we only instantiate our kind of record
            try:
              # it could be a named key
              if not str(field)[0].isdigit():
                record = destination(key_name = str(field))
              else:
                record = destination()
            except:
              record = destination()
          else:
            # bug fix: strip() is a string method; the old bare strip(field)
            # call is a NameError unless string.strip was imported elsewhere
            if field is None or len(field.strip()) < 1:
              # no field/field value, nothing to do
              continue
            if field_type[i] != None:
              # we think we already know what kind of field this is
              try:
                # but we could be wrong
                setattr(record, header[i], field_type[i](field))
              except:
                # nothing we can really do about it now except carry on
                # and see if we can still make this a good record
                logging.warning(
                  "export.PopulateTable %s = %s not set in row %d of %s%s" %
                  (header[i], field, row_number, table, table_version))
                self.response.out.write("field %s = %s not set in row %d of %s%s\n" %
                  (header[i], field, row_number, table, table_version))
            else:
              # on the first row of the file
              # we dont know what type of field this is
              # but we can try them all until we succeed
              # and remember which one worked for subsequent rows
              n = 0
              while n < MAX_FIELD_TYPES:
                if n == FIELD_TYPE_REF:
                  if table != "UserInterest" or header[i] != "user":
                    # bug fix: the old bare `continue` skipped the n += 1
                    # at the bottom of the loop and spun forever
                    n += 1
                    continue
                  setattr(record, header[i], ref_property_UserInfo(field))
                  field_type[i] = ref_property_UserInfo
                  break
                elif n == FIELD_TYPE_DATETIME:
                  try:
                    setattr(record, header[i], str_to_datetime(field))
                    field_type[i] = str_to_datetime
                    break
                  except:
                    pass
                elif n == FIELD_TYPE_DATE:
                  try:
                    setattr(record, header[i], str_to_date(field))
                    field_type[i] = str_to_date
                    break
                  except:
                    pass
                elif n == FIELD_TYPE_STR:
                  try:
                    setattr(record, header[i], field)
                    field_type[i] = str
                    break
                  except:
                    pass
                elif n == FIELD_TYPE_BOOL:
                  try:
                    setattr(record, header[i], str_to_bool(field))
                    field_type[i] = str_to_bool
                    break
                  except:
                    pass
                elif n == FIELD_TYPE_INT:
                  try:
                    setattr(record, header[i], int(field))
                    field_type[i] = int
                    break
                  except:
                    pass
                elif n == FIELD_TYPE_LONG:
                  try:
                    setattr(record, header[i], long(field))
                    field_type[i] = long
                    break
                  except:
                    pass
                elif n == FIELD_TYPE_FLOAT:
                  try:
                    setattr(record, header[i], float(field))
                    field_type[i] = float
                    break
                  except:
                    pass
                n += 1
              if n >= MAX_FIELD_TYPES:
                logging.warning(
                  "export.PopulateTable unknown field type %s in %s%s" %
                  (header[i], table, table_version))
                self.response.out.write("unknown field type %s in %s%s\n" %
                                        (header[i], table, table_version))
                field_type[i] = nop
              else:
                logging.debug("%s is type %d\n" % (header[i], n))
        # end-of for each field
        try:
          # ready to attempt a put
          record.put()
          written += 1
          if written >= limit:
            break
        except:
          logging.error("export.PopulateTable put failed at row %d in %s%s" %
                        (row_number, table, table_version))
          self.response.out.write("put failed at row %d in %s%s\n" %
                                  (row_number, table, table_version))
    # end-of for each row
    logging.info("export.PopulateTable wrote %d rows to %s%s" %
                 (written, table, table_version))
    self.response.out.write("wrote %d rows to %s%s\n" %
                            (written, table, table_version))
    pagecount.IncrPageCount("export.PopulateTable.success", 1)
class ClearTable(webapp.RequestHandler):
  """ clear all data from a table """
  def __init__(self):
    # guard in case the base class has no __init__ to chain to
    if hasattr(webapp.RequestHandler, '__init__'):
      webapp.RequestHandler.__init__(self)
  def request(self):
    """ pylint wants a public request method """
    webapp.RequestHandler.__response__(self)
  def response(self):
    """ pylint wants a public response method """
    webapp.RequestHandler.__response__(self)
  def get(self, table):
    """ clear data """
    pagecount.IncrPageCount("export.ClearTable", 1)
    verify_dig_sig(self.request, "ClearTable")
    # an optional "tv" arg selects a versioned table, e.g. Foo2009
    table_version = str(utils.get_last_arg(self.request, "tv", ""))
    if table_version:
      base_model = get_model(table, "ClearTable")
      destination = type(table + table_version, (base_model,), {})
    else:
      destination = get_model(table, "ClearTable")
    limit = get_limit(self.request, "ClearTable")
    # cannot delete more than 500 entities in a single call
    if not 1 <= limit <= 500:
      limit = 500
    results = destination.all().fetch(limit)
    if results:
      self.response.out.write("ClearTable: deleting %d from %s%s\n"
                              % (len(results), table, table_version))
      db.delete(results)
    else:
      self.response.out.write("ClearTable: %s%s clear complete\n" %
                              (table, table_version))
"""
TODO:
/exportapi/export/<tablename>
/exportapi/import/<tablename>
/exportapi/clear/<tablename>
/exportapi/transfer/<from-table>/<to-table>
"""
APPLICATION = webapp.WSGIApplication(
[ ("/export/(.*?)\.tsv", ExportTableTSV),
("/export/-/(.*?)", PopulateTable),
("/export/-clear-/(.*?)", ClearTable),
("/export/(.*?)/(.*?)", TransferTable),
("/export/", ShowUsage)
], debug=True)
def main():
  """ execution begins """
  # standard App Engine CGI entry point
  run_wsgi_app(APPLICATION)
if __name__ == "__main__":
  main()
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
appengine main().
"""
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
import os
import logging
import views
import urls
import deploy
# URL -> view routing table; webapp matches in order, so the catch-all
# '/.*' not-found handler must stay last.
APPLICATION = webapp.WSGIApplication(
    [(urls.URL_HOME, views.home_page_view),
     (urls.URL_PSA, views.home_page_view),
     # TODO: replace with a generic way to redirect all unknown pages to /
     (urls.URL_OLD_HOME, views.home_page_redir_view),
     (urls.URL_CONSUMER_UI_SEARCH, views.consumer_ui_search_view),
     (urls.URL_CONSUMER_UI_SEARCH_REDIR, views.consumer_ui_search_redir_view),
     (urls.URL_API_SEARCH, views.search_view),
     (urls.URL_UI_SNIPPETS, views.ui_snippets_view),
     (urls.URL_UI_MY_SNIPPETS, views.ui_my_snippets_view),
     (urls.URL_MY_EVENTS, views.my_events_view),
     (urls.URL_ACTION, views.action_view),
     (urls.URL_ADMIN, views.admin_view),
     #(urls.URL_POST, views.post_view), USER POSTING - currently disabled
     (urls.URL_REDIRECT, views.redirect_view),
     (urls.URL_MODERATE, views.moderate_view),
     (urls.URL_MODERATE_BLACKLIST, views.moderate_blacklist_view),
     (urls.URL_DATAHUB_DASHBOARD, views.datahub_dashboard_view),
    ] +
    # static files each get the shared static_content handler
    [ (url, views.static_content) for url in
      urls.STATIC_CONTENT_FILES.iterkeys() ] +
    [ ('/.*', views.not_found_handler) ],
    debug=deploy.is_local_development())
def main():
  """this comment to appease pylint."""
  if not deploy.is_local_development():
    # we have lots of debug and info's
    logging.getLogger().setLevel(logging.WARNING)
  else:
    logging.info("deploy.is_local_development()==True")
  run_wsgi_app(APPLICATION)
if __name__ == "__main__":
  main()
| Python |
#!/usr/bin/python2.5
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration class.
This class provides a dictionary of run-time configuration options for the
application.
You can edit the values in the datastore editor of the admin console or other
datastore editing tools.
The values are cached both in memcache (which can be flushed) and locally
in the running Python instance, which has an indeterminite but typically short
life time.
To use the class:
import config
configvalue = config.config.get_value('valuename')
"""
from google.appengine.api import memcache
from google.appengine.ext import db
class Config(db.Model):
  """Configuration parameters.
  The key name is used as the name of the parameter.
  """
  description = db.StringProperty()
  value = db.StringProperty(required=True)
  MEMCACHE_ENTRY = 'Config'
  # Warning: do not add private/secret configuration values used in production
  # to these default values. The default values are intended for development.
  # Production values must be stored in the datastore.
  DEFAULT_VALUES = {}
  local_config_cache = None
  @classmethod
  def get_value(cls, name):
    """Retrieves the value of a configuration parameter.
    Args:
      name: the name of the parameter whose value we are looking for.
    Returns:
      The value of the parameter or None if the parameter is unknown.
    """
    if cls.local_config_cache is None:
      # Local in-process cache is cold: try memcache first.
      values = memcache.get(cls.MEMCACHE_ENTRY)
      if values is None:
        # Memcache is cold too: rebuild from defaults plus the datastore.
        values = dict(cls.DEFAULT_VALUES)
        for parameter in Config.all():
          values[parameter.key().name()] = parameter.value
        # Publish the full mapping to memcache with a 1h expiration.
        memcache.add(cls.MEMCACHE_ENTRY, values, 60*60)
      cls.local_config_cache = values
    # Serve the lookup from the local cache.
    return cls.local_config_cache.get(name)
| Python |
#!/usr/bin/python2.5
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User Info module (userinfo).
This file contains the base class for the userinfo classes.
It also contains (at least for now) subclasses for different login types."""
__author__ = 'matthew.blain@google.com'
import logging
import os
from django.utils import simplejson
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from StringIO import StringIO
from facebook import Facebook
import deploy
import models
import utils
class Error(Exception): pass  # base class for userinfo errors
class NotLoggedInError(Error): pass  # no login cookie for this account type
class ThirdPartyError(Error): pass  # upstream identity service failed
USERINFO_CACHE_TIME = 120 # seconds
# Keys specific to Footprint
FRIENDCONNECT_KEY = '02962301966004179520'
def get_cookie(cookie_name):
  """Return the value of `cookie_name` from the HTTP_COOKIE env, or None.

  Args:
    cookie_name: name of the cookie to look up.
  Returns:
    The cookie's value, or None when absent.
  """
  if 'HTTP_COOKIE' in os.environ:
    cookies = os.environ['HTTP_COOKIE'].split('; ')
    for cookie in cookies:
      # bug fix: split on the first '=' only -- cookie values (e.g.
      # facebook session payloads) may themselves contain '=' and were
      # being truncated; also avoids IndexError on a value-less cookie
      name_value = cookie.split('=', 1)
      if name_value[0] == cookie_name and len(name_value) > 1:
        return name_value[1]
def get_user(request):
  """Return the logged-in User for this request, or None.

  Tries each supported login type in order; a constructed User object is
  cached in memcache under its login cookie for USERINFO_CACHE_TIME seconds.
  """
  for cls in (TestUser, FriendConnectUser, FacebookUser):
    cookie = cls.get_cookie()
    if cookie:
      key = 'cookie:' + cookie
      user = memcache.get(key)
      if not user:
        try:
          # construct the user (may call out to the identity provider)
          user = cls(request)
          memcache.set(key, user, time = USERINFO_CACHE_TIME)
        except:
          # This hides all errors from the Facebook client library
          # TODO(doll): Hand back an error message to the user
          logging.exception("Facebook or Friend Connect client exception.")
          return None
      return user
def get_usig(user):
  """Get a signature for the current user suitable for an XSRF token."""
  if not user:
    return None
  cookie = user.get_cookie()
  if not cookie:
    return None
  return utils.signature(cookie)
class User(object):
  """The User info for a user related to a currently logged in session.."""
  def __init__(self, account_type, user_id, display_name, thumbnail_url):
    # account_type is one of models.UserInfo.KNOWN_TYPES
    self.account_type = account_type
    self.user_id = user_id
    self.display_name = display_name
    self.thumbnail_url = thumbnail_url
    # lazily populated caches (see get_user_info / load_friends)
    self.user_info = None
    self.friends = None
    self.total_friends = None
  @staticmethod
  def get_current_user(self):
    # NOTE(review): declared @staticmethod yet takes `self`; subclasses
    # are expected to provide the real implementation -- confirm the
    # decorator is intentional.
    raise NotImplementedError
  def get_user_info(self):
    """Return (and cache) the datastore UserInfo entity for this user."""
    if not self.user_info:
      self.user_info = models.UserInfo.get_or_insert_user(self.account_type,
                                                          self.user_id)
    return self.user_info
  def load_friends(self):
    """Return this user's friends, via memcache when possible."""
    key_suffix = self.account_type + ":" + self.user_id
    key = 'friends:' + key_suffix
    total_key = 'total_friends:' + key_suffix
    self.friends = memcache.get(key)
    self.total_friends = memcache.get(total_key)
    if not self.friends:
      # cache miss; note an empty friend list also re-triggers the fetch
      self.friends = self.get_friends_by_url();
      memcache.set(key, self.friends, time = USERINFO_CACHE_TIME)
      memcache.set(total_key, self.total_friends, time = USERINFO_CACHE_TIME)
    return self.friends
  def get_friends_by_url(self):
    """Fetch friends from the identity provider; subclasses implement."""
    raise NotImplementedError
  @classmethod
  def is_logged_in(cls):
    # true iff the login cookie for this account type is present
    cookie = cls.get_cookie()
    return not not cookie
class FriendConnectUser(User):
  """A friendconnect user."""
  BASE_URL = 'http://www.google.com/friendconnect/api/people/'
  USER_INFO_URL = BASE_URL + '@viewer/@self?fcauth=%s'
  FRIEND_URL = BASE_URL + '@viewer/@friends?fcauth=%s'
  def __init__(self, request):
    """Creates a friendconnect user from the current env, or raises error."""
    self.fc_user_info = self.get_fc_user_info()
    super(FriendConnectUser, self).__init__(
        models.UserInfo.FRIENDCONNECT,
        self.fc_user_info['entry']['id'],
        self.fc_user_info['entry']['displayName'],
        self.fc_user_info['entry']['thumbnailUrl'])
  def get_friends_by_url(self):
    """Fetch the viewer's friends from the Friend Connect REST API.

    Returns:
      List of User objects; empty when the fetch does not return HTTP 200.
    Raises:
      NotLoggedInError: no fcauth cookie is present.
    """
    friend_cookie = self.get_cookie()
    if not friend_cookie:
      raise NotLoggedInError()
    self.friends = []
    url = self.FRIEND_URL % friend_cookie
    result = urlfetch.fetch(url)
    if result.status_code == 200:
      friend_info = simplejson.load(StringIO(result.content))
      self.total_friends = friend_info['totalResults']
      for friend_object in friend_info['entry']:
        friend = User(
            models.UserInfo.FRIENDCONNECT,
            friend_object['id'],
            friend_object['displayName'],
            friend_object['thumbnailUrl'])
        self.friends.append(friend)
    # NOTE(review): a non-200 response silently yields an empty list here
    # while get_fc_user_info raises ThirdPartyError -- confirm asymmetry.
    return self.friends
  @classmethod
  def get_cookie(cls):
    """Return the site's fcauth login cookie, or None."""
    return get_cookie('fcauth' + FRIENDCONNECT_KEY)
  @classmethod
  def get_fc_user_info(cls):
    """Fetch the viewer's profile from Friend Connect.

    Raises:
      NotLoggedInError: no fcauth cookie present.
      ThirdPartyError: the REST call did not return HTTP 200.
    """
    friend_cookie = cls.get_cookie()
    if not friend_cookie:
      raise NotLoggedInError()
    # bug fix: removed a stray bare `return` that followed the raise above
    url = cls.USER_INFO_URL % friend_cookie
    result = urlfetch.fetch(url)
    if result.status_code == 200:
      user_info = simplejson.load(StringIO(result.content))
      return user_info
    else:
      raise ThirdPartyError()
class FacebookUser(User):
  """A user logged in through Facebook Connect."""
  def __init__(self, request):
    """Creates a facebook user from the request session, or raises error."""
    self.facebook = Facebook(deploy.get_facebook_key(),
                             deploy.get_facebook_secret())
    if not self.facebook.check_connect_session(request):
      raise NotLoggedInError()
    info = self.facebook.users.getInfo([self.facebook.uid],
                                       ['name', 'pic_square_with_logo'])[0]
    super(FacebookUser, self).__init__(
        models.UserInfo.FACEBOOK,
        self.facebook.uid,
        info['name'],
        info['pic_square_with_logo'])
  def get_friends_by_url(self):
    """Fetch the user's app friends from the Facebook API.

    Returns:
      List of User objects (empty when the user has no app friends).
    Raises:
      NotLoggedInError: no Facebook session is attached to this user.
    """
    if not self.facebook:
      raise NotLoggedInError()
    self.friends = []
    friend_ids = self.facebook.friends.getAppUsers()
    if not friend_ids:
      friend_ids = [] # Force return type to be a list, not a dict or None.
    self.total_friends = len(friend_ids)
    # TODO: handle >20 friends.
    # bug fix: getInfo expects a flat list of uids; the old code wrapped
    # the slice in an extra list ([friend_ids[0:20]])
    friend_objects = self.facebook.users.getInfo(
        friend_ids[0:20], ['name', 'pic_square_with_logo'])
    for friend_object in friend_objects:
      friend = User(
          models.UserInfo.FACEBOOK,
          # bug fix: str() instead of deprecated backticks (repr), which
          # rendered long uids as e.g. '123L'
          str(friend_object['uid']),
          friend_object['name'],
          friend_object['pic_square_with_logo'])
      self.friends.append(friend)
    return self.friends
  @classmethod
  def get_cookie(cls):
    """Return the Facebook Connect cookie (named after the app key)."""
    return get_cookie(deploy.get_facebook_key())
class TestUser(User):
  """A really simple user example."""
  def __init__(self, request):
    """Creates a user, or raises error."""
    cookie = self.get_cookie()
    if not cookie:
      raise NotLoggedInError()
    # the cookie value doubles as both the user id and the display name
    super(TestUser, self).__init__(
        models.UserInfo.TEST, cookie, cookie,
        'images/Event-Selected-Star.png')
  @classmethod
  def get_cookie(cls):
    """The test login cookie, if present."""
    return get_cookie('footprinttest')
  def get_friends_by_url(self):
    # TODO: Something clever for testing--like all TestUser?
    return []
| Python |
#!/usr/bin/python2.5
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Datastore helper methods."""
import datetime
import logging
from google.appengine.api import memcache
from google.appengine.ext import db
def set_entity_attributes(entity, absolute_attributes, relative_attributes):
  """Set entity attributes transactionally, using absolute or relative values.

  Args:
    entity: datastore entity to update (it is re-fetched inside the
      transaction, so a stale copy is safe to pass in).
    absolute_attributes: Dictionary of attr_name:value pairs to set.
    relative_attributes: Dictionary of attr_name:value pairs to set as
      relative to current value. If some attr_name appears in both
      the absolute and relative dictionaries, the absolute is set first.
  Returns:
    On error: (None, None)
    On success: (entity, deltas)
      entity: The entity after applying the changes
      deltas: Dict of attr_name:delta_values, where each delta shows how
      the change in value in the respective attribute.
  """
  if not absolute_attributes:
    absolute_attributes = {}
  if not relative_attributes:
    relative_attributes = {}
  def txn(entity):
    # Passed 'entity' as function parameter because of python scope rules.
    # Re-fetch inside the transaction so we work on fresh data.
    entity = entity.get(entity.key())
    # Initialize the deltas list with starting values. Also, set any undefined
    # attribute to zero.
    deltas = {}
    combined_attributes = (set([x for x in absolute_attributes.iterkeys()]) |
                           set([x for x in relative_attributes.iterkeys()]))
    for attr in combined_attributes:
      # NOTE(review): `not getattr(...)` also treats an existing 0 value as
      # undefined; harmless for counters but confirm for other uses.
      if not getattr(entity, attr):
        setattr(entity, attr, 0) # Ensure all attributes are defined.
        deltas[attr] = 0
      else:
        deltas[attr] = getattr(entity, attr)
    # Set absolute values first.
    for attr in absolute_attributes.iterkeys():
      setattr(entity, attr, absolute_attributes[attr])
    # Set relative values.
    for attr in relative_attributes.iterkeys():
      # Here, we know getattr() is defined, since we initialized all undefined
      # attributes at the top of this function.
      setattr(entity, attr, getattr(entity, attr) + relative_attributes[attr])
    # Compute the final delta value for each attribute.
    for attr in combined_attributes:
      deltas[attr] = getattr(entity, attr) - deltas[attr]
    entity.put()
    return (entity, deltas)
  try:
    return_value = db.run_in_transaction(txn, entity)
    return return_value
  except Exception:
    logging.exception('set_entity_attributes failed for key %s' %
                      entity.key().id_or_name())
    return (None, None)
def get_by_ids(cls, ids):
  """Gets multiple entities for IDs, trying memcache then datastore.
  Args:
    cls: Model class
    ids: list of ids.
  Returns:
    Dictionary of results, id:model.
  """
  # first pass: whatever memcache already has
  results = memcache.get_multi(ids, cls.MEMCACHE_PREFIX + ':')
  prefix = cls.DATASTORE_PREFIX
  # second pass: fetch the remainder from the datastore by key name
  missing_key_names = [prefix + entity_id for entity_id in ids
                       if entity_id not in results]
  for entity in cls.get_by_key_name(missing_key_names):
    if entity:
      # strip the datastore prefix back off to recover the caller's id
      results[entity.key().name()[len(prefix):]] = entity
  return results
| Python |
#!/usr/bin/python2.5
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Datastore models."""
from google.appengine.api import memcache
from google.appengine.ext import db
import modelutils
class Error(Exception):
  """Generic error."""
  # base class so callers can catch every models error with one except
  pass
class BadAccountType(Error):
  """Account type is unknown (not facebook, friendconnect, or test)."""
  pass
# Models
class UserInfo(db.Model):
  """Basic user statistics/preferences data."""
  # Key is accounttype:user_id.
  first_visit = db.DateTimeProperty(auto_now_add=True)
  last_edit = db.DateTimeProperty(auto_now=True)
  moderator = db.BooleanProperty(default=False)
  moderator_request_email = db.StringProperty()
  moderator_request_desc = db.TextProperty()
  moderator_request_admin_notes = db.StringProperty(multiline=True)
  def account_type(self):
    """Returns one of (FRIENDCONNECT, FACEBOOK, TEST)."""
    key_name = self.key().name()
    return key_name.split(':', 1)[0]
  def user_id(self):
    """User id."""
    key_name = self.key().name()
    return key_name.split(':', 1)[1]
  # Known types of accounts. Type must not start with a number.
  FRIENDCONNECT = 'friendconnect'
  FACEBOOK = 'facebook'
  TEST = 'test'
  KNOWN_TYPES = (FRIENDCONNECT, FACEBOOK, TEST)
  @classmethod
  def get_or_insert_user(cls, account_type, user_id):
    """Gets existing or creates a new user.
    Similar to get_or_insert, increments UserStats if appropriate.
    Args:
      account_type: Type of account used.
      user_id: address within that system.
    Returns:
      UserInfo for this user.
    Raises:
      BadAccountType if the account_type is unknown.
      Various datastore exceptions.
    """
    if not account_type in cls.KNOWN_TYPES:
      raise BadAccountType()
    key_name = '%s:%s' % (account_type, user_id)
    # (fix) removed a redundant get_by_key_name() here: its result was
    # immediately overwritten by the transaction below, costing one extra
    # datastore read on every call
    def txn():
      """Transaction to get or insert user."""
      entity = cls.get_by_key_name(key_name)
      created_entity = False
      if entity is None:
        entity = cls(key_name=key_name)
        entity.put()
        created_entity = True
      return (entity, created_entity)
    (user_info, created_entity) = db.run_in_transaction(txn)
    if created_entity:
      # only count each user once, at creation time
      UserStats.increment(account_type, user_id)
    return user_info
class UserStats(db.Model):
  """Stats about how many users we have."""
  count = db.IntegerProperty(default=0)
  @classmethod
  def increment(cls, account_type, user_id):
    """Sharded counter. User ID is only for sharding."""
    def txn():
      """Transaction to increment account_type's stats."""
      # We want << 1000 shards.
      # This cheesy shard mechanism allows us some amount of way to see how
      # many users of each type we have too.
      shard_name = account_type + ':' + user_id[:2]
      shard = cls.get_by_key_name(shard_name)
      if shard is None:
        shard = cls(key_name=shard_name)
      shard.count += 1
      shard.put()
    db.run_in_transaction(txn)
  @staticmethod
  def get_count():
    """Returns total number of users."""
    # add up every shard's counter
    return sum(shard.count for shard in UserStats.all())
class UserInterest(db.Model):
  """Our record of a user's actions related to an opportunity."""
  # Key is ('id:%s#%s' % (the stable ID from base, user key name))
  # stable ID is probably not the same ID provided in the feed from providers.
  DATASTORE_PREFIX = 'id:'
  user = db.ReferenceProperty(UserInfo, collection_name='interests')
  opp_id = db.StringProperty()
  liked_last_modified = db.DateTimeProperty()
  # The interest types (liked, will_attend, etc) must exist with the
  # same property names in UserInterest and VolunteerOpportunityStats,
  # and be in sync with USER_INTEREST_ATTRIBUTES at the end of this file.
  liked = db.IntegerProperty(default=0)
  will_attend = db.IntegerProperty(default=0)
  flagged = db.IntegerProperty(default=0)
  @classmethod
  def make_key_name(cls, user_entity, opp_id):
    """Generate key name for a given user_entity/opp_id pair."""
    # bug fix: DATASTORE_PREFIX already ends with ':', so the old
    # '%s:%s#%s' format produced a doubled separator ('id::...'), which
    # contradicts the documented key format above and the plain
    # prefix-concatenation used elsewhere in this file.
    # NOTE(review): confirm no production rows were written with the old
    # doubled-colon keys before deploying this change.
    return '%s%s#%s' % (cls.DATASTORE_PREFIX, opp_id, user_entity.key().name())
class VolunteerOpportunityStats(db.Model):
  """Basic statistics about opportunities."""
  # The __key__ is 'id:' + volunteer_opportunity_id
  DATASTORE_PREFIX = 'id:'
  MEMCACHE_PREFIX = 'VolunteerOpportunityStats:'
  MEMCACHE_TIME = 60000 # seconds
  last_edit = db.DateTimeProperty(auto_now=True)
  # The interest types (liked, will_attend, etc) must exist with the
  # same property names in UserInterest and VolunteerOpportunityStats,
  # and be in sync with USER_INTEREST_ATTRIBUTES at the end of this file.
  liked = db.IntegerProperty(default=0)
  will_attend = db.IntegerProperty(default=0)
  flagged = db.IntegerProperty(default=0)
  # Blacklist is controlled by the moderators only, it is not a statistic.
  blacklisted = db.IntegerProperty(default=0)
  @classmethod
  def increment(cls, volunteer_opportunity_id, relative_attributes,
                absolute_attributes=None):
    """Helper to increment volunteer opportunity stats.
    Example:
      VolunteerOpportunityStats.increment(opp_id,
        { USER_INTEREST_LIKED: 1, USER_INTEREST_WILL_ATTEND: 1 })
    Args:
      volunteer_opportunity_id: ID of opportunity.
      relative_attributes: Dictionary of attr_name:value pairs to set as
        relative to current value.
      absolute_attributes: Dictionary of attr_name:value pairs to set as
        absolute values.
    Returns:
      Success boolean
    """
    entity = VolunteerOpportunityStats.get_or_insert(
        cls.DATASTORE_PREFIX + volunteer_opportunity_id)
    if not entity:
      return False
    (new_entity, unused_deltas) = \
        modelutils.set_entity_attributes(entity, absolute_attributes,
                                         relative_attributes)
    if new_entity is None:
      # bug fix: set_entity_attributes returns (None, None) on failure;
      # previously we cached that None in memcache (poisoning later reads)
      # and still reported success
      return False
    memcache.set(cls.MEMCACHE_PREFIX + volunteer_opportunity_id, new_entity,
                 time=cls.MEMCACHE_TIME)
    return True
  @classmethod
  def set_blacklisted(cls, volunteer_opportunity_id, value):
    """Helper to set volunteer opportunity value and update memcache."""
    # A wrapper for 'increment'--it's overkill, but manages memcache for us.
    return cls.increment(volunteer_opportunity_id, {}, {'blacklisted' : value})
  @classmethod
  def add_default_entities_to_memcache(cls, ids):
    """Add blank entities to memcache so get_by_ids quickly returns them."""
    entities = {}
    for key in ids:
      entities[key] = cls(key_name= cls.DATASTORE_PREFIX + key)
    memcache.add_multi(entities, time=cls.MEMCACHE_TIME,
                       key_prefix=cls.MEMCACHE_PREFIX)
class VolunteerOpportunity(db.Model):
  """Basic information about opportunities.

  Separate from VolunteerOpportunityStats because these entries need not be
  operated on transactionally since there's no counts.
  """
  # The __key__ is 'id:' + volunteer_opportunity_id
  DATASTORE_PREFIX = 'id:'
  # Prefix for memcache keys caching these entities.
  MEMCACHE_PREFIX = 'VolunteerOpportunity:'
  # How long cached copies live in memcache.
  MEMCACHE_TIME = 60000  # seconds
  # Information about the opportunity
  # URL to the Google Base entry
  base_url = db.StringProperty()
  # When we last update the Base URL.
  last_base_url_update = db.DateTimeProperty()
  # Incremented (possibly incorrectly to avoid transactions) when we try
  # to load the data from base but fail. Also the last date/time seen.
  base_url_failure_count = db.IntegerProperty(default=0)
  last_base_url_update_failure = db.DateTimeProperty()
  # TODO(paul): added_to_calendar, added_to_facebook_profile, etc
# Names of the per-opportunity interest counters.  These strings double as
# property names on the stats/interest models above, so they must stay in
# sync with those class definitions.
USER_INTEREST_LIKED = 'liked'
USER_INTEREST_WILL_ATTEND = 'will_attend'
USER_INTEREST_FLAGGED = 'flagged'

# All interest attributes, for code that iterates over every counter.
USER_INTEREST_ATTRIBUTES = (
    USER_INTEREST_LIKED,
    USER_INTEREST_WILL_ATTEND,
    USER_INTEREST_FLAGGED,
)
| Python |
#!/usr/bin/python
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import logging
import re
import hashlib
import geocode
import utils
from xml.dom import minidom
from xml.sax.saxutils import escape
from google.appengine.ext import db
class Error(Exception):
  """Base class for exceptions raised by this module."""
  pass
# status codes
# - string names to make them human-readable, i.e. easier debugging
# - leading number provides SQL/GQL sorting without an extra field
#   (sorting is important for the moderator UI, to make sure most-
#   likely-to-be-safe is ranked higher). Note: edited comes before
#   plain new
# - substrings (e.g. "NEW") provide groupings, e.g. is this a 'new'
#   listing, so the moderator UI know what visual treatment to give it.
# Pending ("NEW") states, most-likely-safe first.
NEW_EDITED_VERIFIED = "90.NEW_EDITED_VERIFIED"
NEW_VERIFIED = "80.NEW_VERIFIED"
NEW_EDITED = "70.NEW_EDITED"
NEW = "50.NEW"
NEW_DEFERRED = "40.NEW_DEFERRED"
# Terminal states share the "10." prefix so they sort after pending ones.
ACCEPTED_MANUAL = "10.ACCEPTED_MANUAL"
ACCEPTED_AUTOMATIC = "10.ACCEPTED_AUTOMATIC"
REJECTED_MANUAL = "10.REJECTED_MANUAL"
REJECTED_AUTOMATIC = "10.REJECTED_AUTOMATIC"
class Posting(db.Model):
  """Postings going through the approval process."""
  # Key is assigned ID (not the stable ID)
  item_id = db.StringProperty(default="")
  status = db.StringProperty(default=NEW)
  # for queries, parse-out these fields - note that we don't care about datatypes
  quality_score = db.FloatProperty(default=1.0)
  creation_time = db.DateTimeProperty(auto_now_add=True)
  start_date = db.DateProperty(auto_now_add=True)
  # listing_xml is the full contents for the listing, assuming it gets approved
  # note: listing_xml also used for fulltext queries
  listing_xml = db.TextProperty(default="")
  # parse-out these fields to improve latency in the moderation UI
  title = db.StringProperty(default="")
  description = db.TextProperty(default="")
  # as per http://code.google.com/p/googleappengine/issues/detail?id=105
  # there's no point in GeoPT esp. given that we're only using this for display
  # there's even bugs (http://aralbalkan.com/1355) in GeoPT, so the heck with it.
  #todo latlong = db.StringProperty(default="")

  def statusChar(self):
    """One-letter moderation outcome: 'A', 'R', or '' while still pending."""
    if "ACCEPTED" in self.status:
      return "A"
    if "REJECTED" in self.status:
      return "R"
    return ""

  def showInModerator(self):
    """True while the posting is in one of the NEW states awaiting review."""
    return "NEW" in self.status

  def isLive(self):
    """True once the posting has been accepted (manually or automatically)."""
    return "ACCEPTED" in self.status

  def reset(self):
    """Puts the posting back to plain NEW."""
    self.status = NEW
    self.put()

  def edit(self):
    """Marks the posting as edited, which re-enters the moderation queue."""
    self.status = NEW_EDITED
    self.put()

  def verify(self):
    """Records email verification, preserving the edited/plain distinction."""
    # TODO: how do we know the edits didn't after the email was sent?
    transitions = {NEW: NEW_VERIFIED, NEW_EDITED: NEW_EDITED_VERIFIED}
    if self.status in transitions:
      self.status = transitions[self.status]
      self.put()

  def accept(self, type="MANUAL"):
    """Accepts the posting; type is "MANUAL" or "AUTOMATIC"."""
    self.status = ACCEPTED_AUTOMATIC if type == "AUTOMATIC" else ACCEPTED_MANUAL
    self.put()

  def reject(self, type="MANUAL"):
    """Rejects the posting; type is "MANUAL" or "AUTOMATIC"."""
    self.status = REJECTED_AUTOMATIC if type == "AUTOMATIC" else REJECTED_MANUAL
    self.put()

  def computeQualityScore(self):
    """Recomputes and stores quality_score (currently a fixed placeholder)."""
    # TODO: walk the object to look for missing/bad fields
    self.quality_score = 1.0
    self.put()
def process(args):
  """Applies moderator actions from request parameters to Posting entities.

  Args:
    args: mapping of parameter name -> action code.  Keys of the form
        "v<datastore key>" identify a Posting; the value is one of
        "A" (accept), "R" (reject), "V" (verify), "X" (delete) or
        "" (reset).  All other keys are ignored.
  """
  for arg in args:
    # Only parameters shaped like "v<key>" carry actions.  startswith also
    # safely skips empty keys (arg[0] would have raised IndexError).
    if not arg.startswith("v"):
      continue
    keystr = arg[1:]
    el = Posting.get(keystr)
    if el is None:  # was "== None"; identity test is the correct idiom
      # already deleted!
      continue
    # TODO: remove quality score hack-- this is how to rank in moderator UI
    action = args[arg]
    if action == "A":
      el.accept()
    elif action == "R":
      el.reject()
    elif action == "V":
      el.verify()
    elif action == "X":
      logging.debug("deleting: " + keystr + " title=" + el.title)
      el.delete()
    elif action == "":
      el.reset()
def query(num=25, start=1, quality_score=0.5, start_date="2009-01-01"):
  """Fetches postings for the moderator UI.

  Args:
    num: maximum number of results.
    start: result offset, for paging.
    quality_score: if exactly 0.0, filter/sort by start_date instead of
        ranking by quality score.
    start_date: "YYYY-MM-DD" lower bound; used only when quality_score==0.0.

  Returns:
    List of Posting entities, each annotated in place with .key (string
    form of the datastore key -- note this shadows the key() method),
    .listing_fmtd (rough HTML rendering of listing_xml) and .status_char.
  """
  # TODO: GQL doesn't support string-CONTAINS, limiting keyword search
  # TODO: GQL doesn't let you do inequality comparison on multiple fields.
  if quality_score == 0.0:
    sd = datetime.strptime(start_date, "%Y-%m-%d")
    q = db.GqlQuery("SELECT * FROM Posting " +
                    "WHERE start_date >= :1 " +
                    "ORDER BY status ASC, start_date ASC " +
                    "LIMIT %d OFFSET %d" % (int(num), int(start)),
                    sd.date())
  else:
    q = db.GqlQuery("SELECT * FROM Posting " +
                    "ORDER BY status ASC,quality_score DESC " +
                    "LIMIT %d OFFSET %d" % (int(num), int(start)))
  result_set = q.fetch(num)
  reslist = []
  for result in result_set:
    result.key = str(result.key())
    # Crude XML->HTML formatting: tag boundaries become "-qbr--" markers,
    # runs of closing tags collapse, ">" becomes ": ", then markers turn
    # into <br/> and duplicate breaks collapse.  Order matters here.
    result.listing_fmtd = re.sub(r'><', '-qbr--', result.listing_xml);
    result.listing_fmtd = re.sub(r'(<?/[a-zA-Z]+-qbr--)+', '-qbr--', result.listing_fmtd);
    result.listing_fmtd = re.sub(r'>', ': ', result.listing_fmtd);
    result.listing_fmtd = re.sub(r'-qbr--', '<br/>', result.listing_fmtd)
    result.listing_fmtd = re.sub(r'(<br/>)+', '<br/>', result.listing_fmtd)
    result.status_char = result.statusChar()
    reslist.append(result)
  return reslist
def create_from_xml(xml):
  """Creates a Posting entity from a VolunteerOpportunity XML string.

  Args:
    xml: serialized listing XML.

  Returns:
    The datastore key of the new Posting, or "" if the XML did not parse.
  """
  try:
    dom = minidom.parseString(xml)
  except Exception:
    # Was a bare "except:", which also swallowed SystemExit and
    # KeyboardInterrupt; unparseable input still just returns "".
    logging.warning("create_from_xml: unparseable XML, skipping")
    return ""
  posting = Posting(listing_xml=xml)
  posting.title = utils.xml_elem_text(dom, "title", '')
  logging.debug("create_from_xml: title="+posting.title)
  logging.debug("create_from_xml: xml="+xml)
  posting.description = utils.xml_elem_text(dom, "description", '')
  try:
    start_date = datetime.strptime(utils.xml_elem_text(
        dom, "startDate", ''), "%Y-%m-%d")
    posting.start_date = start_date.date()
  except (ValueError, TypeError):
    # ignore bad/missing start date -- the auto_now_add default applies
    pass
  # NOTE(review): creation_time is auto_now_add and may still be None
  # before put(), so the hash may just mix in "None" -- confirm intended.
  posting.item_id = hashlib.md5(xml + str(posting.creation_time)).hexdigest()
  posting.put()
  return posting.key()
# Whitelist of form-field names accepted from the posting form; used by
# cleanup_args to escape known keys and blank everything else.  The values
# (all 1) are meaningless -- only key membership and iteration matter.
argnames = {
  "title":1, "description":1, "skills":1, "virtual":1, "addr1":1, "addrname1":1,
  "sponsoringOrganizationName":1, "openEnded":1, "startDate":1,
  "startTime":1, "endTime":1, "endDate":1, "contactNoneNeeded":1,
  "contactEmail":1, "contactPhone":1, "contactName":1, "detailURL":1,
  "weeklySun":1, "weeklyMon":1, "weeklyTue":1, "weeklyWed":1, "weeklyThu":1,
  "weeklyFri":1, "weeklySat":1, "biweeklySun":1, "biweeklyMon":1,
  "biweeklyTue":1, "biweeklyWed":1, "biweeklyThu":1, "biweeklyFri":1,
  "biweeklySat":1, "recurrence":1, "audienceAll":1, "audienceAge":1,
  "minAge":1, "audienceSexRestricted":1, "sexRestrictedTo":1,
  "commitmentHoursPerWeek":1, "city":1, "region":1, "postalCode":1,
  "country":1, "street1":1, "street2":1, "location_string":1
  }
# TODO: replace with a better parser-- after wasting hours, I gave up
# on strptime(). Do not add to utils.py -- this is a bad hack
def parseTimestamp(dateStr, timeStr):
  """Parses loosely-formatted date and time strings into a datetime.

  Accepts MM/DD/YYYY, YYYY-MM-DD and MM/DD/YY dates (with "/", "-" or no
  separator) and "H:MM" or "H:MM AM/PM" times.

  Args:
    dateStr: date portion, e.g. "02/22/2009" or "2009-02-22".
    timeStr: time portion, e.g. "6:45 PM".

  Returns:
    A datetime, or None if either portion could not be parsed.
  """
  dateStr = dateStr.strip()
  year = month = day = None
  # MM/DD/YYYY first -- the common form on the submission page.
  grp = re.match(r'(\d?\d)[/-]?(\d?\d)[/-]?(\d\d\d\d)', dateStr)
  if grp:
    month = int(grp.group(1))
    day = int(grp.group(2))
    year = int(grp.group(3))
  if year is None or month < 1 or month > 12 or day < 1 or day > 31:
    # BUG FIX: ISO dates must be tried before the two-digit-year pattern.
    # Previously "2009-02-22" was captured as month=20/day=9/year=1902 by
    # the MM/DD/YY pattern and always came back None.
    grp = re.match(r'(\d\d\d\d)[/-]?(\d\d)[/-]?(\d\d)', dateStr)
    if grp:
      year = int(grp.group(1))
      month = int(grp.group(2))
      day = int(grp.group(3))
    elif year is None:
      # Finally try MM/DD/YY with an implied 19xx century (legacy behavior).
      grp = re.match(r'(\d?\d)[/-]?(\d?\d)[/-]?(\d\d)', dateStr)
      if grp:
        month = int(grp.group(1))
        day = int(grp.group(2))
        year = int(grp.group(3)) + 1900
      else:
        return None
  hour = minute = 0
  timeStr = timeStr.strip().upper()
  grp = re.match(r'(\d?\d):(\d\d) *(AM|PM)?', timeStr)
  if not grp:
    return None
  hour = int(grp.group(1))
  minute = int(grp.group(2))
  ampm = grp.group(3)
  # BUG FIX: 12-hour handling.  The old code added 12 to any PM hour, so
  # "12:xx PM" became hour 24 (datetime raised, returning None), and
  # "12:xx AM" stayed at noon instead of midnight.
  if ampm == "PM" and hour != 12:
    hour += 12
  elif ampm == "AM" and hour == 12:
    hour = 0
  try:
    return datetime(year, month, day, hour, minute, 0)
  except ValueError:
    # Impossible calendar values (month 13, day 32, ...) mean "unparseable".
    return None
def cleanup_args(vals):
  """Normalizes a submitted form dict in place.

  XML-escapes values for keys in argnames, blanks unknown keys, fills in
  missing keys with "", then blanks mutually-incompatible fields
  (address vs. virtual, dates vs. open-ended, weekly vs. biweekly).

  Args:
    vals: dict of form-field name -> string value; mutated in place.
  """
  # keep only known argnames
  for key in vals:
    if key in argnames:
      vals[key] = escape(vals[key])
      #vals[key] = re.sub(r'(<!\[CDATA\[\|\]\]>)', r'', vals[key])
    else:
      vals[key] = ""
  for key in argnames:
    if key not in vals:
      vals[key] = ""
  # blank-out incompatible fields
  if vals["virtual"] != "No":
    vals["virtual"] = "Yes"
    vals["addr1"] = vals["addrname1"] = ""
  if vals["openEnded"] != "No":
    vals["openEnded"] = "Yes"
    vals["startDate"] = vals["startTime"] = ""
    vals["endDate"] = vals["endTime"] = ""
  # footprint isn't very interesting when it comes to gender
  if len(vals["sexRestrictedTo"]) < 1:
    vals["sexRestrictedTo"] = ""
  elif vals["sexRestrictedTo"][0].upper() == "M":
    vals["sexRestrictedTo"] = "M"
  elif vals["sexRestrictedTo"][0].upper() == "F":
    vals["sexRestrictedTo"] = "F"
  else:
    vals["sexRestrictedTo"] = ""
  # once, one-time or weekly, then blank-out biweekly
  if vals["recurrence"] in ("Weekly", "No", "Daily"):
    for arg in argnames:
      if arg.startswith("biweekly"):
        # BUG FIX: was "vals[arg] == ''" -- a no-op comparison -- so the
        # biweekly day fields were never actually cleared.
        vals[arg] = ""
  # once, one-time or biweekly, then blank-out weekly
  if vals["recurrence"] in ("BiWeekly", "No", "Daily"):
    for arg in argnames:
      if arg.startswith("weekly"):
        # BUG FIX: same no-op comparison as above.
        vals[arg] = ""
def add_new_fields(vals, newvals):
  """Derives computed fields from cleaned form values.

  Args:
    vals: cleaned form dict (see cleanup_args); "country" may be defaulted
        to "US" in place.
    newvals: dict that receives derived fields: complete_addr, latitude,
        longitude, parsedStartDate/Time, parsedEndDate/Time, computedMinAge
        and computedCommitmentHoursPerWeek.
  """
  if vals["country"] == "":
    vals["country"] = "US"
  # Assemble one free-form address string for geocoding.
  addr = vals["street1"]
  addr += " "+vals["street2"]
  addr += " "+vals["city"]
  addr += " "+vals["region"]
  addr += " "+vals["country"]
  newvals["complete_addr"] = addr
  logging.debug("post: geocoding "+addr)
  # NOTE(review): geocode.geocode apparently returns "" on failure or a
  # "lat,long[,...]" string on success -- confirm in the geocode module.
  latlong = geocode.geocode(addr)
  logging.debug("post: latlong="+latlong)
  if latlong == "":
    newvals["latitude"] = newvals["longitude"] = ""
  else:
    newvals["latitude"],newvals["longitude"] = latlong.split(",")[:2]
  # Parse start/end timestamps only for non-open-ended listings.
  newvals["parsedStartDate"] = newvals["parsedStartTime"] = ""
  newvals["parsedEndDate"] = newvals["parsedEndTime"] = ""
  if vals["openEnded"] == "No":
    startTs = parseTimestamp(vals["startDate"], vals["startTime"])
    if startTs:
      newvals["parsedStartDate"] = startTs.strftime("%Y-%m-%d")
      newvals["parsedStartTime"] = startTs.strftime("%H:%M:%S")
    endTs = parseTimestamp(vals["endDate"], vals["endTime"])
    if endTs:
      newvals["parsedEndDate"] = endTs.strftime("%Y-%m-%d")
      newvals["parsedEndTime"] = endTs.strftime("%H:%M:%S")
  # Map audience buckets onto a numeric minimum age; otherwise fall back
  # to the explicit minAge field (0 if unparseable).
  newvals["computedMinAge"] = 0
  if vals["audienceAge"] == "seniors":
    newvals["computedMinAge"] = 60
  elif vals["audienceAge"] == "teens":
    newvals["computedMinAge"] = 13
  elif vals["audienceAge"] == "anyage":
    newvals["computedMinAge"] = 0
  else:
    try:
      newvals["computedMinAge"] = int(vals["minAge"])
    except:
      newvals["computedMinAge"] = 0
  # Clamp hours/week to a non-negative int; 0 if missing or unparseable.
  try:
    newvals["computedCommitmentHoursPerWeek"] = int(vals["commitmentHoursPerWeek"])
    if newvals["computedCommitmentHoursPerWeek"] < 0:
      newvals["computedCommitmentHoursPerWeek"] = 0
  except:
    newvals["computedCommitmentHoursPerWeek"] = 0
def create_from_args(vals, computed_vals):
  """Builds listing XML from cleaned form args and stores a Posting.

  Args:
    vals: raw form-field dict; cleaned/escaped in place via cleanup_args.
    computed_vals: dict that receives derived fields (geocode results,
        parsed dates, computed ages/hours) via add_new_fields.

  Returns:
    (http_status, item_key, payload): (200, key, xml) on success, or
    (402, "", error_message) when a physical address cannot be geocoded.
  """
  # note: don't need to worry (much) about hacked-forms because we're
  # using CAPTCHA to avoid bot submissions.
  cleanup_args(vals)
  add_new_fields(vals, computed_vals)
  if vals["virtual"] == 'No' and computed_vals["latitude"] == "":
    return 402, "", "cannot find address: '"+computed_vals["complete_addr"]+"'"
  xml = "<VolunteerOpportunity>"
  # NOTE(review): cleanup_args blanks keys not in argnames, and
  # recaptcha_response_field is not in argnames, so this "test" marker
  # looks unreachable -- confirm intended.
  if vals["recaptcha_response_field"] == "test":
    # basic security measure
    xml += "<isTest>Yes</isTest>"
    vals["title"] = "T:" + vals["title"]
    vals["description"] = "TEST DELETEME: " + vals["description"]
  # TODO: organization
  #xml += "<volunteerOpportunityID>%d</volunteerOpportunityID>" % (item_id)
  #xml += "<sponsoringOrganizationIDs><sponsoringOrganizationID>%d</sponsoringOrganizationID></sponsoringOrganizationIDs>" % (item_id)
  #xml += "<volunteerHubOrganizationIDs><volunteerHubOrganizationID>%s</volunteerHubOrganizationID></volunteerHubOrganizationIDs>" % ("")
  xml += "<title>%s</title>" % (vals["title"])
  xml += "<description>%s</description>" % (vals["description"])
  xml += "<skills>%s</skills>" % (vals["skills"])
  xml += "<minimumAge>%s</minimumAge>" % (str(computed_vals["computedMinAge"]))
  xml += "<detailURL>%s</detailURL>" % (vals["detailURL"])
  xml += "<locations>"
  xml += "<location>"
  xml += "<name>%s</name>" % (vals["addrname1"])
  xml += "<city>%s</city>" % (vals["city"])
  xml += "<region>%s</region>" % (vals["region"])
  xml += "<postalCode>%s</postalCode>" % (vals["postalCode"])
  xml += "<country>%s</country>" % (vals["country"])
  xml += "<latitude>%s</latitude>" % (computed_vals["latitude"])
  xml += "<longitude>%s</longitude>" % (computed_vals["longitude"])
  xml += "</location>"
  xml += "</locations>"
  # TODO: category tags
  #xml += "<categoryTags>"
  #xml += "<categoryTag>Community</categoryTag>"
  #xml += "</categoryTags>"
  xml += "<dateTimeDurations>"
  xml += "<dateTimeDuration>"
  xml += "<openEnded>%s</openEnded>" % (vals["openEnded"])
  if vals["openEnded"] == "No":
    # BUG FIX: these previously read computed_vals["startDate"] etc., but
    # add_new_fields stores the values under "parsedStartDate" etc., so
    # every non-open-ended submission raised KeyError here.
    xml += "<startDate>%s</startDate>" % (computed_vals["parsedStartDate"])
    xml += "<startTime>%s</startTime>" % (computed_vals["parsedStartTime"])
    xml += "<endDate>%s</endDate>" % (computed_vals["parsedEndDate"])
    xml += "<endTime>%s</endTime>" % (computed_vals["parsedEndTime"])
  xml += "<commitmentHoursPerWeek>%d</commitmentHoursPerWeek>" % \
      (computed_vals["computedCommitmentHoursPerWeek"])
  xml += "</dateTimeDuration>"
  xml += "</dateTimeDurations>"
  xml += "</VolunteerOpportunity>"
  #logging.info(re.sub(r'><', '>\n<', xml))
  item_id = create_from_xml(xml)
  return 200, item_id, xml
def createTestDatabase():
  """Seeds the datastore with three canned VolunteerOpportunity listings.

  Returns:
    Tuple of the three new Posting keys ("" for any listing whose XML
    failed to parse).
  """
  id1 = create_from_xml("<VolunteerOpportunity><volunteerOpportunityID>1001</volunteerOpportunityID><sponsoringOrganizationIDs><sponsoringOrganizationID>1</sponsoringOrganizationID></sponsoringOrganizationIDs><volunteerHubOrganizationIDs><volunteerHubOrganizationID>3011</volunteerHubOrganizationID></volunteerHubOrganizationIDs><title>Be a Business Mentor - Trenton, NJ & Beyond</title><dateTimeDurations><dateTimeDuration><openEnded>Yes</openEnded><duration>P6M</duration><commitmentHoursPerWeek>4</commitmentHoursPerWeek></dateTimeDuration></dateTimeDurations><locations><location><city>Trenton</city><region>NJ</region><postalCode>08608</postalCode></location><location><city>Berkeley</city><region>CA</region><postalCode>94703</postalCode></location><location><city>Santa Cruz</city><region>CA</region><postalCode>95062</postalCode></location></locations><categoryTags><categoryTag>Community</categoryTag><categoryTag>Computers & Technology</categoryTag><categoryTag>Employment</categoryTag></categoryTags><minimumAge>21</minimumAge><skills>In order to maintain the integrity of the MicroMentor program, we require that our Mentor volunteers have significant business experience and expertise, such as: 3 years of business ownership experience</skills><detailURL>http://www.volunteermatch.org/search/index.jsp?l=08540</detailURL><description>This is where you come in. Simply by sharing your business know-how, you can make a huge difference in the lives of entrepreneurs from low-income and marginalized communities, helping them navigate the opportunities and challenges of running a business and improving their economic well-being and creating new jobs where they are most needed.</description></VolunteerOpportunity>")
  id2 = create_from_xml("<VolunteerOpportunity><volunteerOpportunityID>2001</volunteerOpportunityID><sponsoringOrganizationIDs><sponsoringOrganizationID>2</sponsoringOrganizationID></sponsoringOrganizationIDs><title>DODGEBALL TO HELP AREA HUNGRY</title><dateTimeDurations><dateTimeDuration><openEnded>No</openEnded><startDate>2009-02-22</startDate><endDate>2009-02-22</endDate><startTime>18:45:00</startTime><endTime>21:00:00</endTime></dateTimeDuration><dateTimeDuration><openEnded>No</openEnded><startDate>2009-02-27</startDate><endDate>2009-02-27</endDate><startTime>18:45:00</startTime><endTime>21:00:00</endTime></dateTimeDuration></dateTimeDurations><locations><location><city>West Windsor</city><region>NJ</region><postalCode>08550</postalCode></location></locations><audienceTags><audienceTag>Teens</audienceTag><audienceTag>High School Students</audienceTag></audienceTags><categoryTags><categoryTag>Community</categoryTag><categoryTag>Homeless & Hungry</categoryTag><categoryTag>Hunger</categoryTag></categoryTags><minimumAge>14</minimumAge><skills>Must be in High School</skills><detailURL>http://www.volunteermatch.org/search/opp451561.jsp</detailURL><description>The Mercer County Quixote Quest Teen Volunteer Club is hosting a FUN Dodgeball Tournament at Mercer County College on Sunday afternoon, February 22nd. The proceeds from the event will bebefit the Trenton Area Soup Kitchen. Teens are invited to enter a team of six...with at least three female players (3 guys and 3 girls or more girls). Each team playing will bring a $50 entry fee and a matching sponsor donation of $50. (Total of $100 from each team).</description><lastUpdated olsonTZ=\"America/Denver\">2009-02-02T19:02:01</lastUpdated></VolunteerOpportunity>")
  id3 = create_from_xml("<VolunteerOpportunity><volunteerOpportunityID>2002</volunteerOpportunityID><sponsoringOrganizationIDs><sponsoringOrganizationID>2</sponsoringOrganizationID></sponsoringOrganizationIDs><title>YOUNG ADULT TO HELP GUIDE MERCER COUNTY TEEN VOLUNTEER CLUB</title><volunteersNeeded>3</volunteersNeeded><dateTimeDurations><dateTimeDuration><openEnded>No</openEnded><startDate>2009-01-01</startDate><endDate>2009-05-31</endDate><iCalRecurrence>FREQ=WEEKLY;INTERVAL=2</iCalRecurrence><commitmentHoursPerWeek>2</commitmentHoursPerWeek></dateTimeDuration></dateTimeDurations><locations><location><city>Mercer County</city><region>NJ</region><postalCode>08610</postalCode></location></locations><audienceTags><audienceTag>Teens</audienceTag></audienceTags><categoryTags><categoryTag>Community</categoryTag><categoryTag>Children & Youth</categoryTag></categoryTags><skills>Be interested in promoting youth volunteerism. Be available two Tuesday evenings per month.</skills><detailURL>http://www.volunteermatch.org/search/opp200517.jsp</detailURL><description>Quixote Quest is a volunteer club for teens who have a passion for community service. The teens each volunteer for their own specific cause. Twice monthly, the club meets. At the club meetings the teens from different high schools come together for two hours to talk about their volunteer experiences and spend some hang-out time together that helps them bond as fraternity...family. Quixote Quest is seeking young adults roughly between 20 and 30 years of age who would be interested in being a guide and advisor to the teens during these two evening meetings a month.</description><lastUpdated olsonTZ=\"America/Denver\">2008-12-02T19:02:01</lastUpdated></VolunteerOpportunity>")
  return (id1,id2,id3)
| Python |
#!/usr/bin/python
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import logging
import re
import hashlib
import geocode
import utils
from xml.dom import minidom
from xml.sax.saxutils import escape
from google.appengine.ext import db
class Error(Exception):
  """Base class for exceptions raised by this module."""
  pass
# status codes
# - string names to make them human-readable, i.e. easier debugging
# - leading number provides SQL/GQL sorting without an extra field
#   (sorting is important for the moderator UI, to make sure most-
#   likely-to-be-safe is ranked higher). Note: edited comes before
#   plain new
# - substrings (e.g. "NEW") provide groupings, e.g. is this a 'new'
#   listing, so the moderator UI know what visual treatment to give it.
# Pending ("NEW") states, most-likely-safe first.
NEW_EDITED_VERIFIED = "90.NEW_EDITED_VERIFIED"
NEW_VERIFIED = "80.NEW_VERIFIED"
NEW_EDITED = "70.NEW_EDITED"
NEW = "50.NEW"
NEW_DEFERRED = "40.NEW_DEFERRED"
# Terminal states share the "10." prefix so they sort after pending ones.
ACCEPTED_MANUAL = "10.ACCEPTED_MANUAL"
ACCEPTED_AUTOMATIC = "10.ACCEPTED_AUTOMATIC"
REJECTED_MANUAL = "10.REJECTED_MANUAL"
REJECTED_AUTOMATIC = "10.REJECTED_AUTOMATIC"
class Posting(db.Model):
  """Postings going through the approval process."""
  # Key is assigned ID (not the stable ID)
  item_id = db.StringProperty(default="")
  status = db.StringProperty(default=NEW)
  # for queries, parse-out these fields - note that we don't care about datatypes
  quality_score = db.FloatProperty(default=1.0)
  creation_time = db.DateTimeProperty(auto_now_add=True)
  start_date = db.DateProperty(auto_now_add=True)
  # listing_xml is the full contents for the listing, assuming it gets approved
  # note: listing_xml also used for fulltext queries
  listing_xml = db.TextProperty(default="")
  # parse-out these fields to improve latency in the moderation UI
  title = db.StringProperty(default="")
  description = db.TextProperty(default="")
  # as per http://code.google.com/p/googleappengine/issues/detail?id=105
  # there's no point in GeoPT esp. given that we're only using this for display
  # there's even bugs (http://aralbalkan.com/1355) in GeoPT, so the heck with it.
  #todo latlong = db.StringProperty(default="")

  def statusChar(self):
    """One-letter moderation outcome: 'A', 'R', or '' while still pending."""
    if "ACCEPTED" in self.status:
      return "A"
    if "REJECTED" in self.status:
      return "R"
    return ""

  def showInModerator(self):
    """True while the posting is in one of the NEW states awaiting review."""
    return "NEW" in self.status

  def isLive(self):
    """True once the posting has been accepted (manually or automatically)."""
    return "ACCEPTED" in self.status

  def reset(self):
    """Puts the posting back to plain NEW."""
    self.status = NEW
    self.put()

  def edit(self):
    """Marks the posting as edited, which re-enters the moderation queue."""
    self.status = NEW_EDITED
    self.put()

  def verify(self):
    """Records email verification, preserving the edited/plain distinction."""
    # TODO: how do we know the edits didn't after the email was sent?
    transitions = {NEW: NEW_VERIFIED, NEW_EDITED: NEW_EDITED_VERIFIED}
    if self.status in transitions:
      self.status = transitions[self.status]
      self.put()

  def accept(self, type="MANUAL"):
    """Accepts the posting; type is "MANUAL" or "AUTOMATIC"."""
    self.status = ACCEPTED_AUTOMATIC if type == "AUTOMATIC" else ACCEPTED_MANUAL
    self.put()

  def reject(self, type="MANUAL"):
    """Rejects the posting; type is "MANUAL" or "AUTOMATIC"."""
    self.status = REJECTED_AUTOMATIC if type == "AUTOMATIC" else REJECTED_MANUAL
    self.put()

  def computeQualityScore(self):
    """Recomputes and stores quality_score (currently a fixed placeholder)."""
    # TODO: walk the object to look for missing/bad fields
    self.quality_score = 1.0
    self.put()
def process(args):
  """Applies moderator actions from request parameters to Posting entities.

  Args:
    args: mapping of parameter name -> action code.  Keys of the form
        "v<datastore key>" identify a Posting; the value is one of
        "A" (accept), "R" (reject), "V" (verify), "X" (delete) or
        "" (reset).  All other keys are ignored.
  """
  for arg in args:
    # Only parameters shaped like "v<key>" carry actions.  startswith also
    # safely skips empty keys (arg[0] would have raised IndexError).
    if not arg.startswith("v"):
      continue
    keystr = arg[1:]
    el = Posting.get(keystr)
    if el is None:  # was "== None"; identity test is the correct idiom
      # already deleted!
      continue
    # TODO: remove quality score hack-- this is how to rank in moderator UI
    action = args[arg]
    if action == "A":
      el.accept()
    elif action == "R":
      el.reject()
    elif action == "V":
      el.verify()
    elif action == "X":
      logging.debug("deleting: " + keystr + " title=" + el.title)
      el.delete()
    elif action == "":
      el.reset()
def query(num=25, start=1, quality_score=0.5, start_date="2009-01-01"):
  """Fetches postings for the moderator UI.

  Args:
    num: maximum number of results.
    start: result offset, for paging.
    quality_score: if exactly 0.0, filter/sort by start_date instead of
        ranking by quality score.
    start_date: "YYYY-MM-DD" lower bound; used only when quality_score==0.0.

  Returns:
    List of Posting entities, each annotated in place with .key (string
    form of the datastore key -- note this shadows the key() method),
    .listing_fmtd (rough HTML rendering of listing_xml) and .status_char.
  """
  # TODO: GQL doesn't support string-CONTAINS, limiting keyword search
  # TODO: GQL doesn't let you do inequality comparison on multiple fields.
  if quality_score == 0.0:
    sd = datetime.strptime(start_date, "%Y-%m-%d")
    q = db.GqlQuery("SELECT * FROM Posting " +
                    "WHERE start_date >= :1 " +
                    "ORDER BY status ASC, start_date ASC " +
                    "LIMIT %d OFFSET %d" % (int(num), int(start)),
                    sd.date())
  else:
    q = db.GqlQuery("SELECT * FROM Posting " +
                    "ORDER BY status ASC,quality_score DESC " +
                    "LIMIT %d OFFSET %d" % (int(num), int(start)))
  result_set = q.fetch(num)
  reslist = []
  for result in result_set:
    result.key = str(result.key())
    # Crude XML->HTML formatting: tag boundaries become "-qbr--" markers,
    # runs of closing tags collapse, ">" becomes ": ", then markers turn
    # into <br/> and duplicate breaks collapse.  Order matters here.
    result.listing_fmtd = re.sub(r'><', '-qbr--', result.listing_xml);
    result.listing_fmtd = re.sub(r'(<?/[a-zA-Z]+-qbr--)+', '-qbr--', result.listing_fmtd);
    result.listing_fmtd = re.sub(r'>', ': ', result.listing_fmtd);
    result.listing_fmtd = re.sub(r'-qbr--', '<br/>', result.listing_fmtd)
    result.listing_fmtd = re.sub(r'(<br/>)+', '<br/>', result.listing_fmtd)
    result.status_char = result.statusChar()
    reslist.append(result)
  return reslist
def create_from_xml(xml):
  """Creates a Posting entity from a VolunteerOpportunity XML string.

  Args:
    xml: serialized listing XML.

  Returns:
    The datastore key of the new Posting, or "" if the XML did not parse.
  """
  try:
    dom = minidom.parseString(xml)
  except Exception:
    # Was a bare "except:", which also swallowed SystemExit and
    # KeyboardInterrupt; unparseable input still just returns "".
    logging.warning("create_from_xml: unparseable XML, skipping")
    return ""
  posting = Posting(listing_xml=xml)
  posting.title = utils.xml_elem_text(dom, "title", '')
  logging.debug("create_from_xml: title="+posting.title)
  logging.debug("create_from_xml: xml="+xml)
  posting.description = utils.xml_elem_text(dom, "description", '')
  try:
    start_date = datetime.strptime(utils.xml_elem_text(
        dom, "startDate", ''), "%Y-%m-%d")
    posting.start_date = start_date.date()
  except (ValueError, TypeError):
    # ignore bad/missing start date -- the auto_now_add default applies
    pass
  # NOTE(review): creation_time is auto_now_add and may still be None
  # before put(), so the hash may just mix in "None" -- confirm intended.
  posting.item_id = hashlib.md5(xml + str(posting.creation_time)).hexdigest()
  posting.put()
  return posting.key()
# Whitelist of form-field names accepted from the posting form; used by
# cleanup_args to escape known keys and blank everything else.  The values
# (all 1) are meaningless -- only key membership and iteration matter.
argnames = {
  "title":1, "description":1, "skills":1, "virtual":1, "addr1":1, "addrname1":1,
  "sponsoringOrganizationName":1, "openEnded":1, "startDate":1,
  "startTime":1, "endTime":1, "endDate":1, "contactNoneNeeded":1,
  "contactEmail":1, "contactPhone":1, "contactName":1, "detailURL":1,
  "weeklySun":1, "weeklyMon":1, "weeklyTue":1, "weeklyWed":1, "weeklyThu":1,
  "weeklyFri":1, "weeklySat":1, "biweeklySun":1, "biweeklyMon":1,
  "biweeklyTue":1, "biweeklyWed":1, "biweeklyThu":1, "biweeklyFri":1,
  "biweeklySat":1, "recurrence":1, "audienceAll":1, "audienceAge":1,
  "minAge":1, "audienceSexRestricted":1, "sexRestrictedTo":1,
  "commitmentHoursPerWeek":1, "city":1, "region":1, "postalCode":1,
  "country":1, "street1":1, "street2":1, "location_string":1
  }
# TODO: replace with a better parser-- after wasting hours, I gave up
# on strptime(). Do not add to utils.py -- this is a bad hack
def parseTimestamp(dateStr, timeStr):
  """Parses loosely-formatted date and time strings into a datetime.

  Accepts MM/DD/YYYY, YYYY-MM-DD and MM/DD/YY dates (with "/", "-" or no
  separator) and "H:MM" or "H:MM AM/PM" times.

  Args:
    dateStr: date portion, e.g. "02/22/2009" or "2009-02-22".
    timeStr: time portion, e.g. "6:45 PM".

  Returns:
    A datetime, or None if either portion could not be parsed.
  """
  dateStr = dateStr.strip()
  year = month = day = None
  # MM/DD/YYYY first -- the common form on the submission page.
  grp = re.match(r'(\d?\d)[/-]?(\d?\d)[/-]?(\d\d\d\d)', dateStr)
  if grp:
    month = int(grp.group(1))
    day = int(grp.group(2))
    year = int(grp.group(3))
  if year is None or month < 1 or month > 12 or day < 1 or day > 31:
    # BUG FIX: ISO dates must be tried before the two-digit-year pattern.
    # Previously "2009-02-22" was captured as month=20/day=9/year=1902 by
    # the MM/DD/YY pattern and always came back None.
    grp = re.match(r'(\d\d\d\d)[/-]?(\d\d)[/-]?(\d\d)', dateStr)
    if grp:
      year = int(grp.group(1))
      month = int(grp.group(2))
      day = int(grp.group(3))
    elif year is None:
      # Finally try MM/DD/YY with an implied 19xx century (legacy behavior).
      grp = re.match(r'(\d?\d)[/-]?(\d?\d)[/-]?(\d\d)', dateStr)
      if grp:
        month = int(grp.group(1))
        day = int(grp.group(2))
        year = int(grp.group(3)) + 1900
      else:
        return None
  hour = minute = 0
  timeStr = timeStr.strip().upper()
  grp = re.match(r'(\d?\d):(\d\d) *(AM|PM)?', timeStr)
  if not grp:
    return None
  hour = int(grp.group(1))
  minute = int(grp.group(2))
  ampm = grp.group(3)
  # BUG FIX: 12-hour handling.  The old code added 12 to any PM hour, so
  # "12:xx PM" became hour 24 (datetime raised, returning None), and
  # "12:xx AM" stayed at noon instead of midnight.
  if ampm == "PM" and hour != 12:
    hour += 12
  elif ampm == "AM" and hour == 12:
    hour = 0
  try:
    return datetime(year, month, day, hour, minute, 0)
  except ValueError:
    # Impossible calendar values (month 13, day 32, ...) mean "unparseable".
    return None
def cleanup_args(vals):
  """Normalizes a submitted form dict in place.

  XML-escapes values for keys in argnames, blanks unknown keys, fills in
  missing keys with "", then blanks mutually-incompatible fields
  (address vs. virtual, dates vs. open-ended, weekly vs. biweekly).

  Args:
    vals: dict of form-field name -> string value; mutated in place.
  """
  # keep only known argnames
  for key in vals:
    if key in argnames:
      vals[key] = escape(vals[key])
      #vals[key] = re.sub(r'(<!\[CDATA\[\|\]\]>)', r'', vals[key])
    else:
      vals[key] = ""
  for key in argnames:
    if key not in vals:
      vals[key] = ""
  # blank-out incompatible fields
  if vals["virtual"] != "No":
    vals["virtual"] = "Yes"
    vals["addr1"] = vals["addrname1"] = ""
  if vals["openEnded"] != "No":
    vals["openEnded"] = "Yes"
    vals["startDate"] = vals["startTime"] = ""
    vals["endDate"] = vals["endTime"] = ""
  # footprint isn't very interesting when it comes to gender
  if len(vals["sexRestrictedTo"]) < 1:
    vals["sexRestrictedTo"] = ""
  elif vals["sexRestrictedTo"][0].upper() == "M":
    vals["sexRestrictedTo"] = "M"
  elif vals["sexRestrictedTo"][0].upper() == "F":
    vals["sexRestrictedTo"] = "F"
  else:
    vals["sexRestrictedTo"] = ""
  # once, one-time or weekly, then blank-out biweekly
  if vals["recurrence"] in ("Weekly", "No", "Daily"):
    for arg in argnames:
      if arg.startswith("biweekly"):
        # BUG FIX: was "vals[arg] == ''" -- a no-op comparison -- so the
        # biweekly day fields were never actually cleared.
        vals[arg] = ""
  # once, one-time or biweekly, then blank-out weekly
  if vals["recurrence"] in ("BiWeekly", "No", "Daily"):
    for arg in argnames:
      if arg.startswith("weekly"):
        # BUG FIX: same no-op comparison as above.
        vals[arg] = ""
def add_new_fields(vals, newvals):
  """Derives computed fields from cleaned form values.

  Args:
    vals: cleaned form dict (see cleanup_args); "country" may be defaulted
        to "US" in place.
    newvals: dict that receives derived fields: complete_addr, latitude,
        longitude, parsedStartDate/Time, parsedEndDate/Time, computedMinAge
        and computedCommitmentHoursPerWeek.
  """
  if vals["country"] == "":
    vals["country"] = "US"
  # Assemble one free-form address string for geocoding.
  addr = vals["street1"]
  addr += " "+vals["street2"]
  addr += " "+vals["city"]
  addr += " "+vals["region"]
  addr += " "+vals["country"]
  newvals["complete_addr"] = addr
  logging.debug("post: geocoding "+addr)
  # NOTE(review): geocode.geocode apparently returns "" on failure or a
  # "lat,long[,...]" string on success -- confirm in the geocode module.
  latlong = geocode.geocode(addr)
  logging.debug("post: latlong="+latlong)
  if latlong == "":
    newvals["latitude"] = newvals["longitude"] = ""
  else:
    newvals["latitude"],newvals["longitude"] = latlong.split(",")[:2]
  # Parse start/end timestamps only for non-open-ended listings.
  newvals["parsedStartDate"] = newvals["parsedStartTime"] = ""
  newvals["parsedEndDate"] = newvals["parsedEndTime"] = ""
  if vals["openEnded"] == "No":
    startTs = parseTimestamp(vals["startDate"], vals["startTime"])
    if startTs:
      newvals["parsedStartDate"] = startTs.strftime("%Y-%m-%d")
      newvals["parsedStartTime"] = startTs.strftime("%H:%M:%S")
    endTs = parseTimestamp(vals["endDate"], vals["endTime"])
    if endTs:
      newvals["parsedEndDate"] = endTs.strftime("%Y-%m-%d")
      newvals["parsedEndTime"] = endTs.strftime("%H:%M:%S")
  # Map audience buckets onto a numeric minimum age; otherwise fall back
  # to the explicit minAge field (0 if unparseable).
  newvals["computedMinAge"] = 0
  if vals["audienceAge"] == "seniors":
    newvals["computedMinAge"] = 60
  elif vals["audienceAge"] == "teens":
    newvals["computedMinAge"] = 13
  elif vals["audienceAge"] == "anyage":
    newvals["computedMinAge"] = 0
  else:
    try:
      newvals["computedMinAge"] = int(vals["minAge"])
    except:
      newvals["computedMinAge"] = 0
  # Clamp hours/week to a non-negative int; 0 if missing or unparseable.
  try:
    newvals["computedCommitmentHoursPerWeek"] = int(vals["commitmentHoursPerWeek"])
    if newvals["computedCommitmentHoursPerWeek"] < 0:
      newvals["computedCommitmentHoursPerWeek"] = 0
  except:
    newvals["computedCommitmentHoursPerWeek"] = 0
def create_from_args(vals, computed_vals):
  """Builds listing XML from cleaned form args and stores a Posting.

  Args:
    vals: raw form-field dict; cleaned/escaped in place via cleanup_args.
    computed_vals: dict that receives derived fields (geocode results,
        parsed dates, computed ages/hours) via add_new_fields.

  Returns:
    (http_status, item_key, payload): (200, key, xml) on success, or
    (402, "", error_message) when a physical address cannot be geocoded.
  """
  # note: don't need to worry (much) about hacked-forms because we're
  # using CAPTCHA to avoid bot submissions.
  cleanup_args(vals)
  add_new_fields(vals, computed_vals)
  if vals["virtual"] == 'No' and computed_vals["latitude"] == "":
    return 402, "", "cannot find address: '"+computed_vals["complete_addr"]+"'"
  xml = "<VolunteerOpportunity>"
  # NOTE(review): cleanup_args blanks keys not in argnames, and
  # recaptcha_response_field is not in argnames, so this "test" marker
  # looks unreachable -- confirm intended.
  if vals["recaptcha_response_field"] == "test":
    # basic security measure
    xml += "<isTest>Yes</isTest>"
    vals["title"] = "T:" + vals["title"]
    vals["description"] = "TEST DELETEME: " + vals["description"]
  # TODO: organization
  #xml += "<volunteerOpportunityID>%d</volunteerOpportunityID>" % (item_id)
  #xml += "<sponsoringOrganizationIDs><sponsoringOrganizationID>%d</sponsoringOrganizationID></sponsoringOrganizationIDs>" % (item_id)
  #xml += "<volunteerHubOrganizationIDs><volunteerHubOrganizationID>%s</volunteerHubOrganizationID></volunteerHubOrganizationIDs>" % ("")
  xml += "<title>%s</title>" % (vals["title"])
  xml += "<description>%s</description>" % (vals["description"])
  xml += "<skills>%s</skills>" % (vals["skills"])
  xml += "<minimumAge>%s</minimumAge>" % (str(computed_vals["computedMinAge"]))
  xml += "<detailURL>%s</detailURL>" % (vals["detailURL"])
  xml += "<locations>"
  xml += "<location>"
  xml += "<name>%s</name>" % (vals["addrname1"])
  xml += "<city>%s</city>" % (vals["city"])
  xml += "<region>%s</region>" % (vals["region"])
  xml += "<postalCode>%s</postalCode>" % (vals["postalCode"])
  xml += "<country>%s</country>" % (vals["country"])
  xml += "<latitude>%s</latitude>" % (computed_vals["latitude"])
  xml += "<longitude>%s</longitude>" % (computed_vals["longitude"])
  xml += "</location>"
  xml += "</locations>"
  # TODO: category tags
  #xml += "<categoryTags>"
  #xml += "<categoryTag>Community</categoryTag>"
  #xml += "</categoryTags>"
  xml += "<dateTimeDurations>"
  xml += "<dateTimeDuration>"
  xml += "<openEnded>%s</openEnded>" % (vals["openEnded"])
  if vals["openEnded"] == "No":
    # BUG FIX: these previously read computed_vals["startDate"] etc., but
    # add_new_fields stores the values under "parsedStartDate" etc., so
    # every non-open-ended submission raised KeyError here.
    xml += "<startDate>%s</startDate>" % (computed_vals["parsedStartDate"])
    xml += "<startTime>%s</startTime>" % (computed_vals["parsedStartTime"])
    xml += "<endDate>%s</endDate>" % (computed_vals["parsedEndDate"])
    xml += "<endTime>%s</endTime>" % (computed_vals["parsedEndTime"])
  xml += "<commitmentHoursPerWeek>%d</commitmentHoursPerWeek>" % \
      (computed_vals["computedCommitmentHoursPerWeek"])
  xml += "</dateTimeDuration>"
  xml += "</dateTimeDurations>"
  xml += "</VolunteerOpportunity>"
  #logging.info(re.sub(r'><', '>\n<', xml))
  item_id = create_from_xml(xml)
  return 200, item_id, xml
def createTestDatabase():
  """Populate the datastore with three canned VolunteerOpportunity records.

  Calls create_from_xml() (defined earlier in this module) for each canned
  FPXML record: one open-ended multi-location listing, one with two fixed
  dateTimeDurations, and one recurring listing with an iCal recurrence.

  Returns:
    Tuple of the three item ids returned by create_from_xml.
  """
  id1 = create_from_xml("<VolunteerOpportunity><volunteerOpportunityID>1001</volunteerOpportunityID><sponsoringOrganizationIDs><sponsoringOrganizationID>1</sponsoringOrganizationID></sponsoringOrganizationIDs><volunteerHubOrganizationIDs><volunteerHubOrganizationID>3011</volunteerHubOrganizationID></volunteerHubOrganizationIDs><title>Be a Business Mentor - Trenton, NJ & Beyond</title><dateTimeDurations><dateTimeDuration><openEnded>Yes</openEnded><duration>P6M</duration><commitmentHoursPerWeek>4</commitmentHoursPerWeek></dateTimeDuration></dateTimeDurations><locations><location><city>Trenton</city><region>NJ</region><postalCode>08608</postalCode></location><location><city>Berkeley</city><region>CA</region><postalCode>94703</postalCode></location><location><city>Santa Cruz</city><region>CA</region><postalCode>95062</postalCode></location></locations><categoryTags><categoryTag>Community</categoryTag><categoryTag>Computers & Technology</categoryTag><categoryTag>Employment</categoryTag></categoryTags><minimumAge>21</minimumAge><skills>In order to maintain the integrity of the MicroMentor program, we require that our Mentor volunteers have significant business experience and expertise, such as: 3 years of business ownership experience</skills><detailURL>http://www.volunteermatch.org/search/index.jsp?l=08540</detailURL><description>This is where you come in. Simply by sharing your business know-how, you can make a huge difference in the lives of entrepreneurs from low-income and marginalized communities, helping them navigate the opportunities and challenges of running a business and improving their economic well-being and creating new jobs where they are most needed.</description></VolunteerOpportunity>")
  id2 = create_from_xml("<VolunteerOpportunity><volunteerOpportunityID>2001</volunteerOpportunityID><sponsoringOrganizationIDs><sponsoringOrganizationID>2</sponsoringOrganizationID></sponsoringOrganizationIDs><title>DODGEBALL TO HELP AREA HUNGRY</title><dateTimeDurations><dateTimeDuration><openEnded>No</openEnded><startDate>2009-02-22</startDate><endDate>2009-02-22</endDate><startTime>18:45:00</startTime><endTime>21:00:00</endTime></dateTimeDuration><dateTimeDuration><openEnded>No</openEnded><startDate>2009-02-27</startDate><endDate>2009-02-27</endDate><startTime>18:45:00</startTime><endTime>21:00:00</endTime></dateTimeDuration></dateTimeDurations><locations><location><city>West Windsor</city><region>NJ</region><postalCode>08550</postalCode></location></locations><audienceTags><audienceTag>Teens</audienceTag><audienceTag>High School Students</audienceTag></audienceTags><categoryTags><categoryTag>Community</categoryTag><categoryTag>Homeless & Hungry</categoryTag><categoryTag>Hunger</categoryTag></categoryTags><minimumAge>14</minimumAge><skills>Must be in High School</skills><detailURL>http://www.volunteermatch.org/search/opp451561.jsp</detailURL><description>The Mercer County Quixote Quest Teen Volunteer Club is hosting a FUN Dodgeball Tournament at Mercer County College on Sunday afternoon, February 22nd. The proceeds from the event will bebefit the Trenton Area Soup Kitchen. Teens are invited to enter a team of six...with at least three female players (3 guys and 3 girls or more girls). Each team playing will bring a $50 entry fee and a matching sponsor donation of $50. (Total of $100 from each team).</description><lastUpdated olsonTZ=\"America/Denver\">2009-02-02T19:02:01</lastUpdated></VolunteerOpportunity>")
  id3 = create_from_xml("<VolunteerOpportunity><volunteerOpportunityID>2002</volunteerOpportunityID><sponsoringOrganizationIDs><sponsoringOrganizationID>2</sponsoringOrganizationID></sponsoringOrganizationIDs><title>YOUNG ADULT TO HELP GUIDE MERCER COUNTY TEEN VOLUNTEER CLUB</title><volunteersNeeded>3</volunteersNeeded><dateTimeDurations><dateTimeDuration><openEnded>No</openEnded><startDate>2009-01-01</startDate><endDate>2009-05-31</endDate><iCalRecurrence>FREQ=WEEKLY;INTERVAL=2</iCalRecurrence><commitmentHoursPerWeek>2</commitmentHoursPerWeek></dateTimeDuration></dateTimeDurations><locations><location><city>Mercer County</city><region>NJ</region><postalCode>08610</postalCode></location></locations><audienceTags><audienceTag>Teens</audienceTag></audienceTags><categoryTags><categoryTag>Community</categoryTag><categoryTag>Children & Youth</categoryTag></categoryTags><skills>Be interested in promoting youth volunteerism. Be available two Tuesday evenings per month.</skills><detailURL>http://www.volunteermatch.org/search/opp200517.jsp</detailURL><description>Quixote Quest is a volunteer club for teens who have a passion for community service. The teens each volunteer for their own specific cause. Twice monthly, the club meets. At the club meetings the teens from different high schools come together for two hours to talk about their volunteer experiences and spend some hang-out time together that helps them bond as fraternity...family. Quixote Quest is seeking young adults roughly between 20 and 30 years of age who would be interested in being a guide and advisor to the teens during these two evening meetings a month.</description><lastUpdated olsonTZ=\"America/Denver\">2008-12-02T19:02:01</lastUpdated></VolunteerOpportunity>")
  return (id1,id2,id3)
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
low-level routines for querying Google Base and processing the results.
Please don't call this directly-- instead call search.py
"""
import datetime
import time
import re
import urllib
import logging
import traceback
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from xml.dom import minidom
import api
import geocode
import models
import modelutils
import posting
import searchresult
import utils
# memcache TTL for individual parsed results.
RESULT_CACHE_TIME = 900 # seconds
# memcache key prefix; full key is RESULT_CACHE_KEY + item_id.
RESULT_CACHE_KEY = 'searchresult:'
# google base has a bug where negative numbers aren't indexed correctly,
# so we load the data with only positive numbers for lat/long.
# this should be a big number and of course must be sync'd with the
# value in datahub/*
GBASE_LOC_FIXUP = 1000
# Date format pattern used in date ranges (YYYY-MM-DDThh:mm:ss).
DATE_FORMAT_PATTERN = re.compile(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}')
# max number of results to ask from Base (for latency-- and correctness?)
BASE_MAX_RESULTS = 1000
# what base customer/author ID did we load the data under?
BASE_CUSTOMER_ID = 5663714
def base_argname(name):
  """Prefix an urlparam name with "base_".

  Base-specific urlparams all carry this prefix to avoid conflicts with
  non-base-specific args, and to signal to appwriters that they're
  base-specific and should be used cautiously.
  """
  return "base_%s" % name
def base_orderby_arg(args):
  """convert from footprint ranking/sorting order to Base order."""
  # TODO: implement other scenarios for orderby
  # "m" means newest-first; anything else falls back to Base's
  # default ordering, "relevancy".
  sort_mode = args[api.PARAM_SORT]
  return "modification_time" if sort_mode == "m" else "relevancy"
def base_restrict_str(key, val=None):
  """convert from key=val to Base restrict syntax ("+[key:val]").

  Underscores in the key are rewritten as spaces before url-quoting;
  val is optional and stringified when present.
  """
  restrict = '+[' + urllib.quote_plus(key.replace('_', ' '))
  if val is not None:
    restrict += ':' + urllib.quote_plus(str(val))
  restrict += ']'
  return restrict
def form_base_query(args):
  """ensure args[] has all correct and well-formed members and
  return a base query string.

  Args:
    args: request-parameter dict.  Mutated in place to fill in defaults
      for start/end dates, sort order, distance, backend and customer id.
  Returns:
    URL-encoded Google Base query string (the value for the &bq= param).
  """
  logging.debug("form_base_query: "+str(args))
  base_query = ""
  if api.PARAM_Q in args and args[api.PARAM_Q] != "":
    base_query += urllib.quote_plus(args[api.PARAM_Q])
  if api.PARAM_VOL_STARTDATE in args or api.PARAM_VOL_ENDDATE in args:
    startdate = None
    if api.PARAM_VOL_STARTDATE in args and args[api.PARAM_VOL_STARTDATE] != "":
      try:
        startdate = datetime.datetime.strptime(
          args[api.PARAM_VOL_STARTDATE].strip(), "%Y-%m-%d")
      except:
        logging.error("malformed start date: %s" %
              args[api.PARAM_VOL_STARTDATE])
    if not startdate:
      # note: default vol_startdate is "tomorrow"
      # in base, event_date_range YYYY-MM-DDThh:mm:ss/YYYY-MM-DDThh:mm:ss
      # appending "Z" to the datetime string would mean UTC
      startdate = datetime.date.today() + datetime.timedelta(days=1)
      args[api.PARAM_VOL_STARTDATE] = startdate.strftime("%Y-%m-%d")
    enddate = None
    if api.PARAM_VOL_ENDDATE in args and args[api.PARAM_VOL_ENDDATE] != "":
      try:
        enddate = datetime.datetime.strptime(
          args[api.PARAM_VOL_ENDDATE].strip(), "%Y-%m-%d")
      except:
        logging.error("malformed end date: %s" % args[api.PARAM_VOL_ENDDATE])
    if not enddate:
      # default end date: ~3 years after the start date
      enddate = datetime.date(startdate.year, startdate.month, startdate.day)
      enddate = enddate + datetime.timedelta(days=1000)
      args[api.PARAM_VOL_ENDDATE] = enddate.strftime("%Y-%m-%d")
    daterangestr = '%s..%s' % (args[api.PARAM_VOL_STARTDATE],
                               args[api.PARAM_VOL_ENDDATE])
    base_query += base_restrict_str("event_date_range", daterangestr)
  if api.PARAM_VOL_PROVIDER in args and args[api.PARAM_VOL_PROVIDER] != "":
    if re.match(r'[a-zA-Z0-9:/_. -]+', args[api.PARAM_VOL_PROVIDER]):
      base_query += base_restrict_str("feed_providername",
                                      args[api.PARAM_VOL_PROVIDER])
    else:
      # illegal providername
      # TODO: throw 500
      logging.error("illegal providername: " + args[api.PARAM_VOL_PROVIDER])
  # TODO: injection attack on sort
  if api.PARAM_SORT not in args:
    args[api.PARAM_SORT] = "r"
  # Base location datatype is buggy-- use inequality search on lat/long
  #base_query += base_restrict_str("location", '@"%s" + %dmi' % \
  #                                  (args[api.PARAM_VOL_LOC],
  #                                   args[api.PARAM_VOL_DIST]))
  if (args["lat"] != "" and args["long"] != ""):
    # bugfix: the original logged args["lat"] twice; log the longitude.
    logging.debug("args[lat]="+args["lat"]+" args[long]="+args["long"])
    if api.PARAM_VOL_DIST not in args or args[api.PARAM_VOL_DIST] == "":
      args[api.PARAM_VOL_DIST] = 25
    args[api.PARAM_VOL_DIST] = int(str(args[api.PARAM_VOL_DIST]))
    if args[api.PARAM_VOL_DIST] < 1:
      args[api.PARAM_VOL_DIST] = 1
    lat, lng = float(args["lat"]), float(args["long"])
    if (lat < 0.5 and lng < 0.5):
      # bugfix: this literal is not %-formatted, so the original's "%%"
      # was emitted verbatim into the query; a single "%" yields the
      # urlencoded "<=" ("%3C%3D") that the else-branch produces.
      base_query += "[latitude%3C%3D0.5][longitude%3C%3D0.5]"
    else:
      dist = float(args[api.PARAM_VOL_DIST])
      # 69.1 miles per degree of latitude; ~50 per degree of longitude.
      base_query += "[latitude%%3E%%3D%.2f]" % (lat+GBASE_LOC_FIXUP - dist/69.1)
      base_query += "[latitude%%3C%%3D%.2f]" % (lat+GBASE_LOC_FIXUP + dist/69.1)
      base_query += "[longitude%%3E%%3D%.2f]" % (lng+GBASE_LOC_FIXUP - dist/50)
      base_query += "[longitude%%3C%%3D%.2f]" % (lng+GBASE_LOC_FIXUP + dist/50)
  # Base URL for snippets search on Base.
  # Docs: http://code.google.com/apis/base/docs/2.0/attrs-queries.html
  # TODO: injection attack on backend
  if "backend" not in args:
    args["backend"] = "http://www.google.com/base/feeds/snippets"
  cust_arg = base_argname("customer")
  if cust_arg not in args:
    args[cust_arg] = BASE_CUSTOMER_ID
  base_query += base_restrict_str("customer_id", int(args[cust_arg]))
  #base_query += base_restrict_str("detailurl")
  if api.PARAM_START not in args:
    args[api.PARAM_START] = 1
  # TODO: remove me-- hack to forcibly remove DNC listings for now
  # (Base hasn't caught up to the takedown, not sure why...)
  #base_query += '+-barackobama'
  return base_query
# note: many of the XSS and injection-attack defenses are unnecessary
# given that the callers are also protecting us, but I figure better
# safe than sorry, and defense-in-depth.
def search(args):
  """run a Google Base search, then apply filters Base can't do itself.

  Args:
    args: request-parameter dict (mutated by form_base_query).
  Returns:
    searchresult.SearchResultSet (empty when no usable query was given).
  """
  def have_valid_query(args):
    """ make sure we were given a value for at least one of these arguments """
    valid_query = False
    api_list = [api.PARAM_Q,
                api.PARAM_TIMEPERIOD,
                api.PARAM_VOL_LOC,
                api.PARAM_VOL_STARTDATE,
                api.PARAM_VOL_ENDDATE,
                api.PARAM_VOL_DURATION,
                api.PARAM_VOL_PROVIDER,
                api.PARAM_VOL_STARTDAYOFWEEK]
    for param in api_list:
      if param in args and args[param]:
        if param == api.PARAM_VOL_LOC:
          # vol_loc must render a lat, long pair
          if not args["lat"] or not args["long"]:
            continue
        valid_query = True
        break
    return valid_query
  base_query = form_base_query(args)
  query_url = args["backend"]
  num_to_fetch = int(args[api.PARAM_START])
  num_to_fetch += int(args[api.PARAM_NUM] * args[api.PARAM_OVERFETCH_RATIO])
  if num_to_fetch > BASE_MAX_RESULTS:
    num_to_fetch = BASE_MAX_RESULTS
  query_url += "?max-results=" + str(num_to_fetch)
  # We don't set "&start-index=" because that will interfere with
  # deduping + pagination.  Since we merge the results here in the
  # app, we must perform de-duping starting at index zero every time
  # in order to get reliable pagination.
  query_url += "&orderby=" + base_orderby_arg(args)
  query_url += "&content=" + "all"
  query_url += "&bq=" + base_query
  if not have_valid_query(args):
    # no query + no location = no results
    result_set = searchresult.SearchResultSet(urllib.unquote(query_url),
                                              query_url,
                                              [])
    logging.debug("Base not called: no query given")
    result_set.query_url = query_url
    result_set.args = args
    result_set.num_results = 0
    result_set.estimated_results = 0
    result_set.fetch_time = 0
    result_set.parse_time = 0
    return result_set
  logging.debug("calling Base: "+query_url)
  results = query(query_url, args, False)
  logging.debug("Base call done: "+str(len(results.results))+
                " results, fetched in "+str(results.fetch_time)+" secs,"+
                " parsed in "+str(results.parse_time)+" secs.")
  # Base doesn't implement day-of-week filtering-- apply it here.
  if (api.PARAM_VOL_STARTDAYOFWEEK in args and
      args[api.PARAM_VOL_STARTDAYOFWEEK] != ""):
    startday = args[api.PARAM_VOL_STARTDAYOFWEEK]
    # bugfix: the original did "del results[i]" while enumerating the
    # result set, which skips the element after each deletion (and indexes
    # the wrapper object rather than its .results list).  Rebuild the
    # filtered list instead, and keep num_results consistent.
    results.results = [res for res in results.results
                       if startday.find(str(res.startdate.strftime("%w"))) >= 0]
    results.num_results = len(results.results)
  return results
def query(query_url, args, cache):
  """run the actual Base query (no filtering or sorting).

  Args:
    query_url: fully-formed Base snippets-feed URL.
    args: request-parameter dict (consulted for geocode_responses).
    cache: if True, store each parsed result in memcache.
  Returns:
    searchresult.SearchResultSet with fetch_time/parse_time populated;
    empty on fetch failure.
  """
  result_set = searchresult.SearchResultSet(urllib.unquote(query_url),
                                            query_url,
                                            [])
  result_set.query_url = query_url
  result_set.args = args
  result_set.fetch_time = 0
  result_set.parse_time = 0
  fetch_start = time.time()
  fetch_result = urlfetch.fetch(query_url,
                                deadline = api.CONST_MAX_FETCH_DEADLINE)
  fetch_end = time.time()
  result_set.fetch_time = fetch_end - fetch_start
  if fetch_result.status_code != 200:
    logging.error("Base fetch returned status code "+
                  str(fetch_result.status_code)+
                  " url="+query_url)
    return result_set
  result_content = fetch_result.content
  parse_start = time.time()
  # undo comma encoding -- see datahub/footprint_lib.py
  result_content = re.sub(r';;', ',', result_content)
  dom = minidom.parseString(result_content)
  elems = dom.getElementsByTagName('entry')
  for i, entry in enumerate(elems):
    # Note: using entry.getElementsByTagName('link')[0] isn't very stable;
    # consider iterating through them for the one where rel='alternate' or
    # whatever the right thing is.
    url = utils.xml_elem_text(entry, 'g:detailurl', '')
    if not url:
      logging.warning("skipping Base record %d: detailurl is missing..." % i)
      continue
    # ID is the 'stable id' of the item generated by base.
    # Note that this is not the base url expressed as the Atom id element.
    item_id = utils.xml_elem_text(entry, 'g:id', '')
    # Base URL is the url of the item in base, expressed with the Atom id tag.
    base_url = utils.xml_elem_text(entry, 'id', '')
    snippet = utils.xml_elem_text(entry, 'g:abstract', '')
    title = utils.xml_elem_text(entry, 'title', '')
    location = utils.xml_elem_text(entry, 'g:location_string', '')
    res = searchresult.SearchResult(url, title, snippet, location, item_id,
                                    base_url)
    # TODO: escape?
    res.provider = utils.xml_elem_text(entry, 'g:feed_providername', '')
    res.orig_idx = i+1
    res.latlong = ""
    latstr = utils.xml_elem_text(entry, 'g:latitude', '')
    longstr = utils.xml_elem_text(entry, 'g:longitude', '')
    if latstr and longstr:
      latval = float(latstr)
      longval = float(longstr)
      # undo the positive-number fixup (see GBASE_LOC_FIXUP); compare
      # against half the fixup because these can be negative numbers
      if latval > GBASE_LOC_FIXUP/2:
        latval -= GBASE_LOC_FIXUP
      if longval > GBASE_LOC_FIXUP/2:
        longval -= GBASE_LOC_FIXUP
      res.latlong = str(latval) + "," + str(longval)
    # TODO: remove-- working around a DB bug where all latlongs are the same
    if "geocode_responses" in args:
      res.latlong = geocode.geocode(location,
                                    args["geocode_responses"]!="nocache" )
    # res.event_date_range follows one of these two formats:
    #     <start_date>T<start_time> <end_date>T<end_time>
    #     <date>T<time>
    # bugfix: the original read "'g:event_date_range' ''"-- a missing comma
    # concatenated the two literals and silently dropped the default arg.
    res.event_date_range = utils.xml_elem_text(entry, 'g:event_date_range', '')
    res.startdate = datetime.datetime.strptime("2000-01-01", "%Y-%m-%d")
    res.enddate = datetime.datetime.strptime("2038-01-01", "%Y-%m-%d")
    if res.event_date_range:
      match = DATE_FORMAT_PATTERN.findall(res.event_date_range)
      if not match:
        logging.warning('skipping Base record %d: bad date range: %s for %s' %
                        (i, res.event_date_range, url))
        continue
      else:
        # first match is start date/time
        startdate = datetime.datetime.strptime(match[0], '%Y-%m-%dT%H:%M:%S')
        # last match is either end date/time or start/date time
        enddate = datetime.datetime.strptime(match[-1], '%Y-%m-%dT%H:%M:%S')
        # protect against absurd dates
        if startdate > res.startdate:
          res.startdate = startdate
        if enddate < res.enddate:
          res.enddate = enddate
    # posting.py currently has an authoritative list of fields in "argnames"
    # that are available to submitted events which may later appear in GBase
    # so with a few exceptions we want those same fields to become
    # attributes of our result object
    except_names = ["title", "description"]
    for name in posting.argnames:
      if name not in except_names:
        # these attributes are likely to become part of the "g" namespace
        # http://base.google.com/support/bin/answer.py?answer=58085&hl=en
        setattr(res, name, utils.xml_elem_text(entry, "g:" + name, ''))
    result_set.results.append(res)
    if cache and res.item_id:
      key = RESULT_CACHE_KEY + res.item_id
      memcache.set(key, res, time=RESULT_CACHE_TIME)
  result_set.num_results = len(result_set.results)
  result_set.estimated_results = int(
    utils.xml_elem_text(dom, "openSearch:totalResults", "0"))
  parse_end = time.time()
  result_set.parse_time = parse_end - parse_start
  return result_set
def get_from_ids(ids):
  """Return a result set containing multiple results for multiple ids.

  Args:
    ids: List of stable IDs of volunteer opportunities.
      NOTE: reversed in place as part of the final ordering step.

  Returns:
    searchresult.SearchResultSet with just the entries in ids.
  """
  result_set = searchresult.SearchResultSet('', '', [])
  # First get all that we can from memcache
  results = {}
  # bugfix: initialize 'hits' up front-- in the original, a memcache
  # exception left it unbound and the loop below raised NameError.
  hits = {}
  try:
    # get_multi returns a dictionary of the keys and values that were present
    # in memcache. Even with the key_prefix specified, that key_prefix won't
    # be on the keys in the returned dictionary.
    hits = memcache.get_multi(ids, RESULT_CACHE_KEY)
  except:
    # TODO(mblain): Scope to only 'memcache down' exception.
    logging.exception('get_from_ids: ignoring busted memcache. stack: %s',
                      ''.join(traceback.format_stack()))
  temp_results_dict = {}
  for key in hits:
    result = hits[key]
    temp_results_dict[result.item_id] = result
  # OK, we've collected what we can from memcache. Now look up the rest.
  # Find the Google Base url from the datastore, then look that up in base.
  missing_ids = []
  for item_id in ids:
    if not item_id in hits:
      missing_ids.append(item_id)
  datastore_results = modelutils.get_by_ids(models.VolunteerOpportunity,
                                            missing_ids)
  datastore_missing_ids = []
  for item_id in ids:
    if not item_id in datastore_results:
      datastore_missing_ids.append(item_id)
  if datastore_missing_ids:
    logging.warning('Could not find entry in datastore for ids: %s' %
                    datastore_missing_ids)
  # Bogus args for search. TODO: Remove these, why are they needed above?
  args = {}
  args[api.PARAM_VOL_STARTDATE] = (datetime.date.today() +
                                   datetime.timedelta(days=1)).strftime("%Y-%m-%d")
  datetm = time.strptime(args[api.PARAM_VOL_STARTDATE], "%Y-%m-%d")
  args[api.PARAM_VOL_ENDDATE] = (datetime.date(datetm.tm_year, datetm.tm_mon,
                                 datetm.tm_mday) + datetime.timedelta(days=60))
  # TODO(mblain): Figure out how to pull in multiple base entries in one call.
  for (item_id, volunteer_opportunity_entity) in datastore_results.iteritems():
    if not volunteer_opportunity_entity.base_url:
      logging.warning('no base_url in datastore for id: %s' % item_id)
      continue
    temp_results = query(volunteer_opportunity_entity.base_url, args, True)
    if not temp_results.results:
      # The base URL may have changed from under us.  Oh well.
      # TODO: "info" is not defined so this logging line breaks.
      # logging.warning('Did not get results from base. id: %s base_url: %s '
      #                 'Last update: %s Previous failure: %s' %
      #                 (id, info.base_url, info.last_base_url_update,
      #                  info.last_base_url_update_failure))
      volunteer_opportunity_entity.base_url_failure_count += 1
      volunteer_opportunity_entity.last_base_url_update_failure = \
          datetime.datetime.now()
      volunteer_opportunity_entity.put()
      continue
    if temp_results.results[0].item_id != item_id:
      logging.error('First result is not expected result. '
                    'Expected: %s Found: %s. len(results): %s' %
                    (item_id, temp_results.results[0].item_id, len(results)))
      # Not sure if we should touch the VolunteerOpportunity or not.
      continue
    temp_result = temp_results.results[0]
    temp_results_dict[temp_result.item_id] = temp_result
  # Our temp result set should now contain both stuff that was looked up from
  # cache as well as stuff that got fetched directly from Base.  Now order
  # the events according to the original list of id's.
  # First reverse the list of id's, so events come out in the right order
  # after being prepended to the events list.
  ids.reverse()
  # (renamed from 'id' to avoid shadowing the builtin)
  for result_id in ids:
    result = temp_results_dict.get(result_id, None)
    if result:
      result_set.results.insert(0, result)
  return result_set
| Python |
# Google Maps API keys, keyed by serving "host:port".  Maps API keys are
# bound to the site URL, so each dev/staging port needs its own key.
MAPS_API_KEYS = {}
MAPS_API_KEYS['www.adamsah.net:8080'] = 'ABQIAAAAxq97AW0x5_CNgn6-nLxSrxQPiECf40c9sk8_oYaM1tejJgt_DBQGX9FrJhDYEm_Q_8aqbVKUzollqg'
MAPS_API_KEYS['www.adamsah.net:8081'] = 'ABQIAAAAxq97AW0x5_CNgn6-nLxSrxSatViGQnF70MoboVDRRVzoLj4T8hTGWmAjUKagGrnWr-xTwSWv4XFuiw'
MAPS_API_KEYS['www.adamsah.net:8082'] = 'ABQIAAAAxq97AW0x5_CNgn6-nLxSrxTGEVewtGazShFSG9KX3KfJ-OzRuxRYmck9mME2a1DVHyoL1GbqprKLeA'
MAPS_API_KEYS['www.adamsah.net:8083'] = 'ABQIAAAAxq97AW0x5_CNgn6-nLxSrxTaJ1NpVtTebRGVkFgIOxwdBr6gvhSK1BMuNuwwydj3shBNvtPyShE9CA'
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
geocode client, which uses Google Maps API
"""
import re
import urllib
import logging
import time
from datetime import datetime
from google.appengine.api import urlfetch
from google.appengine.api import memcache
import api
def is_latlong(instr):
  """check whether a string is a valid lat-long."""
  # two comma-separated numeric-ish tokens, optional surrounding whitespace
  return bool(re.match(r'^\s*[0-9.+-]+\s*,\s*[0-9.+-]+\s*$', instr))
def is_latlongzoom(instr):
  """check whether a string is a valid lat-long-zoom."""
  # three comma-separated numeric-ish tokens, optional surrounding whitespace
  return bool(re.match(r'^\s*[0-9.+-]+\s*,\s*[0-9.+-]+\s*,\s*[0-9.+-]+\s*$', instr))
def geocode(addr, usecache=True, retries=4):
  """convert a human-readable address into a "lat,long,zoom" value (string).

  Args:
    addr: address string, or an already-geocoded "lat,long[,zoom]" value.
    usecache: consult memcache before hitting the Maps geocoder.
    retries: retry budget for geocoder quota errors (500/620).
  Returns:
    "lat,long,zoom" string, or "" on failure.
  """
  loc = addr.lower().strip()
  # already geocoded-- just return
  if is_latlongzoom(loc):
    return loc
  if is_latlong(loc):
    # regexp allow missing comma
    # TODO: pick a smart default zoom, depending on population density.
    return loc+",4"
  # normalize: strip non-alphanumeric edges and collapse whitespace
  loc = re.sub(r'^[^0-9a-z]+', r'', loc)
  loc = re.sub(r'[^0-9a-z]+$', r'', loc)
  loc = re.sub(r'\s\s+', r' ', loc)
  logging.debug("geocode: loc="+loc)
  memcache_key = "geocode:"+loc
  val = memcache.get(memcache_key)
  if usecache and val:
    logging.debug("geocode: cache hit loc="+loc+" val="+val)
    return val
  params = urllib.urlencode(
    {'q':loc.lower(), 'output':'csv',
     'oe':'utf8', 'sensor':'false', 'gl':'us',
     'key':'ABQIAAAAxq97AW0x5_CNgn6-nLxSrxQuOQhskTx7t90ovP5xOuY'+\
       '_YrlyqBQajVan2ia99rD9JgAcFrdQnTD4JQ'})
  fetchurl = "http://maps.google.com/maps/geo?%s" % params
  logging.debug("geocode: cache miss, trying "+fetchurl)
  fetch_result = urlfetch.fetch(fetchurl,
                                deadline = api.CONST_MAX_FETCH_DEADLINE)
  if fetch_result.status_code != 200:
    # fail and also don't cache
    return ""
  res = fetch_result.content
  if "," not in res:
    # fail and also don't cache
    return ""
  try:
    # pylint: disable-msg=W0612
    respcode, zoom, lat, lng = res.split(",")
  except:
    logging.error(str(datetime.now())+
                  ": unparseable geocoder response: "+res[0:80])
    respcode, zoom, lat, lng = 999, 0, 0, 0
  respcode = int(respcode)
  if respcode == 500 or respcode == 620:
    # bugfix: the original recursed unconditionally-- 'retries' was
    # decremented but never checked, so a persistent quota error caused
    # unbounded recursion.  Stop when the retry budget is exhausted.
    if retries <= 0:
      logging.error("geocode: retries exhausted for loc="+loc)
      return ""
    logging.warn(str(datetime.now())+"geocoder quota exceeded-- sleeping...")
    time.sleep(1)
    return geocode(addr, usecache, retries-1)
  # these results get cached (including "" for hard failures)
  val = ""
  if respcode == 200:
    val = lat+","+lng+","+zoom
  memcache.set(memcache_key, val)
  return val
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
paths used in the app
"""
# Application URL paths, referenced by the WSGI routing table.
URL_HOME = '/'
URL_OLD_HOME = '/home'
URL_CONSUMER_UI_SEARCH = '/search'
URL_CONSUMER_UI_SEARCH_REDIR = '/search_form'
URL_API_SEARCH = '/api/volopps'
URL_LEGACY_API_SEARCH = '/api/search'
URL_MY_EVENTS = '/myevents'
URL_FRIENDS = '/friends'
URL_POST = '/post'
URL_ADMIN = '/admin'
URL_MODERATE = '/moderate'
URL_MODERATE_BLACKLIST = '/moderateblacklist'
URL_UI_SNIPPETS = '/ui_snippets'
URL_UI_MY_SNIPPETS = '/ui_my_snippets'
URL_REDIRECT = '/url'
URL_DATAHUB_DASHBOARD = '/dashboard/datahub'
URL_ACTION = '/action' # User actions like starring
URL_PSA = '/psa' # Redirect to home page for tracking adsense psas
# Base URL that static pages are fetched from (project SVN trunk).
STATIC_CONTENT_LOCATION = 'http://footprint2009dev.googlecode.com/svn/trunk/frontend/html/'
# Mappings between appliation URLs (key) and static content
# files to fetch (STATIC_CONTENT_LOCATION + value).
# So, for example, the application URL '/about' maps to
# the remote URL 'http://code.google.com/.../trunk/frontend/html/about_us.html'
STATIC_CONTENT_FILES = {
  '/about' : 'about_us.html',
  '/privacypolicy' : 'privacy_policy.html',
  '/contentpolicy' : 'content_policy.html',
  '/spreadsheet' : 'spreadsheet.html',
  '/publishers' : 'publishers.html',
  '/help' : 'help.html',
  '/faq' : 'faq.html',
  '/tos' : 'tos.html',
  '/api_tos' : 'api_tos.html',
  '/apps' : 'apps.html',
  '/dmca' : 'dmca.html',
  '/docs/api.html' : 'api.html',
  '/partner_terms' : 'partner_terms.html',
  '/apps/gmail' : 'apps-gmail.html',
  '/apps/typepad' : 'apps-typepad.html',
  '/apps/blogger' : 'apps-blogger.html',
  '/apps/googlesites' : 'apps-googlesites.html',
  '/apps/wordpress' : 'apps-wordpress.html',
  '/code' : 'code.html',
  '/posting' : 'spreadsheet.html',
  '/guide' : 'tour.html',
}
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
main() for sheetchecker
"""
# view classes aren inherently not pylint-compatible
# pylint: disable-msg=C0103
# pylint: disable-msg=W0232
# pylint: disable-msg=E1101
# pylint: disable-msg=R0903
import logging
import re
import os
from urllib import unquote
from google.appengine.api import urlfetch
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
import sheetchecker.parse_gspreadsheet as parse_gspreadsheet
import geocode
CHECK_SHEET_TEMPLATE = "checksheet.html"
def render_template(template_filename, template_values):
  """wrapper for template.render() which resolves template_filename
  relative to this module's directory."""
  template_path = os.path.join(os.path.dirname(__file__), template_filename)
  return template.render(template_path, template_values)
class Check(webapp.RequestHandler):
  """prefix query on sheetchecker: fetch a public Google Spreadsheet's
  cells feed, run the parser over it, and render the findings."""
  def get(self):
    """HTTP get method.

    Expects ?url=<spreadsheet url containing key=...>; renders
    CHECK_SHEET_TEMPLATE with the parser's messages and parsed data.
    """
    sheeturl = self.request.get('url')
    template_values = {
      "sheeturl" : sheeturl,
      "sheetfeedurl" : "",
      "msgs" : None,
      "data" : None
      }
    # the spreadsheet key is the only part of the url we need
    match = re.search(r'key=([^& ]+)', sheeturl)
    if match:
      url = "http://spreadsheets.google.com/feeds/cells/"
      url += match.group(1).strip() + "/1/public/basic"
      fetch_result = urlfetch.fetch(url)
      if fetch_result.status_code != 200:
        self.response.out.write("<html><body>error fetching URL " +
                                url + "</body></html>")
        return
      contents = fetch_result.content
      logging.info("fetched %d bytes: %s..." % (len(contents), contents[:80]))
      data, msgs, addr_ar, urls_ar = parse_gspreadsheet.parse(contents)
      logging.info("%d msgs in %s" % (len(msgs), sheeturl))
      template_values["sheetfeedurl"] = url
      template_values["msgs"] = msgs
      template_values["data"] = data
      template_values["addresses"] = addr_ar
      template_values["urls"] = urls_ar
    elif sheeturl != "":
      self.response.out.write("<html><body>malformed sheet URL " +
                              " - missing &key=</body></html>")
      return
    # consistency fix: use the module's render_template() wrapper so the
    # template path is resolved relative to this module rather than the
    # process's working directory.
    self.response.out.write(render_template(CHECK_SHEET_TEMPLATE,
                                            template_values))
class ValidateAddress(webapp.RequestHandler):
  """validate address"""
  def get(self):
    """HTTP get method: render a tiny OK/BAD badge for the given address."""
    addr = unquote(self.request.get('address'))
    rsp = """
    <html><body style="padding-top:1px;margin:0;font-size:10px;
    font-weight:bold;text-align:center;background-color:%s;">%s
    """
    # an empty geocode result means the address didn't resolve
    if geocode.geocode(addr) == "":
      badge = rsp % ("#ff3333", "BAD")
    else:
      badge = rsp % ("#33ff33", "OK")
    self.response.out.write(badge)
class ValidateURL(webapp.RequestHandler):
  """validate address"""
  def get(self):
    """HTTP get method: render a tiny OK/BAD badge for the given URL."""
    url = unquote(self.request.get('url'))
    success = False
    if url != "":
      try:
        # any fetchable URL with a non-error status counts as OK
        success = urlfetch.fetch(url).status_code < 400
      except:
        success = False
    rsp = """
    <html><body style="padding-top:1px;margin:0;font-size:10px;
    font-weight:bold;text-align:center;background-color:%s;">%s
    """
    if success:
      badge = rsp % ("#33ff33", "OK")
    else:
      badge = rsp % ("#ff3333", "BAD")
    self.response.out.write(badge)
# URL routing table for the sheetchecker webapp.
APP = webapp.WSGIApplication(
  [ ('/sheetchecker/check', Check),
    ('/sheetchecker/validate_address', ValidateAddress),
    ('/sheetchecker/validate_url', ValidateURL)
  ],
  debug=True)
def main():
  """main for standalone execution."""
  run_wsgi_app(APP)
if __name__ == "__main__":
  main()
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
parser for feed stored in a google spreadsheet
(note that this is different from other parsers inasmuch as it
expects the caller to pass in the providerID and providerName)
"""
# TODO: share this code between frontend and datahub
# see http://code.google.com/p/footprint2009dev/issues/detail?id=150
# typical cell
#<entry>
#<id>http://spreadsheets.google.com/feeds/cells/pMY64RHUNSVfKYZKPoVXPBg
#/1/public/basic/R14C13</id>
#<updated>2009-04-28T03:29:56.957Z</updated>
#<category scheme='http://schemas.google.com/spreadsheets/2006'
#term='http://schemas.google.com/spreadsheets/2006#cell'/>
#<title type='text'>M14</title>
#<content type='text'>ginny@arthur.edu</content>
#<link rel='self' type='application/atom+xml' href='http://spreadsheets.
#google.com/feeds/cells/pMY64RHUNSVfKYZKPoVXPBg/1/public/basic/R14C13'/>
#</entry>
import re
import calendar
import time
import logging
# stop scanning for data after this many consecutive blank rows
MAX_BLANKROWS = 2
# TODO: right thing is to create a class for spreadsheets...
# module-level parser state, (re)initialized by parse():
CURRENT_ROW = None    # row currently being validated (for error context)
MESSAGES = []         # accumulated "ERROR: ..." strings
DATA = None           # dict: 'R<row>C<col>' -> cell text
HEADER_STARTCOL = None
HEADER_ROW = None
def parser_error(msg):
  """Record a parse error, prefixed with the current row's context.

  Appends to the module-level MESSAGES list.  When CURRENT_ROW is set,
  the message includes the row number and the first few cell values so
  the user can locate the offending row in the spreadsheet.
  """
  global MESSAGES
  # None checks must use identity ('is'), not '!='/'=='
  if CURRENT_ROW is not None:
    msg = "row "+str(CURRENT_ROW)+": "+msg
    msg += "<br/>\n starting with: "
    for col in range(5):
      val = cellval(CURRENT_ROW, col)
      if val is None:
        val = ""
      msg += val+" | "
  MESSAGES.append("ERROR: "+msg)
def raw_recordval(record, key):
  """Return the stripped string form of record[key], or "" when missing."""
  try:
    return str(record[key]).strip()
  except KeyError:
    return ""
def recordval(record, key):
  """Like raw_recordval, but collapses whitespace runs to single spaces."""
  raw = raw_recordval(record, key)
  return re.sub(r'\s+', ' ', raw)
# NOTE(review): KNOWN_ORGS is never read in this module as shown --
# verify whether it is still needed.
KNOWN_ORGS = {}
def get_dtval(record, field_name):
  """Validate a field as an MM/DD/YYYY date; returns the raw string.

  A blank value is allowed and returned unchanged.
  """
  val = recordval(record, field_name)
  if val == "":
    return val
  if not re.match(r'\d\d?/\d\d?/\d\d\d\d', val):
    parser_error("bad value in "+field_name+": '"+val+"'-- try MM/DD/YYYY")
  return val
def get_tmval(record, field_name):
  """Validate a field as an HH:MM[:SS] time of day; returns the raw string.

  A blank value is allowed and returned unchanged.
  """
  val = recordval(record, field_name)
  if val == "":
    return val
  if not re.match(r'\d?\d:\d\d(:\d\d)?', val):
    parser_error("bad value in "+field_name+": '"+val+"'-- try HH:MM:SS")
  return val
def get_boolval(record, field_name):
  """Validate a yes/no field (blank is allowed); returns the raw string."""
  val = recordval(record, field_name)
  accepted = ("y", "yes", "n", "no", "")
  if val.lower() not in accepted:
    # TODO: support these alternates in the datahub!
    parser_error("bad value in "+field_name+": '"+val+"'-- try 'Yes' or 'No'")
  return val
def get_intval(record, field_name):
  """Validate a field as an integer; returns the raw string value.

  (Docstring previously said "time-of-day" -- a copy/paste from
  get_tmval.)  A blank value is allowed.
  """
  val = recordval(record, field_name)
  # NOTE(review): the pattern is unanchored, so any value with a leading
  # digit (e.g. "12ish") passes -- confirm whether that is intended.
  if val != "" and not re.match('[0-9]+', val):
    parser_error("bad value in "+field_name+": '"+val+"'-- try a number")
  return val
def get_minlen(record, field_name, minlen):
  """Require a nonempty value at least minlen characters long; returns it."""
  val = recordval(record, field_name)
  if not val:
    parser_error("missing value in "+field_name+": '"+val+"'-- field required.")
  elif len(val) < minlen:
    parser_error("value not long enough in "+field_name+": '"+val+"'-- "+
                 "requires %d characters" % minlen)
  return val
def get_blank(record, field_name, reason=" in this case."):
  """Require that a field be blank; records an error when it is not.

  Returns the field value either way ("" when blank).
  """
  val = recordval(record, field_name)
  if val:
    parser_error("field "+field_name+" must be blank"+reason)
  return val
def cellval(row, col):
  """Return the text of cell (row, col) from DATA, or None if absent."""
  return DATA.get('R'+str(row)+'C'+str(col))
def parse_gspreadsheet(instr, updated):
  """Load a spreadsheet cells-feed into the module-level DATA dict.

  Populates DATA (key 'R<row>C<col>' -> cell text) and the caller-provided
  updated dict (same key -> last-updated timestamp string, fractional
  seconds and trailing 'Z' stripped).  Returns (maxrow, maxcol) as ints.
  """
  # look ma, watch me parse XML a zillion times faster!
  # each cell arrives as <entry>...<id>...R14C15</id>
  # <updated>2009-04-28T03:34:21.900Z</updated>...
  # <content type='text'>...</content>...</entry>
  regexp = re.compile(r'<entry>.+?(R(\d+)C(\d+))</id>'+
                      r'<updated.*?>(.+?)</updated>.*?'+
                      r'<content.*?>(.+?)</content>.+?</entry>', re.DOTALL)
  maxrow = maxcol = 0
  for match in re.finditer(regexp, instr):
    lastupd = re.sub(r'([.][0-9]+)?Z?$', '', match.group(4)).strip()
    updated[match.group(1)] = lastupd.strip("\r\n\t ")
    val = match.group(5).strip("\r\n\t ")
    DATA[match.group(1)] = val
    # BUGFIX: match.group() returns strings, and comparing a str against
    # the int maxrow/maxcol gave wrong (cross-type / lexicographic)
    # maxima in Python 2.  Convert to int before comparing.
    row = int(match.group(2))
    if row > maxrow:
      maxrow = row
    col = int(match.group(3))
    if col > maxcol:
      maxcol = col
  return maxrow, maxcol
def find_header_row(regexp_str):
  """Locate the header row in a footprint spreadsheet.

  Scans the top-left 20x5 cells for one matching regexp_str
  (case-insensitive) and sets the module-level HEADER_ROW and
  HEADER_STARTCOL; on failure both stay None and an error is recorded.
  """
  regexp = re.compile(regexp_str, re.IGNORECASE|re.DOTALL)
  global HEADER_ROW, HEADER_STARTCOL
  HEADER_ROW = HEADER_STARTCOL = None
  for row in range(20):
    # BUGFIX: the previous truthiness test ('if HEADER_ROW:') would fail
    # to stop scanning when the header was found at row 0.
    if HEADER_ROW is not None:
      break
    for col in range(5):
      val = cellval(row, col)
      if (val and re.search(regexp, val)):
        HEADER_ROW = row
        HEADER_STARTCOL = col
        break
  if HEADER_ROW is None or HEADER_STARTCOL is None:
    parser_error("failed to parse this as a footprint spreadsheet. "+
                 "No header row found: looked for "+regexp_str)
def parse(instr):
  """main function for parsing footprint spreadsheets.

  Builds the module-level DATA dict from the cells-feed XML, locates the
  header row, maps header cells to canonical field names, then validates
  every data row (problems are recorded via parser_error).  Normally
  returns (DATA, MESSAGES, addr_ar, urls_ar); when no header row is
  found it returns only (DATA, MESSAGES) -- NOTE(review): callers that
  unpack four values will break on that path.
  """
  # TODO: a spreadsheet should really be an object and cellval a method
  global DATA, MESSAGES, CURRENT_ROW
  DATA = {}
  MESSAGES = []
  CURRENT_ROW = None
  updated = {}
  parse_gspreadsheet(instr, updated)
  # find header row: look for "opportunity title" (case insensitive)
  find_header_row('opportunity\s*title')
  if not HEADER_ROW or not HEADER_STARTCOL:
    return DATA, MESSAGES
  header_colidx = {}
  header_names = {}
  header_col = HEADER_STARTCOL
  # walk right from the header start cell, mapping each header string to
  # a canonical field name, until the first blank header cell
  while True:
    header_str = cellval(HEADER_ROW, header_col)
    if not header_str:
      break
    field_name = None
    header_str = header_str.lower()
    if header_str.find("title") >= 0:
      field_name = "OpportunityTitle"
    elif header_str.find("organization") >= 0 and \
         header_str.find("sponsor") >= 0:
      field_name = "SponsoringOrganization"
    elif header_str.find("description") >= 0:
      field_name = "Description"
    elif header_str.find("skills") >= 0:
      field_name = "Skills"
    elif header_str.find("location") >= 0 and header_str.find("name") >= 0:
      field_name = "LocationName"
    elif header_str.find("street") >= 0:
      field_name = "LocationStreet"
    elif header_str.find("city") >= 0:
      field_name = "LocationCity"
    elif header_str.find("state") >= 0 or header_str.find("province") >= 0:
      field_name = "LocationProvince"
    elif header_str.find("zip") >= 0 or header_str.find("postal") >= 0:
      field_name = "LocationPostalCode"
    elif header_str.find("country") >= 0:
      field_name = "LocationCountry"
    elif header_str.find("start") >= 0 and header_str.find("date") >= 0:
      field_name = "StartDate"
    elif header_str.find("start") >= 0 and header_str.find("time") >= 0:
      field_name = "StartTime"
    elif header_str.find("end") >= 0 and header_str.find("date") >= 0:
      field_name = "EndDate"
    elif header_str.find("end") >= 0 and header_str.find("time") >= 0:
      field_name = "EndTime"
    elif header_str.find("contact") >= 0 and header_str.find("name") >= 0:
      field_name = "ContactName"
    elif header_str.find("email") >= 0 or header_str.find("e-mail") >= 0:
      field_name = "ContactEmail"
    elif header_str.find("phone") >= 0:
      field_name = "ContactPhone"
    elif header_str.find("website") >= 0 or header_str.find("url") >= 0:
      field_name = "URL"
    elif header_str.find("often") >= 0:
      field_name = "Frequency"
    elif header_str.find("days") >= 0 and header_str.find("week") >= 0:
      field_name = "DaysOfWeek"
    elif header_str.find("paid") >= 0:
      field_name = "Paid"
    elif header_str.find("commitment") >= 0 or header_str.find("hours") >= 0:
      field_name = "CommitmentHours"
    elif header_str.find("age") >= 0 and header_str.find("min") >= 0:
      field_name = "MinimumAge"
    elif header_str.find("kid") >= 0:
      field_name = "KidFriendly"
    elif header_str.find("senior") >= 0 and header_str.find("only") >= 0:
      field_name = "SeniorsOnly"
    elif header_str.find("sex") >= 0 or header_str.find("gender") >= 0:
      field_name = "SexRestrictedTo"
    elif header_str.find("volunteer appeal") >= 0:
      # recognized column that is deliberately not imported
      field_name = None
    else:
      parser_error("couldn't map header '"+header_str+"' to a field name.")
    if field_name != None:
      header_colidx[field_name] = header_col
      header_names[header_col] = field_name
      #print header_str, "=>", field_name
    header_col += 1
  if len(header_names) < 10:
    parser_error("too few fields found: "+str(len(header_names)))
  # check to see if there's a header-description row
  header_desc = cellval(HEADER_ROW+1, HEADER_STARTCOL)
  if not header_desc:
    parser_error("blank row not allowed below header row")
  # NOTE(review): if header_desc is None, the .lower() below raises
  # AttributeError -- parser_error does not abort.  Confirm intended.
  header_desc = header_desc.lower()
  data_startrow = HEADER_ROW + 1
  if header_desc.find("up to") >= 0:
    # row below the header is instructions ("up to ..."), skip it
    data_startrow += 1
  # find the data
  CURRENT_ROW = data_startrow
  blankrows = 0
  numopps = 0
  addr_ar = []
  urls_ar = []
  # validate each data row until MAX_BLANKROWS consecutive blanks
  while True:
    blankrow = True
    #rowstr = "row="+str(row)+"\n"
    record = {}
    record['LastUpdated'] = '0000-00-00'
    for field_name in header_colidx:
      col = header_colidx[field_name]
      val = cellval(CURRENT_ROW, col)
      if val:
        blankrow = False
      else:
        val = ""
      #rowstr += "   "+field_name+"="+val+"\n"
      record[field_name] = val
      # track the newest cell-update stamp in the row
      key = 'R'+str(CURRENT_ROW)+'C'+str(col)
      if (key in updated and
          updated[key] > record['LastUpdated']):
        record['LastUpdated'] = updated[key]
    if blankrow:
      blankrows += 1
      if blankrows > MAX_BLANKROWS:
        break
    else:
      numopps += 1
      blankrows = 0
      record['oppid'] = str(numopps)
      get_minlen(record, 'OpportunityTitle', 4)
      get_minlen(record, 'Description', 15)
      location_name = get_minlen(record, 'LocationName', 4)
      if location_name == "virtual":
        is_virtual = True
      elif location_name.lower() == "virtaul" or location_name.lower() == "virtual":
        # NOTE(review): the first branch is case sensitive, so "Virtual"
        # lands here and is reported as misspelled.  Confirm intended.
        parser_error("misspelled location name: "+location_name+
                     " -- perhaps you meant 'virtual'? (note spelling)")
        is_virtual = True
      else:
        is_virtual = False
      if is_virtual:
        # virtual opportunities must not carry a physical location
        reason = " for virtual opportunities-- if you want both a location and"
        reason += " a virtual opportunity, then provide two separate records."
        get_blank(record, "LocationStreet", reason)
        get_blank(record, "LocationCity", reason)
        get_blank(record, "LocationProvince", reason)
        get_blank(record, "LocationPostalCode", reason)
        get_blank(record, "LocationCountry", reason)
      else:
        # TODO: appengine 30sec timeouts render this ambiguous/confuse for users
        addr = recordval(record, "LocationStreet")
        addr += " "+recordval(record, "LocationCity")
        addr += " "+recordval(record, "LocationProvince")
        addr += " "+recordval(record, "LocationPostalCode")
        addr += " "+recordval(record, "LocationCountry")
        addr_ar.append(addr)
      start_date = recordval(record, "StartDate")
      if start_date == "ongoing":
        ongoing = True
      elif start_date.lower().find("ong") == 0:
        parser_error("misspelled Start Date: "+start_date+
                     " -- perhaps you meant 'ongoing'? (note spelling)")
        ongoing = True
      elif start_date == "":
        parser_error("Start Date may not be blank.")
        ongoing = True
      else:
        ongoing = False
      if ongoing:
        # ongoing events must not carry explicit times or an end date
        start_time = recordval(record, "StartTime")
        if start_time != "" and start_time != "ongoing":
          parser_error("ongoing event should have blank Start Time.")
        end_date = recordval(record, "EndDate")
        if end_date != "" and end_date != "ongoing":
          parser_error("ongoing event should have blank End Date.")
        end_time = recordval(record, "EndTime")
        if end_time != "" and end_time != "ongoing":
          parser_error("ongoing event should have blank End Time.")
      else:
        date_start = get_dtval(record, "StartDate")
        time_start = get_tmval(record, "StartTime")
        date_ending = get_dtval(record, "EndDate")
        if len(date_ending) < 1:
          # blank end date means a single-day event
          date_ending = date_start
        time_ending = get_tmval(record, "EndTime")
        def to_epoch(timestamp):
          # "MM/DD/YYYY HH:MM:SS" -> epoch seconds; 0 on any parse failure
          try:
            t = time.strptime(timestamp, "%m/%d/%Y %H:%M:%S")
            epoch = int(calendar.timegm(t))
          except:
            epoch = 0
          return epoch
        if (to_epoch(date_start + ' ' + time_start) >
            to_epoch(date_ending + ' ' + time_ending)):
          parser_error("Start Date/Time later than End Date/Time.")
      get_minlen(record, 'URL', 12)
      email = recordval(record, "ContactEmail")
      if email != "" and email.find("@") == -1:
        parser_error("malformed email address: "+email)
      url = recordval(record, "URL")
      urls_ar.append(url)
      daysofweek = recordval(record, "DaysOfWeek").split(",")
      for dow in daysofweek:
        lcdow = dow.strip().lower()
        if lcdow not in ["sat", "saturday",
                         "sun", "sunday",
                         "mon", "monday",
                         "tue", "tues", "tuesday",
                         "wed", "weds", "wednesday",
                         "thu", "thur", "thurs", "thursday",
                         "fri", "friday", ""]:
          # TODO: support these alternates in the datahub!
          parser_error("malformed day of week: '%s'" % dow)
      get_boolval(record, "Paid")
      get_intval(record, "CommitmentHours")
      get_intval(record, "MinimumAge")
      get_boolval(record, "KidFriendly")
      get_boolval(record, "SeniorsOnly")
      sexrestrict = recordval(record, "SexRestrictedTo")
      if sexrestrict.lower() not in ["women", "men", "either", ""]:
        parser_error("bad SexRestrictedTo-- try Men, Women, Either or (blank).")
      org = recordval(record, 'SponsoringOrganization')
      if org == "":
        parser_error("missing Sponsoring Organization-- this field is required."+
                     " (it can be an informal name, or even a person's name).")
      else:
        get_minlen(record, 'SponsoringOrganization', 4)
      freq = recordval(record, 'Frequency').lower()
      if not (freq == "" or freq == "once" or freq == "daily" or
              freq == "weekly" or freq == "every other week" or
              freq == "monthly"):
        parser_error("unsupported frequency: '"+
                     recordval(record, 'Frequency')+"'")
    CURRENT_ROW += 1
  if len(MESSAGES) == 0:
    MESSAGES.append("spreadsheet parsed correctly!" +
                    " Feel free to submit if the locations and URL's check out.")
  return DATA, MESSAGES, addr_ar, urls_ar
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
toss all the scoring code into one place (rather than a class file)
because scoring tends to get complex quickly.
"""
from datetime import datetime
import logging
import math
import api
import view_helper
def compare_scores(val1, val2):
  """cmp-style comparator on .score: higher-scored items sort first."""
  if val1.score < val2.score:
    return 1
  if val1.score > val2.score:
    return -1
  return 0
def score_results_set(result_set, args):
  """sort results by score, and for each, set .score, .scorestr, .score_notes

  The score starts at 1.0 and is multiplied by: a backend-rank factor
  (keyword queries only), a start/end-date proximity factor, a
  short-event boost, a geographic-distance factor, and an interest
  (star-rating) weight.  score_notes records each factor for debugging.
  """
  logging.debug(str(datetime.now())+": score_results_set(): start")
  idlist = map(lambda x: x.item_id, result_set.results)
  # handle rescoring on interest weights
  others_interests = view_helper.get_interest_for_opportunities(idlist)
  total_results = float(len(result_set.results))
  # time constants hoisted out of the loop (they're invariant)
  ONEDAY = 24.0 * 3600.0
  MAXTIME = 500.0 * ONEDAY
  for i, res in enumerate(result_set.results):
    score = 1.0
    score_notes = ""
    # keywordless queries should rank by location and time, not relevance.
    if api.PARAM_Q in args and args[api.PARAM_Q] != "":
      # lower ranking items in the backend = lower ranking here (roughly 1/rank)
      rank_mult = (total_results - i)/total_results
      score *= rank_mult
      # BUGFIX: format arguments were swapped -- the index i was printed
      # as the multiplier and rank_mult was truncated into the rank slot.
      score_notes += " backend multiplier=%.3f (rank=%d)\n" % (rank_mult, i)
    # TODO: match on start time, etc.
    start_delta = res.startdate - datetime.now()
    start_delta_secs = start_delta.days*ONEDAY + start_delta.seconds
    start_delta_secs = min(max(start_delta_secs, 0), MAXTIME)
    end_delta = res.enddate - datetime.now()
    end_delta_secs = end_delta.days*ONEDAY + end_delta.seconds
    end_delta_secs = min(max(end_delta_secs, start_delta_secs), MAXTIME)
    date_dist_multiplier = 1
    if end_delta_secs <= 0:
      # event already ended: effectively bury it
      date_dist_multiplier = .0001
    if start_delta_secs > 0:
      # further out start date = lower rank (roughly 1/numdays)
      date_dist_multiplier = 1.0/(start_delta_secs/ONEDAY)
    score *= date_dist_multiplier
    score_notes += " date_mult=" + str(date_dist_multiplier)
    score_notes += " start=%s (%+g days)" % (
      res.startdate, start_delta_secs / ONEDAY)
    score_notes += " end=%s (%+g days)" % (
      res.enddate, end_delta_secs / ONEDAY)
    score_notes += "\n"
    # boost short events
    delta_secs = end_delta_secs - start_delta_secs
    if delta_secs > 0:
      # up to 14 days gets a boost
      ddays = 10*max(14 - delta_secs/ONEDAY, 1.0)
      date_delta_multiplier = math.log10(ddays)
    else:
      date_delta_multiplier = 1
    score *= date_delta_multiplier
    score_notes += " date_delta_mult=%.3f (%g days)\n" % (
      date_delta_multiplier, delta_secs / float(ONEDAY))
    if (("lat" not in args) or args["lat"] == "" or
        ("long" not in args) or args["long"] == "" or
        res.latlong == ""):
      # no query location or no listing location: neutral-ish factor
      geo_dist_multiplier = 0.5
    else:
      # TODO: error in the DB, we're getting same geocodes for everything
      lat, lng = res.latlong.split(",")
      latdist = float(lat) - float(args["lat"])
      lngdist = float(lng) - float(args["long"])
      # keep one value to right of decimal
      delta_dist = latdist*latdist + lngdist*lngdist
      logging.debug("qloc=%s,%s - listing=%g,%g - dist=%g,%g - delta = %g" %
                    (args["lat"], args["long"], float(lat), float(lng),
                     latdist, lngdist, delta_dist))
      # reasonably local
      if delta_dist > 0.025:
        delta_dist = 0.9 + delta_dist
      else:
        delta_dist = delta_dist / (0.025 / 0.9)
      if delta_dist > 0.999:
        delta_dist = 0.999
      geo_dist_multiplier = 1.0 - delta_dist
    interest = -1
    if res.item_id in others_interests:
      interest = others_interests[res.item_id]
    elif "test_stars" in args:
      # deterministic fake star ratings for testing
      interest = i % 6
    score *= geo_dist_multiplier
    score_notes += " geo multiplier=" + str(geo_dist_multiplier)
    if interest >= 0:
      # TODO: remove hocus-pocus math
      interest_weight = (math.log(interest+1.0)/math.log(6.0))**3
      score *= interest_weight
      score_notes += " "+str(interest)+"-stars="+str(interest_weight)
    res.set_score(score, score_notes)
  result_set.results.sort(cmp=compare_scores)
  logging.debug(str(datetime.now())+": score_results_set(): done")
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
high-level routines for querying backend datastores and processing the results.
"""
import calendar
import datetime
import hashlib
import logging
import copy
from google.appengine.api import memcache
import api
import base_search
import geocode
import scoring
from fastpageviews import pagecount
# memcache TTL for cached search result sets
CACHE_TIME = 24*60*60 # seconds
# args is expected to be a list of args
# and any path info is supposed to be homogenized into this,
# e.g. /listing/56_foo should be resolved into [('id',56)]
# by convention, repeated args are ignored, LAST ONE wins.
def search(args):
  """run a search against the backend specified by the 'backend' arg.
  Returns a result set that's been (a) de-dup'd ("merged") and (b) truncated
  to the appropriate number of results ("clipped").  Impression tracking
  happens here as well."""
  # TODO(paul): Create a QueryParams object to handle validation.
  # Validation should be lazy, so that (for example) here
  # only 'num' and 'start' are validated, since we don't
  # yet need the rest.  QueryParams can have a function to
  # create a normalized string, for the memcache key.
  # pylint: disable-msg=C0321
  normalize_query_values(args)
  # TODO: query param (& add to spec) for defeating the cache (incl FastNet)
  # I (mblain) suggest using "zx", which is used at Google for most services.
  # TODO: Should construct our own normalized query string instead of
  # using the browser's querystring.
  # sorted key=value pairs form the canonical query used as the cache key
  args_array = [str(key)+'='+str(value) for (key, value) in args.items()]
  args_array.sort()
  normalized_query_string = str('&'.join(args_array))
  use_cache = True
  if api.PARAM_CACHE in args and args[api.PARAM_CACHE] == '0':
    use_cache = False
    logging.debug('Not using search cache')
  # note: key cannot exceed 250 bytes
  memcache_key = hashlib.md5('search:' + normalized_query_string).hexdigest()
  start = int(args[api.PARAM_START])
  num = int(args[api.PARAM_NUM])
  result_set = None
  if use_cache:
    result_set = memcache.get(memcache_key)
    if result_set:
      logging.debug('in cache: "' + normalized_query_string + '"')
      # a cached set may cover a smaller window; re-run the query when it
      # cannot supply results up through start+num
      if len(result_set.merged_results) < start + num:
        logging.debug('but too small-- rerunning query...')
        result_set = None
    else:
      logging.debug('not in cache: "' + normalized_query_string + '"')
  if not result_set:
    result_set = fetch_result_set(args)
    memcache.set(memcache_key, result_set, time=CACHE_TIME)
  result_set.clip_merged_results(start, num)
  # TODO: for better results, we should segment CTR computation by
  # homepage vs. search views, etc. -- but IMHO it's better to give
  # up and outsource stats to a web-hosted service.
  if 'key' in args and args['key'] == pagecount.TEST_API_KEY:
    logging.debug("search(): not tracking testapi key views")
    # needed to populate stats
    result_set.track_views(num_to_incr=0)
  else:
    result_set.track_views(num_to_incr=1)
  return result_set
def min_max(val, minval, maxval):
  """Clamp val into the inclusive range [minval, maxval]."""
  clipped = min(maxval, val)
  return max(clipped, minval)
def normalize_query_values(args):
  """Pre-processes several values related to the search API that might be
  present in the query string.

  Mutates args in place: clamps num/start/overfetch, converts a symbolic
  time period into explicit start/end dates, defaults the query string,
  and resolves the location to lat/long plus a search radius.
  """
  # api.PARAM_OUTPUT is only used by callers (the view)
  # (though I can imagine some output formats dictating which fields are
  # retrieved from the backend...)
  #
  #if args[api.PARAM_OUTPUT] not in ['html', 'tsv', 'csv', 'json', 'rss',
  #  'rssdesc', 'xml', 'snippets_list']
  #
  # TODO: csv list of fields
  #if args[api.PARAM_FIELDS] not in ['all', 'rss']:
  # TODO: process dbg -- currently, anything goes...
  # RESERVED: v
  # RESERVED: sort
  # RESERVED: type
  def dbgargs(arg):
    # log one normalized arg value for debugging
    logging.debug("args[%s]=%s" % (arg, args[arg]))
  num = int(args.get(api.PARAM_NUM, 10))
  args[api.PARAM_NUM] = min_max(num, api.CONST_MIN_NUM, api.CONST_MAX_NUM)
  dbgargs(api.PARAM_NUM)
  start_index = int(args.get(api.PARAM_START, 1))
  args[api.PARAM_START] = min_max(
    start_index, api.CONST_MIN_START, api.CONST_MAX_START-num)
  dbgargs(api.PARAM_START)
  if api.PARAM_OVERFETCH_RATIO in args:
    overfetch_ratio = float(args[api.PARAM_OVERFETCH_RATIO])
  elif args[api.PARAM_START] > 1:
    # increase the overfetch ratio after the first page--
    # overfetch is expensive and we don't want to do this
    # on page one, which is very performance sensitive.
    overfetch_ratio = api.CONST_MAX_OVERFETCH_RATIO
  else:
    overfetch_ratio = 2.0
  args[api.PARAM_OVERFETCH_RATIO] = min_max(
    overfetch_ratio, api.CONST_MIN_OVERFETCH_RATIO,
    api.CONST_MAX_OVERFETCH_RATIO)
  dbgargs(api.PARAM_OVERFETCH_RATIO)
  # PARAM_TIMEPERIOD overrides VOL_STARTDATE/VOL_ENDDATE
  if api.PARAM_TIMEPERIOD in args:
    period = args[api.PARAM_TIMEPERIOD]
    # No need to pass thru, just convert period to discrete date args.
    del args[api.PARAM_TIMEPERIOD]
    date_range = None
    today = datetime.date.today()
    if period == 'today':
      date_range = (today, today)
    elif period == 'this_weekend':
      days_to_sat = 5 - today.weekday()
      delta = datetime.timedelta(days=days_to_sat)
      this_saturday = today + delta
      this_sunday = this_saturday + datetime.timedelta(days=1)
      date_range = (this_saturday, this_sunday)
    elif period == 'this_week':
      days_to_mon = 0 - today.weekday()
      delta = datetime.timedelta(days=days_to_mon)
      this_monday = today + delta
      this_sunday = this_monday + datetime.timedelta(days=6)
      date_range = (this_monday, this_sunday)
    elif period == 'this_month':
      days_to_first = 1 - today.day
      delta = datetime.timedelta(days=days_to_first)
      first_of_month = today + delta
      days_to_month_end = calendar.monthrange(today.year, today.month)[1] - 1
      delta = datetime.timedelta(days=days_to_month_end)
      last_of_month = first_of_month + delta
      date_range = (first_of_month, last_of_month)
    # unknown period strings are silently ignored (date_range stays None)
    if date_range:
      start_date = date_range[0].strftime("%Y-%m-%d")
      end_date = date_range[1].strftime("%Y-%m-%d")
      args[api.PARAM_VOL_STARTDATE] = start_date
      args[api.PARAM_VOL_ENDDATE] = end_date
      logging.debug("date range: "+ start_date + '...' + end_date)
  if api.PARAM_Q not in args:
    args[api.PARAM_Q] = ""
  dbgargs(api.PARAM_Q)
  if api.PARAM_VOL_LOC not in args:
    # bugfix for http://code.google.com/p/footprint2009dev/issues/detail?id=461
    # q=Massachusetts should imply vol_loc=Massachusetts, USA
    # note that this implementation also makes q=nature match
    # a town near santa ana, CA
    # http://www.allforgood.org/search#q=nature&vol_loc=nature%2C%20USA
    args[api.PARAM_VOL_LOC] = args[api.PARAM_Q] + " USA"
  args["lat"] = args["long"] = ""
  # NOTE(review): PARAM_VOL_LOC was just defaulted above when missing, so
  # this condition is always true and the trailing 'else' is unreachable.
  if api.PARAM_VOL_LOC in args:
    zoom = 5
    if geocode.is_latlong(args[api.PARAM_VOL_LOC]):
      args["lat"], args["long"] = args[api.PARAM_VOL_LOC].split(",")
    elif geocode.is_latlongzoom(args[api.PARAM_VOL_LOC]):
      args["lat"], args["long"], zoom = args[api.PARAM_VOL_LOC].split(",")
    elif args[api.PARAM_VOL_LOC] == "virtual":
      args["lat"] = args["long"] = "0.0"
      zoom = 6
    elif args[api.PARAM_VOL_LOC] == "anywhere":
      args["lat"] = args["long"] = ""
    else:
      res = geocode.geocode(args[api.PARAM_VOL_LOC])
      if res != "":
        args["lat"], args["long"], zoom = res.split(",")
    args["lat"] = args["lat"].strip()
    args["long"] = args["long"].strip()
    if api.PARAM_VOL_DIST not in args:
      # choose a default search radius (miles) from the geocoder zoom level
      zoom = int(zoom)
      if zoom == 1:
        # country zoomlevel is kinda bogus--
        # 500 mile search radius (avoids 0.0,0.0 in the atlantic ocean)
        args[api.PARAM_VOL_DIST] = 500
      elif zoom == 2: # region
        # state/region is very wide-- start with 50 mile radius,
        # and we'll fallback to larger.
        args[api.PARAM_VOL_DIST] = 50
      elif zoom == 3: # county
        # county radius should be pretty rare-- start with 10 mile radius,
        # and we'll fallback to larger.
        args[api.PARAM_VOL_DIST] = 10
      elif zoom == 4 or zoom == 0:
        # city is the common case-- start with 5 mile search radius,
        # and we'll fallback to larger.  This avoids accidentally
        # prioritizing listings from neighboring cities.
        args[api.PARAM_VOL_DIST] = 5
      elif zoom == 5:
        # postal codes are also a common case-- start with a narrower
        # radius than the city, and we'll fallback to larger.
        args[api.PARAM_VOL_DIST] = 3
      elif zoom > 5:
        # street address or GPS coordinates-- start with a very narrow
        # search suitable for walking.
        args[api.PARAM_VOL_DIST] = 1
  else:
    args[api.PARAM_VOL_LOC] = args[api.PARAM_VOL_DIST] = ""
  dbgargs(api.PARAM_VOL_LOC)
def fetch_and_dedup(args):
  """Run the backend search, then score and de-duplicate the result set."""
  results = base_search.search(args)
  scoring.score_results_set(results, args)
  results.dedup()
  return results
def fetch_result_set(args):
  """Validate the search parameters, and perform the search.

  Runs the primary query, then up to two backfill passes when the
  backend is exhausted but too few merged results were found: first with
  a 5x search radius, then with locationless (lat/long 0.0) listings.
  """
  result_set = fetch_and_dedup(args)
  def can_use_backfill(args, result_set):
    # backfill only when the backend has no more results yet we still
    # can't cover the requested [start, start+num) window
    if (not result_set.has_more_results
        and result_set.num_merged_results <
        int(args[api.PARAM_NUM]) + int(args[api.PARAM_START])):
      return True
    return False
  # first backfill pass: same location, 5x the search radius
  if (can_use_backfill(args, result_set) and
      (args["lat"] != "0.0" or args["long"] != "0.0")):
    newargs = copy.copy(args)
    newargs[api.PARAM_VOL_DIST] = newargs[api.PARAM_VOL_DIST] * 5
    logging.debug("backfilling with further listings...")
    locationless_result_set = fetch_and_dedup(newargs)
    logging.debug("len(result_set.results)=%d" % len(result_set.results))
    logging.debug("len(locationless)=%d" % len(locationless_result_set.results))
    result_set.append_results(locationless_result_set)
    logging.debug("new len=%d" % len(result_set.results))
  # backfill with locationless listings
  locationless_result_set = []
  if (can_use_backfill(args, result_set) and
      (args["lat"] != "0.0" or args["long"] != "0.0")):
    newargs = copy.copy(args)
    newargs["lat"] = newargs["long"] = "0.0"
    newargs[api.PARAM_VOL_DIST] = 50
    logging.debug("backfilling with locationless listings...")
    locationless_result_set = fetch_and_dedup(newargs)
    logging.debug("len(result_set.results)=%d" % len(result_set.results))
    logging.debug("len(locationless)=%d" % len(locationless_result_set.results))
    result_set.append_results(locationless_result_set)
    logging.debug("new len=%d" % len(result_set.results))
  return result_set
| Python |
#!/usr/bin/python2.5
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User Info module (userinfo).
This file contains the base class for the userinfo classes.
It also contains (at least for now) subclasses for different login types."""
__author__ = 'matthew.blain@google.com'
import logging
import os
from django.utils import simplejson
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from StringIO import StringIO
from facebook import Facebook
import deploy
import models
import utils
class Error(Exception):
  """Base class for userinfo errors."""

class NotLoggedInError(Error):
  """Raised when no login cookie/session is present."""

class ThirdPartyError(Error):
  """Raised when a call to an identity provider fails."""
# memcache TTL for per-cookie User objects and friend lists
USERINFO_CACHE_TIME = 120 # seconds
# Keys specific to Footprint
# FriendConnect site id; names the auth cookie ('fcauth' + key)
FRIENDCONNECT_KEY = '02962301966004179520'
def get_cookie(cookie_name):
  """Return the value of the named cookie from the CGI environment.

  Returns None when there is no HTTP_COOKIE header, no cookie with that
  name, or the cookie has no value.
  """
  if 'HTTP_COOKIE' in os.environ:
    cookies = os.environ['HTTP_COOKIE'].split('; ')
    for cookie in cookies:
      # BUGFIX: split only on the first '=' -- cookie values (e.g. signed
      # auth tokens) may themselves contain '=' and were being truncated.
      # The length guard also avoids an IndexError on a valueless cookie.
      name_value = cookie.split('=', 1)
      if name_value[0] == cookie_name and len(name_value) == 2:
        return name_value[1]
def get_user(request):
  """Return the logged-in User for this request, or None.

  Tries each supported login type in turn; constructed User objects are
  memcached per-cookie briefly to avoid refetching profile data on
  every request.
  """
  for cls in (TestUser, FriendConnectUser, FacebookUser):
    cookie = cls.get_cookie()
    if cookie:
      key = 'cookie:' + cookie
      user = memcache.get(key)
      if not user:
        try:
          user = cls(request)
          memcache.set(key, user, time = USERINFO_CACHE_TIME)
        except Exception:
          # This hides all errors from the Facebook client library.
          # (Narrowed from a bare 'except:' so system-exiting exceptions
          # such as KeyboardInterrupt/SystemExit still propagate.)
          # TODO(doll): Hand back an error message to the user
          logging.exception("Facebook or Friend Connect client exception.")
          return None
      return user
def get_usig(user):
  """Get a signature for the current user suitable for an XSRF token."""
  if not user:
    return None
  cookie = user.get_cookie()
  if cookie:
    return utils.signature(cookie)
class User(object):
  """A logged-in session's user: identity, profile info, and friends."""

  def __init__(self, account_type, user_id, display_name, thumbnail_url):
    # identity
    self.account_type = account_type
    self.user_id = user_id
    # display data
    self.display_name = display_name
    self.thumbnail_url = thumbnail_url
    # lazily populated caches
    self.user_info = None
    self.friends = None
    self.total_friends = None

  @staticmethod
  def get_current_user(self):
    # NOTE(review): a @staticmethod taking 'self' is unusual; kept as-is
    # for interface compatibility.  Subclasses are expected to override.
    raise NotImplementedError

  def get_user_info(self):
    """Fetch (and memoize) the datastore UserInfo for this user."""
    if not self.user_info:
      self.user_info = models.UserInfo.get_or_insert_user(self.account_type,
                                                          self.user_id)
    return self.user_info

  def load_friends(self):
    """Return this user's friends, via memcache or the provider's API."""
    suffix = self.account_type + ":" + self.user_id
    friends_key = 'friends:' + suffix
    count_key = 'total_friends:' + suffix
    self.friends = memcache.get(friends_key)
    self.total_friends = memcache.get(count_key)
    if not self.friends:
      # cache miss (or cached empty list): hit the provider and re-cache
      self.friends = self.get_friends_by_url()
      memcache.set(friends_key, self.friends, time = USERINFO_CACHE_TIME)
      memcache.set(count_key, self.total_friends, time = USERINFO_CACHE_TIME)
    return self.friends

  def get_friends_by_url(self):
    raise NotImplementedError

  @classmethod
  def is_logged_in(cls):
    """True when this login type's cookie is present."""
    return bool(cls.get_cookie())
class FriendConnectUser(User):
  """A friendconnect user."""
  BASE_URL = 'http://www.google.com/friendconnect/api/people/'
  USER_INFO_URL = BASE_URL + '@viewer/@self?fcauth=%s'
  FRIEND_URL = BASE_URL + '@viewer/@friends?fcauth=%s'
  def __init__(self, request):
    """Creates a friendconnect user from the current env, or raises error."""
    self.fc_user_info = self.get_fc_user_info()
    super(FriendConnectUser, self).__init__(
        models.UserInfo.FRIENDCONNECT,
        self.fc_user_info['entry']['id'],
        self.fc_user_info['entry']['displayName'],
        self.fc_user_info['entry']['thumbnailUrl'])
  def get_friends_by_url(self):
    """Fetch the viewer's friends from the FriendConnect REST API."""
    friend_cookie = self.get_cookie()
    if not friend_cookie:
      raise NotLoggedInError()
    self.friends = []
    url = self.FRIEND_URL % friend_cookie
    result = urlfetch.fetch(url)
    if result.status_code == 200:
      friend_info = simplejson.load(StringIO(result.content))
      self.total_friends = friend_info['totalResults']
      for friend_object in friend_info['entry']:
        friend = User(
            models.UserInfo.FRIENDCONNECT,
            friend_object['id'],
            friend_object['displayName'],
            friend_object['thumbnailUrl'])
        self.friends.append(friend)
    return self.friends
  @classmethod
  def get_cookie(cls):
    # the FriendConnect auth cookie is named 'fcauth<site id>'
    return get_cookie('fcauth' + FRIENDCONNECT_KEY)
  @classmethod
  def get_fc_user_info(cls):
    """Fetch the viewer's own profile from the FriendConnect REST API.

    Raises NotLoggedInError without a cookie and ThirdPartyError on a
    non-200 response.
    """
    friend_cookie = cls.get_cookie()
    if not friend_cookie:
      raise NotLoggedInError()
    # BUGFIX: a stray bare 'return' here made everything below
    # unreachable, so this always returned None and __init__ crashed
    # subscripting the result.
    url = cls.USER_INFO_URL % friend_cookie
    result = urlfetch.fetch(url)
    if result.status_code == 200:
      user_info = simplejson.load(StringIO(result.content))
      return user_info
    else:
      raise ThirdPartyError()
class FacebookUser(User):
  """A Facebook Connect user."""
  def __init__(self, request):
    """Creates a user from the Facebook Connect session, or raises error.

    Raises:
      NotLoggedInError: if the request carries no valid Connect session.
    """
    self.facebook = Facebook(deploy.get_facebook_key(),
                             deploy.get_facebook_secret())
    if not self.facebook.check_connect_session(request):
      raise NotLoggedInError()
    info = self.facebook.users.getInfo([self.facebook.uid],
                                       ['name', 'pic_square_with_logo'])[0]
    super(FacebookUser, self).__init__(
        models.UserInfo.FACEBOOK,
        self.facebook.uid,
        info['name'],
        info['pic_square_with_logo'])
  def get_friends_by_url(self):
    """Fetches this user's friends who also use the application.

    Sets self.total_friends as a side effect.

    Returns:
      List of User objects, possibly empty.
    Raises:
      NotLoggedInError: if no Facebook session is available.
    """
    if not self.facebook:
      raise NotLoggedInError()
    self.friends = []
    friend_ids = self.facebook.friends.getAppUsers()
    if not friend_ids:
      friend_ids = []  # Force return type to be a list, not a dict or None.
    self.total_friends = len(friend_ids)
    # TODO: handle >20 friends.
    # Pass the slice itself: the original wrapped it in another list,
    # sending a nested list ([[id, ...]]) to the API.
    friend_objects = self.facebook.users.getInfo(
        friend_ids[0:20], ['name', 'pic_square_with_logo'])
    for friend_object in friend_objects:
      friend = User(
          models.UserInfo.FACEBOOK,
          # str() instead of the original backticks (repr): repr() of a
          # py2 long uid appends a trailing 'L' to the string.
          str(friend_object['uid']),
          friend_object['name'],
          friend_object['pic_square_with_logo'])
      self.friends.append(friend)
    return self.friends
  @classmethod
  def get_cookie(cls):
    """Returns the Facebook Connect session cookie value, or None."""
    return get_cookie(deploy.get_facebook_key())
class TestUser(User):
  """A really simple user example."""
  def __init__(self, request):
    """Creates a user from the test cookie, or raises NotLoggedInError."""
    cookie = self.get_cookie()
    if not cookie:
      raise NotLoggedInError()
    # The cookie value doubles as both the user id and the display name.
    super(TestUser, self).__init__(
        models.UserInfo.TEST,
        cookie,
        cookie,
        'images/Event-Selected-Star.png')
  @classmethod
  def get_cookie(cls):
    """Returns the 'footprinttest' cookie value, or None."""
    return get_cookie('footprinttest')
  def get_friends_by_url(self):
    """Test users have no friends to fetch; returns an empty list."""
    # TODO: Something clever for testing--like all TestUser?
    return []
| Python |
#!/usr/bin/env python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Creates backup of tables.
"""
import sys
import logging
import getopt
import urllib2
import datetime
from datetime import date
def print_usage_exit(code):
""" print usage and exit """
print sys.modules['__main__'].__doc__ % sys.argv[0]
sys.stdout.flush()
sys.stderr.flush()
sys.exit(code)
def handle_response(url):
""" read the last key and the number of records copied """
try:
connection = urllib2.urlopen(url)
content = connection.read()
connection.close()
except urllib2.URLError, eobj:
logging.error('%s returned error %i, %s' % (url, eobj.code, eobj.msg))
sys.exit(2)
last_key = ""
rows = 0
lines = content.split("\n")
for line in lines:
field = line.split("\t")
if field[0] == "rows":
rows = int(field[1])
elif field[0] == "last_key":
last_key = field[1]
return last_key, rows
def parse_arguments(argv):
  """Parses command line arguments.

  Args:
    argv: full argument vector (argv[0] is the program name).
  Returns:
    (url, table, backup_version, restore_version, batch_size, digsig) tuple.
    Exits via print_usage_exit on invalid or conflicting options.
  """
  opts, args = getopt.getopt(
    argv[1:],
    'dh',
    ['debug', 'help', 'url=', 'table=',
     'backup_version=', 'restore_version=', 'digsig=', 'batch_size='
    ])
  def lzero(number_string):
    """Left-pads with '0' to at least two characters."""
    return number_string.rjust(2, '0')
  url = "http://footprint2009dev.appspot.com/export"
  table = ''
  # Bug fix: the original preset backup_version to today's date BEFORE
  # option parsing, so --restore_version always tripped the mutual-exclusion
  # check.  Default the backup stamp only when neither option was given.
  backup_version = ''
  restore_version = ''
  digsig = ''
  batch_size = 1000
  for option, value in opts:
    if option == '--debug':
      logging.getLogger().setLevel(logging.DEBUG)
    if option in ('-h', '--help'):
      print_usage_exit(0)
    if option == '--url':
      url = value
    if option == '--backup_version':
      if restore_version:
        sys.stderr.write('backup and restore are mutually exclusive\n')
        print_usage_exit(1)
      backup_version = value
    if option == '--restore_version':
      if backup_version:
        sys.stderr.write('backup and restore are mutually exclusive\n')
        print_usage_exit(1)
      restore_version = value
    if option == '--table':
      table = value
    if option == '--digsig':
      digsig = value
    if option == '--batch_size':
      batch_size = int(value)
      if batch_size <= 0:
        sys.stderr.write('batch_size must be 1 or larger\n')
        print_usage_exit(1)
  if not backup_version and not restore_version:
    # Default: a backup stamped with today's date as YYYYMMDD.
    tod = date.today()
    backup_version = str(tod.year) + lzero(str(tod.month)) + lzero(str(tod.day))
  # Positional arguments are intentionally ignored.
  return (url, table, backup_version, restore_version, batch_size, digsig)
def main(argv):
  """Drives a backup or restore in batches until a short batch is seen.

  Returns:
    0 on success (process may exit earlier on argument or fetch errors).
  """
  logging.basicConfig(
      level=logging.INFO,
      format='%(levelname)-8s %(asctime)s %(message)s')
  args = parse_arguments(argv)
  if any(arg is None for arg in args):
    print >> sys.stderr, 'Invalid arguments'
    print_usage_exit(1)
  base_url, table, backup_version, restore_version, batch_size, digsig = args
  if not base_url:
    print >> sys.stderr, 'specify url'
    print_usage_exit(1)
  if backup_version:
    # Backup: copy <table> into <table>_<version>.
    url = "%s/%s/%s_%s" % (base_url, table, table, backup_version)
  elif restore_version:
    # Restore: copy <table>_<version> back into <table>.
    url = "%s/%s_%s/%s" % (base_url, table, table, restore_version)
  else:
    print >> sys.stderr, 'specify either backup_version or restore_version'
    print_usage_exit(1)
  min_key = ''
  fetched = batch_size
  # A batch shorter than batch_size signals the end of the table.
  while fetched == batch_size:
    step_url = ("%s?digsig=%s&min_key=%s&limit=%s" %
                (url, str(digsig), str(min_key), str(batch_size)))
    log_key = min_key or "[start]"
    started = datetime.datetime.now()
    min_key, fetched = handle_response(step_url)
    elapsed = datetime.datetime.now() - started
    secs = "%d.%d" % (elapsed.seconds, elapsed.microseconds / 1000)
    logging.info('fetched %d in %s secs from %s', fetched, secs, log_key)
  return 0
# Script entry point: run main() and propagate its status code to the shell.
if __name__ == '__main__':
  sys.exit(main(sys.argv))
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import cookielib
import getpass
import logging
import md5
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
import tempfile
try:
import readline
except ImportError:
logging.debug("readline not found.")
pass
# The logging verbosity:
#  0: Errors only.
#  1: Status messages.
#  2: Info logs.
#  3: Debug logs.
verbosity = 1
# Max size (bytes) of a patch or base file; larger files are skipped
# with a "file_too_large" marker instead of being uploaded.
MAX_UPLOAD_SIZE = 900 * 1024
def AreYouSureOrExit(exit_if_no=True):
  """Asks the user for confirmation.

  Args:
    exit_if_no: if True, any answer other than 'y' aborts the process.
  Returns:
    True iff the user answered 'y' (case-insensitive).
  """
  answer = raw_input("Are you sure you want to continue?(y/N) ").strip()
  agreed = (answer.lower() == "y")
  if exit_if_no and not agreed:
    ErrorExit("User aborted")
  return agreed
def GetEmail(prompt):
  """Prompts the user for their email address and returns it.

  The last used email address is saved to a file and offered up as a suggestion
  to the user. If the user presses enter without typing in anything the last
  used email address is used. If the user enters a new address, it is saved
  for next time we prompt.
  """
  last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
  last_email = ""
  if os.path.exists(last_email_file_name):
    try:
      last_email_file = open(last_email_file_name, "r")
      try:
        # try/finally guarantees the handle is closed even if readline
        # raises (the original leaked it in that case).
        last_email = last_email_file.readline().strip("\n")
      finally:
        last_email_file.close()
      prompt += " [%s]" % last_email
    except IOError:
      # Best effort: a suggestion we cannot read is simply not offered.
      pass
  email = raw_input(prompt + ": ").strip()
  if email:
    try:
      last_email_file = open(last_email_file_name, "w")
      try:
        last_email_file.write(email)
      finally:
        last_email_file.close()
    except IOError:
      # Best effort: failing to remember the address is not fatal.
      pass
  else:
    email = last_email
  return email
def StatusUpdate(msg):
"""Print a status message to stdout.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print msg
def ErrorExit(msg):
  """Writes msg to stderr and aborts the process with exit status 1."""
  print >>sys.stderr, msg
  sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
  """Raised to indicate there was an error authenticating with ClientLogin."""
  def __init__(self, url, code, msg, headers, args):
    """Wraps the HTTP failure and exposes the ClientLogin 'Error' field.

    Args:
      args: dict parsed from the ClientLogin response body; must contain
        an "Error" key, surfaced as self.reason.
    """
    urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
    self.reason = args["Error"]
    self.args = args
class AbstractRpcServer(object):
  """Provides a common interface for a simple RPC server."""
  def __init__(self, host, auth_function, host_override=None, extra_headers={},
               save_cookies=False):
    """Creates a new HttpRpcServer.

    Args:
      host: The host to send requests to.
      auth_function: A function that takes no arguments and returns an
        (email, password) tuple when called. Will be called if authentication
        is required.
      host_override: The host header to send to the server (defaults to host).
      extra_headers: A dict of extra headers to append to every request.
      save_cookies: If True, save the authentication cookies to local disk.
        If False, use an in-memory cookiejar instead. Subclasses must
        implement this functionality. Defaults to False.
    """
    # NOTE(review): the mutable default {} for extra_headers is shared
    # across calls; it is only read here (_CreateRequest iterates it), but
    # callers must not mutate it — consider a None sentinel.
    self.host = host
    self.host_override = host_override
    self.auth_function = auth_function
    # Flips to True once _GetAuthCookie succeeds (or a subclass loads
    # saved cookies); Send() authenticates lazily based on this flag.
    self.authenticated = False
    self.extra_headers = extra_headers
    self.save_cookies = save_cookies
    self.opener = self._GetOpener()
    if self.host_override:
      logging.info("Server: %s; Host: %s", self.host, self.host_override)
    else:
      logging.info("Server: %s", self.host)
  def _GetOpener(self):
    """Returns an OpenerDirector for making HTTP requests.

    Returns:
      A urllib2.OpenerDirector object.
    """
    raise NotImplementedError()
  def _CreateRequest(self, url, data=None):
    """Creates a new urllib request, applying host_override and
    extra_headers."""
    logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
    req = urllib2.Request(url, data=data)
    if self.host_override:
      req.add_header("Host", self.host_override)
    for key, value in self.extra_headers.iteritems():
      req.add_header(key, value)
    return req
  def _GetAuthToken(self, email, password):
    """Uses ClientLogin to authenticate the user, returning an auth token.

    Args:
      email:    The user's email address
      password: The user's password

    Raises:
      ClientLoginError: If there was an error authenticating with ClientLogin.
      HTTPError: If there was some other form of HTTP error.

    Returns:
      The authentication token returned by ClientLogin.
    """
    account_type = "GOOGLE"
    if self.host.endswith(".google.com"):
      # Needed for use inside Google.
      account_type = "HOSTED"
    req = self._CreateRequest(
        url="https://www.google.com/accounts/ClientLogin",
        data=urllib.urlencode({
            "Email": email,
            "Passwd": password,
            "service": "ah",
            "source": "rietveld-codereview-upload",
            "accountType": account_type,
        }),
    )
    try:
      response = self.opener.open(req)
      response_body = response.read()
      # Response body is "Key=Value" lines; we need the "Auth" token.
      response_dict = dict(x.split("=")
                           for x in response_body.split("\n") if x)
      return response_dict["Auth"]
    except urllib2.HTTPError, e:
      if e.code == 403:
        # 403 from ClientLogin carries an "Error=<reason>" body; surface
        # it as a ClientLoginError so _Authenticate can branch on reason.
        body = e.read()
        response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
        raise ClientLoginError(req.get_full_url(), e.code, e.msg,
                               e.headers, response_dict)
      else:
        raise
  def _GetAuthCookie(self, auth_token):
    """Fetches authentication cookies for an authentication token.

    Args:
      auth_token: The authentication token returned by ClientLogin.

    Raises:
      HTTPError: If there was an error fetching the authentication cookies.
    """
    # This is a dummy value to allow us to identify when we're successful.
    continue_location = "http://localhost/"
    args = {"continue": continue_location, "auth": auth_token}
    req = self._CreateRequest("http://%s/_ah/login?%s" %
                              (self.host, urllib.urlencode(args)))
    try:
      response = self.opener.open(req)
    except urllib2.HTTPError, e:
      # The opener has no redirect handler, so the expected 302 surfaces
      # as an HTTPError; treat it as the response.
      response = e
    if (response.code != 302 or
        response.info()["location"] != continue_location):
      raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
                              response.headers, response.fp)
    self.authenticated = True
  def _Authenticate(self):
    """Authenticates the user.

    The authentication process works as follows:
     1) We get a username and password from the user
     2) We use ClientLogin to obtain an AUTH token for the user
        (see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
     3) We pass the auth token to /_ah/login on the server to obtain an
        authentication cookie. If login was successful, it tries to redirect
        us to the URL we provided.

    If we attempt to access the upload API without first obtaining an
    authentication cookie, it returns a 401 response and directs us to
    authenticate ourselves with ClientLogin.
    """
    # Up to three attempts; only "BadAuthentication" re-prompts, every
    # other ClientLogin failure reason prints a message and gives up.
    for i in range(3):
      credentials = self.auth_function()
      try:
        auth_token = self._GetAuthToken(credentials[0], credentials[1])
      except ClientLoginError, e:
        if e.reason == "BadAuthentication":
          print >>sys.stderr, "Invalid username or password."
          continue
        if e.reason == "CaptchaRequired":
          print >>sys.stderr, (
              "Please go to\n"
              "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
              "and verify you are a human. Then try again.")
          break
        if e.reason == "NotVerified":
          print >>sys.stderr, "Account not verified."
          break
        if e.reason == "TermsNotAgreed":
          print >>sys.stderr, "User has not agreed to TOS."
          break
        if e.reason == "AccountDeleted":
          print >>sys.stderr, "The user account has been deleted."
          break
        if e.reason == "AccountDisabled":
          print >>sys.stderr, "The user account has been disabled."
          break
        if e.reason == "ServiceDisabled":
          print >>sys.stderr, ("The user's access to the service has been "
                               "disabled.")
          break
        if e.reason == "ServiceUnavailable":
          print >>sys.stderr, "The service is not available; try again later."
          break
        raise
      self._GetAuthCookie(auth_token)
      return
  def Send(self, request_path, payload=None,
           content_type="application/octet-stream",
           timeout=None,
           **kwargs):
    """Sends an RPC and returns the response.

    Args:
      request_path: The path to send the request to, eg /api/appversion/create.
      payload: The body of the request, or None to send an empty request.
      content_type: The Content-Type header to use.
      timeout: timeout in seconds; default None i.e. no timeout.
        (Note: for large requests on OS X, the timeout doesn't work right.)
      kwargs: Any keyword arguments are converted into query string parameters.

    Returns:
      The response body, as a string.
    """
    # TODO: Don't require authentication.  Let the server say
    # whether it is necessary.
    if not self.authenticated:
      self._Authenticate()
    # The socket default timeout is process-global; restore it in finally.
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
      tries = 0
      while True:
        tries += 1
        args = dict(kwargs)
        url = "http://%s%s" % (self.host, request_path)
        if args:
          url += "?" + urllib.urlencode(args)
        req = self._CreateRequest(url=url, data=payload)
        req.add_header("Content-Type", content_type)
        try:
          f = self.opener.open(req)
          response = f.read()
          f.close()
          return response
        except urllib2.HTTPError, e:
          if tries > 3:
            raise
          elif e.code == 401:
            # Stale/missing auth cookie: re-authenticate and retry.
            self._Authenticate()
##          elif e.code >= 500 and e.code < 600:
##            # Server Error - try again.
##            continue
          else:
            raise
    finally:
      socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
  """Provides a simplified RPC-style interface for HTTP requests."""
  def _Authenticate(self):
    """Save the cookie jar after authentication."""
    super(HttpRpcServer, self)._Authenticate()
    if self.save_cookies:
      StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
      self.cookie_jar.save()
  def _GetOpener(self):
    """Returns an OpenerDirector that supports cookies and ignores redirects.

    No HTTPRedirectHandler is installed on purpose: _GetAuthCookie needs
    to observe the raw 302 from /_ah/login to detect success.

    Returns:
      A urllib2.OpenerDirector object.
    """
    opener = urllib2.OpenerDirector()
    opener.add_handler(urllib2.ProxyHandler())
    opener.add_handler(urllib2.UnknownHandler())
    opener.add_handler(urllib2.HTTPHandler())
    opener.add_handler(urllib2.HTTPDefaultErrorHandler())
    opener.add_handler(urllib2.HTTPSHandler())
    opener.add_handler(urllib2.HTTPErrorProcessor())
    if self.save_cookies:
      self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
      self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
      if os.path.exists(self.cookie_file):
        try:
          self.cookie_jar.load()
          # Loaded cookies count as being logged in; Send() will
          # re-authenticate on a 401 if they turn out to be stale.
          self.authenticated = True
          StatusUpdate("Loaded authentication cookies from %s" %
                       self.cookie_file)
        except (cookielib.LoadError, IOError):
          # Failed to load cookies - just ignore them.
          pass
      else:
        # Create an empty cookie file with mode 600
        fd = os.open(self.cookie_file, os.O_CREAT, 0600)
        os.close(fd)
      # Always chmod the cookie file so credentials stay owner-readable only
      os.chmod(self.cookie_file, 0600)
    else:
      # Don't save cookies across runs of update.py.
      self.cookie_jar = cookielib.CookieJar()
    opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
    return opener
# Command-line interface definition.  Options are grouped into Logging,
# Review server, Issue, and Patch sections; the parsed values are consumed
# by GetRpcServer and the upload flow below.
parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
parser.add_option("-y", "--assume_yes", action="store_true",
                  dest="assume_yes", default=False,
                  help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
                 dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
                 dest="verbose", default=1,
                 help="Print info level logs (default).")
group.add_option("--noisy", action="store_const", const=3,
                 dest="verbose", help="Print all logs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
                 default="codereview.appspot.com",
                 metavar="SERVER",
                 help=("The server to upload to. The format is host[:port]. "
                       "Defaults to 'codereview.appspot.com'."))
group.add_option("-e", "--email", action="store", dest="email",
                 metavar="EMAIL", default=None,
                 help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
                 metavar="HOST", default=None,
                 help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
                 dest="save_cookies", default=True,
                 help="Do not save authentication cookies to local disk.")
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
                 metavar="DESCRIPTION", default=None,
                 help="Optional description when creating an issue.")
group.add_option("--min_pylint_score", action="store", dest="min_pylint_score",
                 metavar="MIN_PYLINT_SCORE", default=None,
                 help="run pylint over changed files and require a min score.")
group.add_option("-f", "--description_file", action="store",
                 dest="description_file", metavar="DESCRIPTION_FILE",
                 default=None,
                 help="Optional path of a file that contains "
                      "the description when creating an issue.")
group.add_option("--description_editor", action="store_true",
                 dest="description_editor", metavar="DESCRIPTION_EDITOR",
                 default=False,
                 help="use an editor (EDITOR env variable) to get the "
                      "description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
                 metavar="REVIEWERS", default=None,
                 help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
                 metavar="CC", default=None,
                 help="Add CC (comma separated email addresses).")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
                 metavar="MESSAGE", default=None,
                 help="A message to identify the patch. "
                      "Will prompt if omitted.")
group.add_option("-i", "--issue", type="int", action="store",
                 metavar="ISSUE", default=None,
                 help="Issue number to which to add. Defaults to new issue.")
group.add_option("--download_base", action="store_true",
                 dest="download_base", default=False,
                 help="Base files will be downloaded by the server "
                      "(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
                 metavar="REV", default=None,
                 help="Branch/tree/revision to diff against (used by DVCS).")
group.add_option("--send_mail", action="store_true",
                 dest="send_mail", default=False,
                 help="Send notification email to reviewers.")
def GetRpcServer(options):
  """Returns an instance of an AbstractRpcServer.

  Returns:
    A new AbstractRpcServer, on which RPC calls can be made.
  """
  rpc_server_class = HttpRpcServer
  host = (options.host or options.server).lower()
  # If this is the dev_appserver, use fake authentication instead of
  # ClientLogin.
  if host == "localhost" or host.startswith("localhost:"):
    email = options.email
    if email is None:
      email = "test@example.com"
      logging.info("Using debug user %s. Override with --email" % email)
    server = rpc_server_class(
        options.server,
        lambda: (email, "password"),
        host_override=options.host,
        extra_headers={"Cookie":
                       'dev_appserver_login="%s:False"' % email},
        save_cookies=options.save_cookies)
    # Don't try to talk to ClientLogin.
    server.authenticated = True
    return server
  def GetUserCredentials():
    """Prompts the user for a username and password."""
    email = options.email
    if email is None:
      email = GetEmail("Email (login for uploading to %s)" % options.server)
    password = getpass.getpass("Password for %s: " % email)
    return (email, password)
  return rpc_server_class(options.server, GetUserCredentials,
                          host_override=options.host,
                          save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
  """Encode form fields for multipart/form-data.

  Args:
    fields: A sequence of (name, value) elements for regular form fields.
    files: A sequence of (name, filename, value) elements for data to be
      uploaded as files.
  Returns:
    (content_type, body) ready for httplib.HTTP instance.
  Source:
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
  """
  boundary = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
  crlf = '\r\n'
  parts = []
  for (name, value) in fields:
    parts.extend([
        '--' + boundary,
        'Content-Disposition: form-data; name="%s"' % name,
        '',
        value,
    ])
  for (name, filename, value) in files:
    parts.extend([
        '--' + boundary,
        'Content-Disposition: form-data; name="%s"; filename="%s"' %
        (name, filename),
        'Content-Type: %s' % GetContentType(filename),
        '',
        value,
    ])
  # Closing boundary, plus a trailing CRLF after the final join.
  parts.append('--' + boundary + '--')
  parts.append('')
  content_type = 'multipart/form-data; boundary=%s' % boundary
  return content_type, crlf.join(parts)
def GetContentType(filename):
  """Helper to guess the content-type from the filename.

  Falls back to application/octet-stream when the extension is unknown.
  """
  guessed, _ = mimetypes.guess_type(filename)
  return guessed or 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
# (On POSIX the argument list is exec'd directly, no shell involved.)
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
universal_newlines=True):
"""Executes a command and returns the output from stdout and the return code.
Args:
command: Command to execute.
print_output: If True, the output is printed to stdout.
If False, both stdout and stderr are ignored.
universal_newlines: Use universal_newlines flag (default: True).
Returns:
Tuple (output, return code)
"""
logging.info("Running %s", command)
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=use_shell, universal_newlines=universal_newlines)
if print_output:
output_array = []
while True:
line = p.stdout.readline()
if not line:
break
print line.strip("\n")
output_array.append(line)
output = "".join(output_array)
else:
output = p.stdout.read()
p.wait()
errout = p.stderr.read()
if print_output and errout:
print >>sys.stderr, errout
p.stdout.close()
p.stderr.close()
return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
             print_output=False, ignore_retcode=False):
  """Runs command and returns its stdout.

  Exits via ErrorExit on a non-zero status (unless ignore_retcode) or on
  empty output (unless silent_ok).
  """
  data, retcode = RunShellWithReturnCode(command, print_output,
                                         universal_newlines)
  if retcode and not ignore_retcode:
    ErrorExit("Got error status from %s:\n%s" % (command, data))
  if not (data or silent_ok):
    ErrorExit("No output from %s" % command)
  return data
class VersionControlSystem(object):
"""Abstract base class providing an interface to the VCS."""
def __init__(self, options):
"""Constructor.
Args:
options: Command line options.
"""
self.options = options
def GenerateDiff(self, args):
"""Return the current diff as a string.
Args:
args: Extra arguments to pass to the diff command.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def CheckForUnknownFiles(self):
"""Show an "are you sure?" prompt if there are unknown files."""
unknown_files = self.GetUnknownFiles()
if unknown_files:
print "The following files are not added to version control:"
for line in unknown_files:
print line
AreYouSureOrExit()
def GetBaseFile(self, filename):
"""Get the content of the upstream version of a file.
Returns:
A tuple (base_content, new_content, is_binary, status)
base_content: The contents of the base file.
new_content: For text files, this is empty. For binary files, this is
the contents of the new file, since the diff output won't contain
information to reconstruct the current file.
is_binary: True iff the file is binary.
status: The status of the file.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetBaseFiles(self, diff):
"""Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
"""
files = {}
for line in diff.splitlines(True):
if line.startswith('Index:') or line.startswith('Property changes on:'):
unused, filename = line.split(':', 1)
# On Windows if a file has property changes its filename uses '\'
# instead of '/'.
filename = filename.strip().replace('\\', '/')
files[filename] = self.GetBaseFile(filename)
return files
def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
files):
"""Uploads the base files (and if necessary, the current ones as well)."""
def UploadFile(filename, file_id, content, is_binary, status, is_base):
"""Uploads a file to the server."""
file_too_large = False
if is_base:
type = "base"
else:
type = "current"
if len(content) > MAX_UPLOAD_SIZE:
print ("Not uploading the %s file for %s because it's too large." %
(type, filename))
file_too_large = True
content = ""
checksum = md5.new(content).hexdigest()
if options.verbose > 0 and not file_too_large:
print "Uploading %s file for %s" % (type, filename)
url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
form_fields = [("filename", filename),
("status", status),
("checksum", checksum),
("is_binary", str(is_binary)),
("is_current", str(not is_base)),
]
if file_too_large:
form_fields.append(("file_too_large", "1"))
if options.email:
form_fields.append(("user", options.email))
ctype, body = EncodeMultipartFormData(form_fields,
[("data", filename, content)])
response_body = rpc_server.Send(url, body,
content_type=ctype)
if not response_body.startswith("OK"):
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
patches = dict()
[patches.setdefault(v, k) for k, v in patch_list]
for filename in patches.keys():
base_content, new_content, is_binary, status = files[filename]
file_id_str = patches.get(filename)
if file_id_str.find("nobase") != -1:
base_content = None
file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
file_id = int(file_id_str)
if base_content != None:
UploadFile(filename, file_id, base_content, is_binary, status, True)
if new_content != None:
UploadFile(filename, file_id, new_content, is_binary, status, False)
def IsImage(self, filename):
"""Returns true if the filename has an image extension."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False
return mimetype.startswith("image/")
class SubversionVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Subversion."""
def __init__(self, options):
super(SubversionVCS, self).__init__(options)
if self.options.revision:
match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
if not match:
ErrorExit("Invalid Subversion revision %s." % self.options.revision)
self.rev_start = match.group(1)
self.rev_end = match.group(3)
else:
self.rev_start = self.rev_end = None
# Cache output from "svn list -r REVNO dirname".
# Keys: dirname, Values: 2-tuple (ouput for start rev and end rev).
self.svnls_cache = {}
# SVN base URL is required to fetch files deleted in an older revision.
# Result is cached to not guess it over and over again in GetBaseFile().
required = self.options.download_base or self.options.revision is not None
self.svn_base = self._GuessBase(required)
  def GuessBase(self, required):
    """Wrapper for _GuessBase.

    Args:
      required: unused here; the URL was already guessed (and cached on
        self.svn_base) by __init__.
    Returns:
      The cached SVN base URL, or None if it could not be guessed.
    """
    return self.svn_base
  def _GuessBase(self, required):
    """Returns the SVN base URL.

    Parses "svn info" output for the URL: line, then maps known hosts
    (svn.python.org, svn.collab.net, *.googlecode.com) to their raw
    "checkout" URL forms.

    Args:
      required: If true, exits if the url can't be guessed, otherwise None is
        returned.
    """
    info = RunShell(["svn", "info"])
    for line in info.splitlines():
      words = line.split()
      if len(words) == 2 and words[0] == "URL:":
        url = words[1]
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
        username, netloc = urllib.splituser(netloc)
        if username:
          logging.info("Removed username from base URL")
        if netloc.endswith("svn.python.org"):
          if netloc == "svn.python.org":
            # path[9:] keeps the trailing "/..." segment after "/projects"
            # -- presumably intentional, so the path stays absolute.
            if path.startswith("/projects/"):
              path = path[9:]
          # NOTE(review): splituser() above already stripped "user@" from
          # netloc, so this comparison against "pythondev@..." looks like
          # it can never match -- verify against real svn.python.org URLs.
          elif netloc != "pythondev@svn.python.org":
            ErrorExit("Unrecognized Python URL: %s" % url)
          base = "http://svn.python.org/view/*checkout*%s/" % path
          logging.info("Guessed Python base = %s", base)
        elif netloc.endswith("svn.collab.net"):
          # path[6:] keeps the trailing "/..." segment after "/repos".
          if path.startswith("/repos/"):
            path = path[6:]
          base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
          logging.info("Guessed CollabNet base = %s", base)
        elif netloc.endswith(".googlecode.com"):
          # Google Code serves raw files over plain http.
          path = path + "/"
          base = urlparse.urlunparse(("http", netloc, path, params,
                                      query, fragment))
          logging.info("Guessed Google Code base = %s", base)
        else:
          path = path + "/"
          base = urlparse.urlunparse((scheme, netloc, path, params,
                                      query, fragment))
          logging.info("Guessed base = %s", base)
        return base
    if required:
      ErrorExit("Can't find URL in output from svn info")
    return None
def GenerateDiff(self, args):
cmd = ["svn", "diff"]
if self.options.revision:
cmd += ["-r", self.options.revision]
if "--diff-cmd" not in args and os.path.isfile("/usr/bin/diff"):
# force /usr/bin/diff as the diff command used by subversion
# to override user settings (fixes issue with colordiff)
cmd += ["--diff-cmd", "/usr/bin/diff"]
cmd.extend(args)
data = RunShell(cmd)
count = 0
for line in data.splitlines():
if line.startswith("Index:") or line.startswith("Property changes on:"):
count += 1
logging.info(line)
if not count:
ErrorExit("No valid patches found in output from svn diff")
return data
def _CollapseKeywords(self, content, keyword_str):
"""Collapses SVN keywords."""
# svn cat translates keywords but svn diff doesn't. As a result of this
# behavior patching.PatchChunks() fails with a chunk mismatch error.
# This part was originally written by the Review Board development team
# who had the same problem (http://reviews.review-board.org/r/276/).
# Mapping of keywords to known aliases
svn_keywords = {
# Standard keywords
'Date': ['Date', 'LastChangedDate'],
'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
'Author': ['Author', 'LastChangedBy'],
'HeadURL': ['HeadURL', 'URL'],
'Id': ['Id'],
# Aliases
'LastChangedDate': ['LastChangedDate', 'Date'],
'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
'LastChangedBy': ['LastChangedBy', 'Author'],
'URL': ['URL', 'HeadURL'],
}
def repl(m):
if m.group(2):
return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
return "$%s$" % m.group(1)
keywords = [keyword
for name in keyword_str.split(" ")
for keyword in svn_keywords.get(name, [])]
return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
def GetUnknownFiles(self):
  """Returns the raw "svn status" lines for unversioned ('?') files."""
  output = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
  return [entry for entry in output.split("\n")
          if entry and entry[0] == "?"]
def ReadFile(self, filename):
  """Returns the entire contents of a file, read in binary mode."""
  result = ""
  handle = open(filename, 'rb')
  try:
    result = handle.read()
  finally:
    # Close even if read() raises.
    handle.close()
  return result
def GetStatus(self, filename):
  """Returns the svn status prefix (e.g. "M ", "A ", "D ") for a file.

  Without --revision the value comes straight from "svn status"; with a
  revision range it is derived by comparing "svn list" output at the start
  and end revisions.
  """
  if not self.options.revision:
    status = RunShell(["svn", "status", "--ignore-externals", filename])
    if not status:
      ErrorExit("svn status returned no output for %s" % filename)
    status_lines = status.splitlines()
    # If file is in a cl, the output will begin with
    # "\n--- Changelist 'cl_name':\n". See
    # http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
    if (len(status_lines) == 3 and
        not status_lines[0] and
        status_lines[1].startswith("--- Changelist")):
      status = status_lines[2]
    else:
      status = status_lines[0]
  # If we have a revision to diff against we need to run "svn list"
  # for the old and the new revision and compare the results to get
  # the correct status for a file.
  else:
    dirname, relfilename = os.path.split(filename)
    # Cache both directory listings so each directory is listed at most
    # once per run, however many of its files appear in the diff.
    if dirname not in self.svnls_cache:
      cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
      out, returncode = RunShellWithReturnCode(cmd)
      if returncode:
        ErrorExit("Failed to get status for %s." % filename)
      old_files = out.splitlines()
      args = ["svn", "list"]
      if self.rev_end:
        args += ["-r", self.rev_end]
      cmd = args + [dirname or "."]
      out, returncode = RunShellWithReturnCode(cmd)
      if returncode:
        ErrorExit("Failed to run command %s" % cmd)
      self.svnls_cache[dirname] = (old_files, out.splitlines())
    old_files, new_files = self.svnls_cache[dirname]
    # Only in the old revision: deleted; in both: modified;
    # only in the new revision: added.
    if relfilename in old_files and relfilename not in new_files:
      status = "D "
    elif relfilename in old_files and relfilename in new_files:
      status = "M "
    else:
      status = "A "
  return status
def GetBaseFile(self, filename):
  """Fetches base/new content and metadata for one file in the diff.

  Args:
    filename: Path of the file, relative to the working copy root.

  Returns:
    A (base_content, new_content, is_binary, status) tuple, where status
    is the first five columns of the svn status output. new_content is
    only populated for binary image files whose content the diff cannot
    carry.

  Exits the process if svn reports a status this code does not handle.
  """
  status = self.GetStatus(filename)
  base_content = None
  new_content = None
  # If a file is copied its status will be "A +", which signifies
  # "addition-with-history". See "svn st" for more information. We need to
  # upload the original file or else diff parsing will fail if the file was
  # edited.
  if status[0] == "A" and status[3] != "+":
    # We'll need to upload the new content if we're adding a binary file
    # since diff's output won't contain it.
    mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
                        silent_ok=True)
    base_content = ""
    is_binary = bool(mimetype) and not mimetype.startswith("text/")
    if is_binary and self.IsImage(filename):
      new_content = self.ReadFile(filename)
  elif (status[0] in ("M", "D", "R") or
        (status[0] == "A" and status[3] == "+") or  # Copied file.
        (status[0] == " " and status[1] == "M")):  # Property change.
    args = []
    if self.options.revision:
      url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
    else:
      # Don't change filename, it's needed later.
      url = filename
      args += ["-r", "BASE"]
    cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
    mimetype, returncode = RunShellWithReturnCode(cmd)
    if returncode:
      # File does not exist in the requested revision.
      # Reset mimetype, it contains an error message.
      mimetype = ""
    get_base = False
    is_binary = bool(mimetype) and not mimetype.startswith("text/")
    if status[0] == " ":
      # Empty base content just to force an upload.
      base_content = ""
    elif is_binary:
      if self.IsImage(filename):
        get_base = True
        if status[0] == "M":
          if not self.rev_end:
            new_content = self.ReadFile(filename)
          else:
            url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
            new_content = RunShell(["svn", "cat", url],
                                   universal_newlines=True, silent_ok=True)
      else:
        # Non-image binary: upload an empty base to force a full upload.
        base_content = ""
    else:
      get_base = True
    if get_base:
      # Binary content must not have its newlines translated.
      if is_binary:
        universal_newlines = False
      else:
        universal_newlines = True
      if self.rev_start:
        # "svn cat -r REV delete_file.txt" doesn't work. cat requires
        # the full URL with "@REV" appended instead of using "-r" option.
        url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
        base_content = RunShell(["svn", "cat", url],
                                universal_newlines=universal_newlines,
                                silent_ok=True)
      else:
        base_content = RunShell(["svn", "cat", filename],
                                universal_newlines=universal_newlines,
                                silent_ok=True)
      if not is_binary:
        # Collapse expanded keywords so the base matches what "svn diff"
        # produced (see _CollapseKeywords).
        args = []
        if self.rev_start:
          url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
        else:
          url = filename
          args += ["-r", "BASE"]
        cmd = ["svn"] + args + ["propget", "svn:keywords", url]
        keywords, returncode = RunShellWithReturnCode(cmd)
        if keywords and not returncode:
          base_content = self._CollapseKeywords(base_content, keywords)
  else:
    StatusUpdate("svn status returned unexpected output: %s" % status)
    sys.exit(1)
  return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Git."""

  def __init__(self, options):
    super(GitVCS, self).__init__(options)
    # Map of filename -> hash of base file.
    self.base_hashes = {}

  def GenerateDiff(self, extra_args):
    """Returns a git diff reshaped into svn style, recording base hashes.

    This is more complicated than svn's GenerateDiff because we must
    convert the diff output to include an svn-style "Index:" line as well
    as record the hashes of the base files, so we can upload them along
    with our diff.
    """
    if self.options.revision:
      extra_args = [self.options.revision] + extra_args
    raw_diff = RunShell(["git", "diff", "--full-index"] + extra_args)
    converted = []
    num_files = 0
    current_file = None
    for line in raw_diff.splitlines():
      header = re.match(r"diff --git a/(.*) b/.*$", line)
      if header:
        # Replace git's per-file header with an svn-style "Index:" line.
        num_files += 1
        current_file = header.group(1)
        converted.append("Index: %s\n" % current_file)
        continue
      # The "index" line in a git diff looks like this (long hashes elided):
      #   index 82c0d44..b2cee3f 100755
      # We want to save the left hash, as that identifies the base file.
      index_line = re.match(r"index (\w+)\.\.", line)
      if index_line:
        self.base_hashes[current_file] = index_line.group(1)
      converted.append(line + "\n")
    if not num_files:
      ErrorExit("No valid patches found in output from git diff")
    return "".join(converted)

  def GetUnknownFiles(self):
    """Returns files git reports as untracked (standard excludes apply)."""
    out = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
                   silent_ok=True)
    return out.splitlines()

  def GetBaseFile(self, filename):
    """Fetches the base revision of filename via "git show"."""
    base_hash = self.base_hashes[filename]
    base_content = None
    new_content = None
    is_binary = False
    if base_hash == "0" * 40:  # All-zero hash indicates no base file.
      status = "A"
      base_content = ""
    else:
      status = "M"
      base_content, returncode = RunShellWithReturnCode(
          ["git", "show", base_hash])
      if returncode:
        ErrorExit("Got error status from 'git show %s'" % base_hash)
    return (base_content, new_content, is_binary, status)
class MercurialVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Mercurial."""

  def __init__(self, options, repo_dir):
    super(MercurialVCS, self).__init__(options)
    # Absolute path to repository (we can be in a subdir)
    self.repo_dir = os.path.normpath(repo_dir)
    # Compute the subdir
    cwd = os.path.normpath(os.getcwd())
    assert cwd.startswith(self.repo_dir)
    self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
    if self.options.revision:
      self.base_rev = self.options.revision
    else:
      # Default base is the working copy's parent changeset.
      self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()

  def _GetRelPath(self, filename):
    """Get relative path of a file according to the current directory,
    given its logical path in the repo."""
    assert filename.startswith(self.subdir), filename
    return filename[len(self.subdir):].lstrip(r"\/")

  def GenerateDiff(self, extra_args):
    """Returns "hg diff --git" output rewritten to look like svn diff."""
    # If no file specified, restrict to the current subdir
    extra_args = extra_args or ["."]
    cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
    data = RunShell(cmd, silent_ok=True)
    svndiff = []
    filecount = 0
    for line in data.splitlines():
      m = re.match("diff --git a/(\S+) b/(\S+)", line)
      if m:
        # Modify line to make it look like as it comes from svn diff.
        # With this modification no changes on the server side are required
        # to make upload.py work with Mercurial repos.
        # NOTE: for proper handling of moved/copied files, we have to use
        # the second filename.
        filename = m.group(2)
        svndiff.append("Index: %s" % filename)
        svndiff.append("=" * 67)
        filecount += 1
        logging.info(line)
      else:
        svndiff.append(line)
    if not filecount:
      ErrorExit("No valid patches found in output from hg diff")
    return "\n".join(svndiff) + "\n"

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    args = []
    status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
                      silent_ok=True)
    unknown_files = []
    for line in status.splitlines():
      st, fn = line.split(" ", 1)
      if st == "?":
        unknown_files.append(fn)
    return unknown_files

  def GetBaseFile(self, filename):
    """Returns (base_content, new_content, is_binary, status) for a file.

    new_content is only kept for binary image files; it is reset to None
    for everything else before returning.
    """
    # "hg status" and "hg cat" both take a path relative to the current subdir
    # rather than to the repo root, but "hg diff" has given us the full path
    # to the repo root.
    base_content = ""
    new_content = None
    is_binary = False
    oldrelpath = relpath = self._GetRelPath(filename)
    # "hg status -C" returns two lines for moved/copied files, one otherwise
    out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
    out = out.splitlines()
    # HACK: strip error message about missing file/directory if it isn't in
    # the working copy
    if out[0].startswith('%s: ' % relpath):
      out = out[1:]
    if len(out) > 1:
      # Moved/copied => considered as modified, use old filename to
      # retrieve base contents
      oldrelpath = out[1].strip()
      status = "M"
    else:
      status, _ = out[0].split(' ', 1)
    if status != "A":
      base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
                              silent_ok=True)
      is_binary = "\0" in base_content  # Mercurial's heuristic
    if status != "R":
      new_content = open(relpath, "rb").read()
      is_binary = is_binary or "\0" in new_content
    if is_binary and base_content:
      # Fetch again without converting newlines
      base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
                              silent_ok=True, universal_newlines=False)
    if not is_binary or not self.IsImage(relpath):
      new_content = None
    return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
  """Splits a patch into separate pieces for each file.

  Args:
    data: A string containing the output of svn diff.

  Returns:
    A list of 2-tuple (filename, text) where text is the svn diff output
    pertaining to filename.
  """
  patches = []
  current_name = None
  current_lines = []
  for line in data.splitlines(True):
    boundary_name = None
    if line.startswith('Index:'):
      unused, boundary_name = line.split(':', 1)
      boundary_name = boundary_name.strip()
    elif line.startswith('Property changes on:'):
      unused, prop_name = line.split(':', 1)
      # When a file is modified, paths use '/' between directories, however
      # when a property is modified '\' is used on Windows. Make them the same
      # otherwise the file shows up twice.
      prop_name = prop_name.strip().replace('\\', '/')
      if prop_name != current_name:
        # File has property changes but no modifications, create a new diff.
        boundary_name = prop_name
    if boundary_name:
      # Flush the previous file's chunk and start a new one.
      if current_name and current_lines:
        patches.append((current_name, ''.join(current_lines)))
      current_name = boundary_name
      current_lines = [line]
    else:
      current_lines.append(line)
  if current_name and current_lines:
    patches.append((current_name, ''.join(current_lines)))
  return patches
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
  """Uploads a separate patch for each file in the diff output.

  Args:
    issue: Issue id (string or int).
    rpc_server: Server object used to post each patch.
    patchset: Patchset id the patches belong to.
    data: Full diff text; split per file by SplitPatch.
    options: Parsed command-line options (download_base is consulted).

  Returns a list of [patch_key, filename] for each file.
  """
  patches = SplitPatch(data)
  rv = []
  for patch in patches:
    # Oversized patches are skipped rather than failing the whole upload.
    if len(patch[1]) > MAX_UPLOAD_SIZE:
      print ("Not uploading the patch for " + patch[0] +
             " because the file is too large.")
      continue
    form_fields = [("filename", patch[0])]
    if not options.download_base:
      form_fields.append(("content_upload", "1"))
    files = [("data", "data.diff", patch[1])]
    ctype, body = EncodeMultipartFormData(form_fields, files)
    url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
    print "Uploading patch for " + patch[0]
    response_body = rpc_server.Send(url, body, content_type=ctype)
    lines = response_body.splitlines()
    # The server replies "OK" followed by the new patch key on success.
    if not lines or lines[0] != "OK":
      StatusUpdate(" --> %s" % response_body)
      sys.exit(1)
    rv.append([lines[1], patch[0]])
  return rv
def GuessVCS(options):
  """Helper to guess the version control system.

  This examines the current directory, guesses which VersionControlSystem
  we're using, and returns an instance of the appropriate class. Exit with an
  error if we can't figure it out.

  Returns:
    A VersionControlSystem instance. Exits if the VCS can't be guessed.
  """
  # Mercurial has a command to get the base directory of a repository
  # Try running it, but don't die if we don't have hg installed.
  # NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
  try:
    out, returncode = RunShellWithReturnCode(["hg", "root"])
    if returncode == 0:
      return MercurialVCS(options, out.strip())
  # Python 2 tuple unpacking of the OSError's (errno, message) args.
  except OSError, (errno, message):
    if errno != 2:  # ENOENT -- they don't have hg installed.
      raise
  # Subversion has a .svn in all working directories.
  if os.path.isdir('.svn'):
    logging.info("Guessed VCS = Subversion")
    return SubversionVCS(options)
  # Git has a command to test if you're in a git tree.
  # Try running it, but don't die if we don't have git installed.
  try:
    out, returncode = RunShellWithReturnCode(["git", "rev-parse",
                                              "--is-inside-work-tree"])
    if returncode == 0:
      return GitVCS(options)
  except OSError, (errno, message):
    if errno != 2:  # ENOENT -- they don't have git installed.
      raise
  ErrorExit(("Could not guess version control system. "
             "Are you in a working copy directory?"))
def RealMain(argv, data=None):
  """The real main function.

  Args:
    argv: Command line arguments.
    data: Diff contents. If None (default) the diff is generated by
      the VersionControlSystem implementation returned by GuessVCS().

  Returns:
    A 2-tuple (issue id, patchset id).
    The patchset id is None if the base files are not uploaded by this
    script (applies only to SVN checkouts).
  """
  logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
                              "%(lineno)s %(message)s "))
  os.environ['LC_ALL'] = 'C'
  options, args = parser.parse_args(argv[1:])
  global verbosity
  verbosity = options.verbose
  if verbosity >= 3:
    logging.getLogger().setLevel(logging.DEBUG)
  elif verbosity >= 2:
    logging.getLogger().setLevel(logging.INFO)
  vcs = GuessVCS(options)
  if isinstance(vcs, SubversionVCS):
    # base field is only allowed for Subversion.
    # Note: Fetching base files may become deprecated in future releases.
    base = vcs.GuessBase(options.download_base)
  else:
    base = None
  if not base and options.download_base:
    # NOTE(review): this assignment is a no-op -- the condition already
    # requires options.download_base to be true. Upstream variants of this
    # script disable base-file downloads here instead; confirm the intent.
    options.download_base = True
    logging.info("Enabled upload of base file")
  if not options.assume_yes:
    vcs.CheckForUnknownFiles()
  if data is None:
    data = vcs.GenerateDiff(args)
  files = vcs.GetBaseFiles(data)
  # Optional pylint gate: ask for confirmation before uploading when any
  # .py file in the change scores below --min_pylint_score.
  if options.min_pylint_score:
    print "running pylint..."
    has_low_score = 0
    for file in files:
      if re.search(r'[.]py$', file):
        print "pylinting "+file+"..."
        res = RunShell(["pylint", file], silent_ok=True, ignore_retcode=True)
        match = re.search(r'Your code has been rated at ([0-9.-]+)', res)
        try:
          score = float(match.group(1))
        except:
          # No rating line found (e.g. pylint crashed): treat as failing.
          score = -1.0
        print file,"rated at",score
        if score < float(options.min_pylint_score):
          has_low_score += 1
    if has_low_score > 0:
      print "pylint reported", has_low_score, \
            "files with scores below", options.min_pylint_score
      AreYouSureOrExit()
  if verbosity >= 1:
    print "Upload server:", options.server, "(change with -s/--server)"
  if options.issue:
    prompt = "Message describing this patch set: "
  else:
    prompt = "New issue subject: "
  message = options.message or raw_input(prompt).strip()
  if not message:
    ErrorExit("A non-empty message is required")
  rpc_server = GetRpcServer(options)
  form_fields = [("subject", message)]
  if base:
    form_fields.append(("base", base))
  if options.issue:
    form_fields.append(("issue", str(options.issue)))
  if options.email:
    form_fields.append(("user", options.email))
  if options.reviewers:
    # Addresses with a domain are rejected unless the domain has exactly
    # one dot.
    for reviewer in options.reviewers.split(','):
      if "@" in reviewer and not reviewer.split("@")[1].count(".") == 1:
        ErrorExit("Invalid email address: %s" % reviewer)
    form_fields.append(("reviewers", options.reviewers))
  if options.cc:
    for cc in options.cc.split(','):
      if "@" in cc and not cc.split("@")[1].count(".") == 1:
        ErrorExit("Invalid email address: %s" % cc)
    form_fields.append(("cc", options.cc))
  # The description can come from a flag, a file, or an interactive editor;
  # the three sources are mutually exclusive.
  description = options.description
  if options.description_file:
    if options.description:
      ErrorExit("Can't specify description and description_file")
    file = open(options.description_file, 'r')
    description = file.read()
    file.close()
  if options.description_editor:
    if options.description:
      ErrorExit("Can't specify description and description_editor")
    if options.description_file:
      ErrorExit("Can't specify description_file and description_editor")
    if 'EDITOR' not in os.environ:
      ErrorExit("Please set the EDITOR environment variable.")
    editor = os.environ['EDITOR']
    if editor == None or editor == "":
      ErrorExit("Please set the EDITOR environment variable.")
    # Seed a temp file with a template, let the user edit it, then read
    # the result back as the description.
    tempfh, filename = tempfile.mkstemp()
    msg = "demo URL: http://your-url/foo/\ndescription: (start on next line)\n"
    os.write(tempfh, msg)
    os.close(tempfh)
    print "running EDITOR:", editor, filename
    cmd = editor + " " + filename
    subprocess.call(cmd, shell=True)
    file = open(filename, 'r')
    description = file.read()
    file.close()
    os.unlink(filename)
    print description
  if description:
    form_fields.append(("description", description))
  # Send a hash of all the base file so the server can determine if a copy
  # already exists in an earlier patchset.
  base_hashes = ""
  for file, info in files.iteritems():
    if not info[0] is None:
      checksum = md5.new(info[0]).hexdigest()
      if base_hashes:
        base_hashes += "|"
      base_hashes += checksum + ":" + file
  form_fields.append(("base_hashes", base_hashes))
  # If we're uploading base files, don't send the email before the uploads, so
  # that it contains the file status.
  if options.send_mail and options.download_base:
    form_fields.append(("send_mail", "1"))
  if not options.download_base:
    form_fields.append(("content_upload", "1"))
  if len(data) > MAX_UPLOAD_SIZE:
    print "Patch is large, so uploading file patches separately."
    uploaded_diff_file = []
    form_fields.append(("separate_patches", "1"))
  else:
    uploaded_diff_file = [("data", "data.diff", data)]
  ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
  response_body = rpc_server.Send("/upload", body, content_type=ctype)
  patchset = None
  if not options.download_base or not uploaded_diff_file:
    lines = response_body.splitlines()
    if len(lines) >= 2:
      msg = lines[0]
      patchset = lines[1].strip()
      patches = [x.split(" ", 1) for x in lines[2:]]
    else:
      msg = response_body
  else:
    msg = response_body
  StatusUpdate(msg)
  if not response_body.startswith("Issue created.") and \
     not response_body.startswith("Issue updated."):
    sys.exit(0)
  issue = msg[msg.rfind("/")+1:]
  if not uploaded_diff_file:
    # The patch was too large to send inline; upload it per-file now.
    result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
    if not options.download_base:
      patches = result
  if not options.download_base:
    # NOTE(review): if the server response had fewer than two lines above,
    # `patches` may be unbound here -- confirm the expected response format.
    vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
  if options.send_mail:
    rpc_server.Send("/" + issue + "/mail", payload="")
  return issue, patchset
# App Engine host used as the Footprint review/upload server (see main()).
FPREVIEW_ADDR = "footprint2009reviews.appspot.com"
def main():
try:
if len(sys.argv) == 1:
print "Usage:", sys.argv[0], "<email address of primary reviewer>"
print "(automatically cc's", FPREVIEW_ADDR, ")"
sys.exit(1)
args = [sys.argv[0], "-s", "footprint2009reviews.appspot.com"]
args.append("--cc=footprint-engreviews@googlegroups.com")
args.append("--description_editor")
args.append("--send_mail")
args.append("--min_pylint_score")
# we're starting with 9.0
args.append("9.0")
args.append("-r")
email = sys.argv[1]
if email.find("@") == -1:
email += "@gmail.com"
print >>sys.stderr, "*** sending to "+email+" for review. (note: @gmail.com)"
args.append(email)
sys.argv = args + sys.argv[2:]
if "PYLINTRC" not in os.environ:
testpath = os.getcwd()
while testpath != "" and not os.path.exists(testpath + "/pylintrc"):
testpath = re.sub(r'/[^/]*$', '', testpath)
print "checking for "+testpath + "/pylintrc"
if testpath == "":
print >>sys.stderr, "ERROR: couldn't find 'pylintrc' file."
sys.exit(1)
os.environ['PYLINTRC'] = testpath + "/pylintrc"
print "guessing PYLINTRC="+os.environ['PYLINTRC']
print "running: ", " ".join(sys.argv)
RealMain(sys.argv)
except KeyboardInterrupt:
print
StatusUpdate("Interrupted.")
sys.exit(1)
# Allow use both as a script and as an importable module.
if __name__ == "__main__":
  main()
| Python |
#!/usr/bin/env python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Exports TSV data over HTTP.
Usage:
%s [flags]
--url=<string> URL endpoint to get exported data. (Required)
  --batch_size=<int> Number of Entity objects to request in each fetch; the
                     larger the entities, the smaller the batch size should
                     be. (Default 1000)
--filename=<path> Path to the TSV file to export. (Required)
--digsig=<string> value passed to endpoint permitting export
The exit status will be 0 on success, non-zero on failure.
"""
import sys
import re
import logging
import getopt
import urllib2
import datetime
def PrintUsageExit(code):
  """Prints the module docstring (with argv[0] substituted) and exits."""
  usage = sys.modules['__main__'].__doc__ % sys.argv[0]
  sys.stdout.write(usage)
  sys.stdout.write("\n")
  sys.stdout.flush()
  sys.stderr.flush()
  sys.exit(code)
def Pull(filename, url, min_key, delim, prefix):
# get content from url and write to filename
try:
connection = urllib2.urlopen(url);
# TODO: read 100 lines incrementally and show progress
content = connection.read()
connection.close()
except urllib2.URLError, e:
logging.error('%s returned error %i, %s' % (url, e.code, e.msg))
sys.exit(2)
try:
tsv_file = file(filename, 'a')
except IOError:
logging.error("I/O error({0}): {1}".format(errno, os.strerror(errno)))
sys.exit(3)
if prefix:
lines = content.split("\n")
lines.pop()
content = ("%s" % prefix) + ("\n%s" % prefix).join(lines) + "\n"
tsv_file.write(content)
tsv_file.close()
# count the number of lines
list = content.splitlines()
line_count = len(list)
last_line = list[line_count - 1]
if min_key == "":
# that's our header, don't count it
line_count -= 1
# get the key value of the last line
fields = last_line.split(delim)
min_key = fields[0][4:]
return min_key, line_count
def ParseArguments(argv):
  """Parses command-line flags for the exporter.

  Returns:
    A (url, filename, batch_size, prefix, digsig) tuple; url and filename
    are None when the corresponding required flag was omitted.
  """
  opts, unused_args = getopt.getopt(
      argv[1:],
      'dh',
      ['debug', 'help',
       'url=', 'filename=', 'prefix=', 'digsig=', 'batch_size='
      ])
  url = None
  filename = None
  digsig = ''
  prefix = ''
  batch_size = 1000
  for option, value in opts:
    if option == '--debug':
      logging.getLogger().setLevel(logging.DEBUG)
    elif option in ('-h', '--help'):
      PrintUsageExit(0)
    elif option == '--url':
      url = value
    elif option == '--filename':
      filename = value
    elif option == '--prefix':
      prefix = value
    elif option == '--digsig':
      digsig = value
    elif option == '--batch_size':
      batch_size = int(value)
      if batch_size <= 0:
        print >>sys.stderr, 'batch_size must be 1 or larger'
        PrintUsageExit(1)
  return (url, filename, batch_size, prefix, digsig)
def main(argv):
  """Drives the export: fetches batches until a short batch signals the end.

  Args:
    argv: Command-line arguments (argv[0] is the program name).

  Returns:
    0 on success; download errors exit the process from within Pull.
  """
  logging.basicConfig(
      level=logging.INFO,
      format='%(levelname)-8s %(asctime)s %(message)s')
  args = ParseArguments(argv)
  if [arg for arg in args if arg is None]:
    print >>sys.stderr, 'Invalid arguments'
    PrintUsageExit(1)
  url, filename, batch_size, prefix, digsig = args
  delim = "\t"
  min_key = ""
  # Seed above batch_size so the loop runs at least once; a batch shorter
  # than batch_size means the server has no more rows.
  lines = batch_size + 2
  while lines >= batch_size:
    url_step = ("%s?digsig=%s&min_key=%s&limit=%s" %
                (url, str(digsig), str(min_key), str(batch_size)))
    if min_key != "":
      log_key = min_key
    else:
      log_key = "[start]"  # first request has no resume key
    t0 = datetime.datetime.now()
    min_key, lines = Pull(filename, url_step, min_key, delim, prefix)
    #print min_key
    diff = datetime.datetime.now() - t0
    secs = "%d.%d" % (diff.seconds, diff.microseconds/1000)
    logging.info('fetched header + %d in %s secs from %s', lines, secs, log_key)
  return 0
# Run the exporter when invoked as a script.
if __name__ == '__main__':
  sys.exit(main(sys.argv))
| Python |
#!/usr/bin/env python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Creates backup of tables.
"""
import sys
import logging
import getopt
import urllib2
import datetime
from datetime import date
def print_usage_exit(code):
  """Prints the module docstring (with argv[0] substituted) and exits."""
  usage = sys.modules['__main__'].__doc__ % sys.argv[0]
  sys.stdout.write(usage)
  sys.stdout.write("\n")
  sys.stdout.flush()
  sys.stderr.flush()
  sys.exit(code)
def handle_response(url):
""" read the last key and the number of records copied """
try:
connection = urllib2.urlopen(url)
content = connection.read()
connection.close()
except urllib2.URLError, eobj:
logging.error('%s returned error %i, %s' % (url, eobj.code, eobj.msg))
sys.exit(2)
last_key = ""
rows = 0
lines = content.split("\n")
for line in lines:
field = line.split("\t")
if field[0] == "rows":
rows = int(field[1])
elif field[0] == "last_key":
last_key = field[1]
return last_key, rows
def parse_arguments(argv):
  """Parses command-line flags for the backup/restore driver.

  Returns:
    A (url, table, backup_version, restore_version, batch_size, digsig)
    tuple. backup_version defaults to today's date as YYYYMMDD; it is
    cleared to "" when --restore_version is given so callers that check
    backup_version first correctly take the restore path.

  Exits via print_usage_exit on conflicting or invalid flags.
  """
  opts, unused_args = getopt.getopt(
      argv[1:],
      'dh',
      ['debug', 'help', 'url=', 'table=',
       'backup_version=', 'restore_version=', 'digsig=', 'batch_size='
      ])

  def lzero(number_string):
    """Left-pads a number string with zeros to a width of 2."""
    rtn = number_string
    while len(rtn) < 2:
      rtn = '0' + rtn
    return rtn

  url = "http://footprint2009dev.appspot.com/export"
  table = ''
  tod = date.today()
  backup_version = str(tod.year) + lzero(str(tod.month)) + lzero(str(tod.day))
  # Track whether --backup_version was given explicitly. The default above
  # is always non-empty, so testing backup_version itself (as the previous
  # code did) made --restore_version trip the mutual-exclusion check and
  # rendered restores unusable.
  backup_given = False
  restore_version = ''
  digsig = ''
  batch_size = 1000
  for option, value in opts:
    if option == '--debug':
      logging.getLogger().setLevel(logging.DEBUG)
    if option in ('-h', '--help'):
      print_usage_exit(0)
    if option == '--url':
      url = value
    if option == '--backup_version':
      backup_version = value
      backup_given = True
      if restore_version:
        sys.stderr.write('backup and restore are mutually exclusive\n')
        print_usage_exit(1)
    if option == '--restore_version':
      restore_version = value
      if backup_given:
        sys.stderr.write('backup and restore are mutually exclusive\n')
        print_usage_exit(1)
    if option == '--table':
      table = value
    if option == '--digsig':
      digsig = value
    if option == '--batch_size':
      batch_size = int(value)
      if batch_size <= 0:
        sys.stderr.write('batch_size must be 1 or larger\n')
        print_usage_exit(1)
  if restore_version:
    # Clear the defaulted backup version so main() takes the restore branch.
    backup_version = ''
  return (url, table, backup_version, restore_version, batch_size, digsig)
def main(argv):
  """Drives the table copy: builds the source/destination URL and fetches
  batches until a short batch signals completion.

  Returns:
    0 on success; argument problems exit via print_usage_exit.
  """
  logging.basicConfig(
      level=logging.INFO,
      format='%(levelname)-8s %(asctime)s %(message)s')
  args = parse_arguments(argv)
  if [arg for arg in args if arg is None]:
    print >> sys.stderr, 'Invalid arguments'
    print_usage_exit(1)
  base_url, table, backup_version, restore_version, batch_size, digsig = args
  if not base_url:
    print >> sys.stderr, 'specify url'
    print_usage_exit(1)
  if backup_version:
    # Backup copies <table> -> <table>_<backup_version>.
    url = "%s/%s/%s_%s" % (base_url, table, table, backup_version)
  elif restore_version:
    # NOTE(review): this builds ".../<table>_<table>/<restore_version>",
    # which does not mirror the backup form; it looks like it should be
    # ".../<table>_<restore_version>/<table>". Confirm against the export
    # endpoint. Also note backup_version defaults to today's date in
    # parse_arguments, so this branch is only reachable when that value
    # comes back empty.
    url = "%s/%s_%s/%s" % (base_url, table, table, restore_version)
  else:
    print >> sys.stderr, 'specify either backup_version or restore_version'
    print_usage_exit(1)
  min_key = ''
  # A full batch means there may be more rows; a short batch ends the loop.
  lines = batch_size
  while lines == batch_size:
    url_step = ("%s?digsig=%s&min_key=%s&limit=%s" %
                (url, str(digsig), str(min_key), str(batch_size)))
    if min_key != "":
      log_key = min_key
    else:
      log_key = "[start]"  # first request has no resume key
    start_time = datetime.datetime.now()
    min_key, lines = handle_response(url_step)
    diff = datetime.datetime.now() - start_time
    secs = "%d.%d" % (diff.seconds, diff.microseconds/1000)
    logging.info('fetched %d in %s secs from %s', lines, secs, log_key)
  return 0
# Run the backup/restore driver when invoked as a script.
if __name__ == '__main__':
  sys.exit(main(sys.argv))
| Python |
#!/usr/bin/python
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: remove silly dependency on dapper.net-- thought I'd need
# it for the full scrape, but ended up not going that way.
"""open source load testing tool for footprint."""
import sys
import os
import urllib
import urlparse
import re
import thread
import time
from datetime import datetime
import socket
import random
import cookielib
import getpass
import logging
import hashlib
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
import tempfile
# readline enhances raw_input() line editing when available; it is purely
# optional, so a missing module is only noted at debug level.
try:
  import readline
except ImportError:
  logging.debug("readline not found.")
  pass
# match appengine's timeout
DEFAULT_TIMEOUT = 30  # seconds; applied process-wide to new sockets below
socket.setdefaulttimeout(DEFAULT_TIMEOUT)
# to identify pages vs. hits, we prefix page with a given name
PAGE_NAME_PREFIX = "page_"
# The logging verbosity:
# 0: Errors only.
# 1: Status messages.
# 2: Info logs.
# 3: Debug logs.
VERBOSITY = 1
def AreYouSureOrExit(exit_if_no=True):
  """Asks the user for confirmation; optionally aborts unless they answer y.

  Returns True when the user typed "y" (any case), False otherwise.
  """
  reply = raw_input("Are you sure you want to continue?(y/N) ").strip().lower()
  confirmed = reply == "y"
  if exit_if_no and not confirmed:
    ErrorExit("User aborted")
  return confirmed
def GetEmail(prompt):
"""Prompts the user for their email address and returns it.
The last used email address is saved to a file and offered up as a suggestion
to the user. If the user presses enter without typing in anything the last
used email address is used. If the user enters a new address, it is saved
for next time we prompt.
"""
last_email_file_name = os.path.expanduser("~/.last_loadtest_email_address")
last_email = ""
if os.path.exists(last_email_file_name):
try:
last_email_file = open(last_email_file_name, "r")
last_email = last_email_file.readline().strip("\n")
last_email_file.close()
prompt += " [%s]" % last_email
except IOError, e:
pass
email = raw_input(prompt + ": ").strip()
if email:
try:
last_email_file = open(last_email_file_name, "w")
last_email_file.write(email)
last_email_file.close()
except IOError, e:
pass
else:
email = last_email
if email.find("@") == -1:
email += "@gmail.com"
print "assuming you mean "+email+"@gmail.com"
return email
def StatusUpdate(msg):
"""Print a status message to stdout.
If 'VERBOSITY' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if VERBOSITY > 0:
print msg
def ErrorExit(msg):
  """Print an error message to stderr and exit."""
  sys.stderr.write("%s\n" % msg)
  sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
  """Raised to indicate there was an error authenticating with ClientLogin."""

  def __init__(self, url, code, msg, headers, args):
    # args is the parsed key/value body of the ClientLogin error response;
    # its "Error" field becomes the machine-readable reason.
    urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
    self.args = args
    self.reason = args["Error"]
class AbstractRpcServer(object):
  """Provides a common interface for a simple RPC server."""
  def __init__(self, host, auth_function, host_override=None, extra_headers={},
               save_cookies=False):
    """Creates a new HttpRpcServer.
    Args:
      host: The host to send requests to.
      auth_function: A function that takes no arguments and returns an
        (email, password) tuple when called. Will be called if authentication
        is required.
      host_override: The host header to send to the server (defaults to host).
      extra_headers: A dict of extra headers to append to every request.
      save_cookies: If True, save the authentication cookies to local disk.
        If False, use an in-memory cookiejar instead. Subclasses must
        implement this functionality. Defaults to False.
    """
    # NOTE(review): extra_headers={} is a shared mutable default; safe today
    # because it is only iterated, never mutated -- confirm before changing.
    self.host = host
    self.host_override = host_override
    self.auth_function = auth_function
    self.authenticated = False
    self.extra_headers = extra_headers
    self.save_cookies = save_cookies
    # subclasses build the opener (cookie handling lives there)
    self.opener = self._GetOpener()
    if self.host_override:
      logging.info("Server: %s; Host: %s", self.host, self.host_override)
    else:
      logging.info("Server: %s", self.host)
  def _GetOpener(self):
    """Returns an OpenerDirector for making HTTP requests.
    Returns:
      A urllib2.OpenerDirector object.
    """
    raise NotImplementedError()
  def _CreateRequest(self, url, data=None):
    """Creates a new urllib request."""
    logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
    req = urllib2.Request(url, data=data)
    if self.host_override:
      req.add_header("Host", self.host_override)
    for key, value in self.extra_headers.iteritems():
      req.add_header(key, value)
    return req
  def _GetAuthToken(self, email, password):
    """Uses ClientLogin to authenticate the user, returning an auth token.
    Args:
      email: The user's email address
      password: The user's password
    Raises:
      ClientLoginError: If there was an error authenticating with ClientLogin.
      HTTPError: If there was some other form of HTTP error.
    Returns:
      The authentication token returned by ClientLogin.
    """
    account_type = "GOOGLE"
    if self.host.endswith(".google.com"):
      # Needed for use inside Google.
      account_type = "HOSTED"
    # NOTE(review): the line below unconditionally resets account_type and
    # makes the HOSTED branch above dead code -- looks like a debugging
    # leftover; confirm intent before removing either line.
    account_type = "GOOGLE"
    req = self._CreateRequest(
        url="https://www.google.com/accounts/ClientLogin",
        data=urllib.urlencode({
            "Email": email,
            "Passwd": password,
            "service": "ah",
            "source": "rietveld-codereview-upload",
            "accountType": account_type,
        }),
    )
    try:
      response = self.opener.open(req)
      response_body = response.read()
      # ClientLogin replies with newline-separated key=value pairs.
      response_dict = dict(x.split("=")
                           for x in response_body.split("\n") if x)
      return response_dict["Auth"]
    except urllib2.HTTPError, e:
      if e.code == 403:
        # a 403 body carries Error=<reason>; surface it as ClientLoginError
        body = e.read()
        response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
        raise ClientLoginError(req.get_full_url(), e.code, e.msg,
                               e.headers, response_dict)
      else:
        raise
  def _GetAuthCookie(self, auth_token):
    """Fetches authentication cookies for an authentication token.
    Args:
      auth_token: The authentication token returned by ClientLogin.
    Raises:
      HTTPError: If there was an error fetching the authentication cookies.
    """
    # This is a dummy value to allow us to identify when we're successful.
    continue_location = "http://localhost/"
    args = {"continue": continue_location, "auth": auth_token}
    req = self._CreateRequest("http://%s/_ah/login?%s" %
                              (self.host, urllib.urlencode(args)))
    try:
      response = self.opener.open(req)
    except urllib2.HTTPError, e:
      # the opener ignores redirects, so a successful login surfaces here
      # as an HTTPError carrying the 302 to continue_location
      response = e
    if (response.code != 302 or
        response.info()["location"] != continue_location):
      raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
                              response.headers, response.fp)
    self.authenticated = True
  def _Authenticate(self):
    """Authenticates the user.
    The authentication process works as follows:
      1) We get a username and password from the user
      2) We use ClientLogin to obtain an AUTH token for the user
         (see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
      3) We pass the auth token to /_ah/login on the server to obtain an
         authentication cookie. If login was successful, it tries to redirect
         us to the URL we provided.
    If we attempt to access the upload API without first obtaining an
    authentication cookie, it returns a 401 response and directs us to
    authenticate ourselves with ClientLogin.
    """
    # up to 3 attempts: only BadAuthentication retries; every other
    # ClientLogin failure either breaks (stays unauthenticated) or raises
    for i in range(3):
      credentials = self.auth_function()
      try:
        auth_token = self._GetAuthToken(credentials[0], credentials[1])
      except ClientLoginError, e:
        if e.reason == "BadAuthentication":
          print >>sys.stderr, "Invalid username or password."
          continue
        if e.reason == "CaptchaRequired":
          print >>sys.stderr, (
            "Please go to\n"
            "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
            "and verify you are a human. Then try again.")
          break
        if e.reason == "NotVerified":
          print >>sys.stderr, "Account not verified."
          break
        if e.reason == "TermsNotAgreed":
          print >>sys.stderr, "User has not agreed to TOS."
          break
        if e.reason == "AccountDeleted":
          print >>sys.stderr, "The user account has been deleted."
          break
        if e.reason == "AccountDisabled":
          print >>sys.stderr, "The user account has been disabled."
          break
        if e.reason == "ServiceDisabled":
          print >>sys.stderr, ("The user's access to the service has been "
                               "disabled.")
          break
        if e.reason == "ServiceUnavailable":
          print >>sys.stderr, "The service is not available; try again later."
          break
        raise
      self._GetAuthCookie(auth_token)
      return
  def Send(self, request_path, payload=None,
           content_type="application/octet-stream",
           timeout=None,
           **kwargs):
    """Sends an RPC and returns the response.
    Args:
      request_path: The path to send the request to, eg /api/appversion/create.
      payload: The body of the request, or None to send an empty request.
      content_type: The Content-Type header to use.
      timeout: timeout in seconds; default None i.e. no timeout.
        (Note: for large requests on OS X, the timeout doesn't work right.)
      kwargs: Any keyword arguments are converted into query string parameters.
    Returns:
      The response body, as a string.
    """
    # TODO: Don't require authentication.  Let the server say
    # whether it is necessary.
    if not self.authenticated:
      self._Authenticate()
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
      tries = 0
      while True:
        tries += 1
        args = dict(kwargs)
        url = "http://%s%s" % (self.host, request_path)
        if args:
          url += "?" + urllib.urlencode(args)
        req = self._CreateRequest(url=url, data=payload)
        req.add_header("Content-Type", content_type)
        try:
          f = self.opener.open(req)
          response = f.read()
          f.close()
          return response
        except urllib2.HTTPError, e:
          # give up after 3 failed attempts of any kind
          if tries > 3:
            raise
          elif e.code == 302 or e.code == 401:
            # auth cookie missing/expired -- re-authenticate and retry
            self._Authenticate()
          elif e.code >= 500 and e.code < 600:
            # Server Error - try again.
            print "server error "+str(e.code)+": sleeping and retrying..."
            time.sleep(1)
            continue
          else:
            raise
    finally:
      # restore the process-wide socket timeout we clobbered above
      socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
  """Provides a simplified RPC-style interface for HTTP requests."""
  def _Authenticate(self):
    """Save the cookie jar after authentication."""
    super(HttpRpcServer, self)._Authenticate()
    if self.save_cookies:
      StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
      self.cookie_jar.save()
  def _GetOpener(self):
    """Returns an OpenerDirector that supports cookies and ignores redirects.
    Returns:
      A urllib2.OpenerDirector object.
    """
    # deliberately NO HTTPRedirectHandler: _GetAuthCookie relies on seeing
    # the raw 302 to detect a successful login
    opener = urllib2.OpenerDirector()
    opener.add_handler(urllib2.ProxyHandler())
    opener.add_handler(urllib2.UnknownHandler())
    opener.add_handler(urllib2.HTTPHandler())
    opener.add_handler(urllib2.HTTPDefaultErrorHandler())
    opener.add_handler(urllib2.HTTPSHandler())
    opener.add_handler(urllib2.HTTPErrorProcessor())
    if self.save_cookies:
      self.cookie_file = os.path.expanduser("~/.loadtest_cookies")
      self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
      if os.path.exists(self.cookie_file):
        try:
          self.cookie_jar.load()
          # pre-existing cookies mean we can likely skip ClientLogin
          self.authenticated = True
          StatusUpdate("Loaded authentication cookies from %s" %
                       self.cookie_file)
        except (cookielib.LoadError, IOError):
          # Failed to load cookies - just ignore them.
          pass
      else:
        # Create an empty cookie file with mode 600
        fd = os.open(self.cookie_file, os.O_CREAT, 0600)
        os.close(fd)
      # Always chmod the cookie file (owner-only: it holds auth material)
      os.chmod(self.cookie_file, 0600)
    else:
      # Don't save cookies across runs of update.py.
      self.cookie_jar = cookielib.CookieJar()
    opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
    return opener
def GetRpcServer(options):
  """Builds an HttpRpcServer configured from parsed command-line options.

  Returns:
    A new AbstractRpcServer, on which RPC calls can be made.
  """
  def _get_user_credentials():
    """Prompts the user for a username and password."""
    user_email = options.email
    if user_email is None:
      user_email = GetEmail("Email (for capturing appengine quota details)")
    return (user_email, getpass.getpass("Password for %s: " % user_email))
  if options.server is None:
    options.server = "appengine.google.com"
  return HttpRpcServer(options.server, _get_user_credentials,
                       host_override=options.host,
                       save_cookies=options.save_cookies)
# Wall-clock start of the run (set by start_running) and the global run
# flag polled by every worker thread.
START_TS = None
RUNNING = False
def start_running():
  """Marks the official kickoff, i.e. after any interaction commands."""
  global RUNNING, START_TS
  RUNNING = True
  START_TS = datetime.now()
def secs_since(ts1, ts2):
  """Returns the interval (ts2 - ts1) as a float number of seconds."""
  dt = ts2 - ts1
  # fold days/seconds/microseconds down to one float
  return dt.days * 86400.0 + dt.seconds + dt.microseconds / 1e6
def perfstats(hits, pageviews):
  """Returns (secs_elapsed, hit_qps, pageview_qps) since start_running()."""
  elapsed = secs_since(START_TS, datetime.now())
  # +0.01 avoids division by zero immediately after startup
  denominator = float(elapsed + 0.01)
  return (elapsed, hits / denominator, pageviews / denominator)
# Shared list of [result_name, elapsed_secs] records, appended from many
# fetcher threads and read (as a snapshot) by main().
RESULTS = []
RESULTS_lock = thread.allocate_lock()
def append_results(res):
  """Thread-safely appends one [name, elapsed_secs] record to RESULTS."""
  RESULTS_lock.acquire()
  try:
    RESULTS.append(res)
  finally:
    # bugfix: always release, even if append raises -- the old code would
    # leave the lock held and deadlock every other fetcher thread
    RESULTS_lock.release()
# Registries filled by register_request_type: handler per name, cache
# hitrate (integer percent) per name, and a frequency-weighted name list
# that run_tests samples from uniformly.
REQUEST_TYPES = {}
CACHE_HITRATE = {}
REQUEST_FREQ = []
def register_request_type(name, func, freq=10, cache_hitrate="50%"):
  """Registers a test case.

  Defaults to a positive hitrate so we get warm vs. cold cache stats.
  freq is the relative frequency for this type of request -- larger
  numbers = larger percentage of the blended results.
  """
  REQUEST_TYPES[name] = func
  CACHE_HITRATE[name] = int(str(cache_hitrate).strip().replace('%', ''))
  REQUEST_FREQ.extend([name] * freq)
# Root URL of the footprint instance under test; a trailing slash is
# expected by search_url().  The commented-out line is the dev instance.
#BASE_URL = "http://footprint2009dev.appspot.com/"
BASE_URL = "http://footprint-loadtest.appspot.com/"
def disable_caching(url):
  """Footprint-specific: appends cache=0 so the server bypasses its cache."""
  # a resulting "?&" in a query string is harmless
  separator = "&" if url.find("?") > 0 else "?"
  return url + separator + "cache=0"
URLS_SEEN = {}
def make_request(cached, url):
"""actually make HTTP request."""
if not cached:
url = disable_caching(url)
if url not in URLS_SEEN:
seen_url = re.sub(re.compile("^"+BASE_URL), '/', url)
print "fetching "+seen_url
URLS_SEEN[url] = True
try:
infh = urllib.urlopen(url)
content = infh.read()
except:
print "error reading "+url
content = ""
return content
def search_url(base, loc="Chicago,IL", keyword="park"):
  """Builds an FP search URL, defaulting to [park] near [Chicago,IL]."""
  if BASE_URL[-1] == '/' and base[0] == '/':
    # avoid a doubled slash at the join point
    base = base[1:]
  url = BASE_URL + base
  if loc:
    url += "&vol_loc=" + loc
  if keyword:
    url += "&q=" + keyword
  return url
def error_request(name, cached=False):
  """Requests 404 junk on the site.

  Here mostly to prove that the framework does catch errors.
  """
  body = make_request(cached, BASE_URL+"foo")
  if body == "":
    return ""
  return "no content"
register_request_type("error", error_request, freq=5)
def static_url():
  """Returns one representative static asset URL (all are roughly alike)."""
  return BASE_URL + "images/background-gradient.png"
def fp_find_embedded_objects(base_url, content):
  """Cheesy little HTML parser: finds embedded object URLs in a page.

  Scans for img/script src and link href attributes, absolutizes them
  against base_url, and de-duplicates -- which also approximates browser
  caching of items shared by / and /ui_snippets.

  Args:
    base_url: page URL, used to resolve relative references.
    content: HTML text.
  Returns:
    List of absolute URLs in first-seen order, without duplicates.
  """
  objs = []
  # strip newlines/etc. used in formatting
  content = re.sub(r'\s+', ' ', content)
  # one HTML element per line
  content = re.sub(r'>', '>\n', content)
  for line in content.split('\n'):
    match = re.search(r'<(?:img[^>]+src|script[^>]+src|link[^>]+href)\s*=\s*(.+)',
                      line)
    if not match:
      continue
    match2 = re.search(r'^["\'](.+?)["\']', match.group(1))
    if not match2:
      # bugfix: unquoted attribute values used to crash with an
      # AttributeError on match2.group(); skip them instead
      continue
    url = match2.group(1)
    url = re.sub(r'[.][.]/images/', 'images/', url)
    url = urlparse.urljoin(base_url, url)
    if url not in objs:
      objs.append(url)
  return objs
# Work queue of static-asset URLs, fed by page fetchers and drained by
# static_fetcher_main threads; guarded by its lock.
static_content_request_queue = []
static_content_request_lock = thread.allocate_lock()
def fetch_static_content(base_url, content):
  """Parses content for embedded JS/CSS/images and queues them for fetching."""
  found = fp_find_embedded_objects(base_url, content)
  static_content_request_lock.acquire()
  static_content_request_queue.extend(found)
  static_content_request_lock.release()
def static_fetcher_main():
  """Worker-thread loop: drains the static-content queue until RUNNING drops.

  Simulates a client/proxy-side cache: a configurable fraction of queued
  URLs are treated as cache hits and never fetched.
  """
  while RUNNING:
    if not static_content_request_queue:
      time.sleep(1)
      continue
    # re-check under the lock: another fetcher may have drained the queue
    # since the unlocked emptiness test above
    static_content_request_lock.acquire()
    url = static_content_request_queue.pop(0) if static_content_request_queue else None
    static_content_request_lock.release()
    if not url:
      continue
    # for static content, caching means client/proxy-side: a hit costs nothing
    if random.randint(0, 99) < OPTIONS.static_content_hitrate:
      continue
    started = datetime.now()
    body = make_request(False, url)
    result_name = "static content requests"
    if body == "":
      result_name += " (errors)"
    append_results([result_name, secs_since(started, datetime.now())])
def homepage_request(name, cached=False):
  """Simulates a homepage view: the page itself plus its snippets fetch."""
  parts = [make_request(cached, BASE_URL),
           make_request(cached, search_url("/ui_snippets?", keyword=""))]
  return "".join(parts)
register_request_type("page_home", homepage_request)
def initial_serp_request(name, cached=False):
  """Simulates a first search-results page: SERP shell plus snippets."""
  parts = [make_request(cached, search_url("/search#")),
           make_request(cached, search_url("/ui_snippets?"))]
  return "".join(parts)
# don't expect much caching-- use 10% hitrate so we can see warm vs. cold stats
register_request_type("page_serp_initial", initial_serp_request, cache_hitrate="10%")
def nextpage_serp_request(name, cached=False):
  """Simulates pagination (statistically, page 2 of the results).

  50% hitrate due to the overfetch algorithm.  Next-page static content
  is expected to be 100% cacheable, so no body is returned for the
  static-content fetchers.
  """
  body = make_request(cached, search_url("/ui_snippets?start=11"))
  return "" if body == "" else "no content"
# nextpage is relatively rare, but this includes all pagination requests
register_request_type("page_serp_next", nextpage_serp_request, freq=5)
def api_request(name, cached=False):
  """Simulates an API search call.

  API callers are more likely to ask for more results and/or paginate,
  and API responses embed no static content (hence the placeholder
  return value).
  """
  body = make_request(cached, search_url("/api/volopps?num=20&key=testkey"))
  return "" if body == "" else "no content"
# until we have more apps, API calls will be rare
register_request_type("page_api", api_request, freq=2)
def setup_tests():
request_type_counts = {}
for name in REQUEST_FREQ:
if name in request_type_counts:
request_type_counts[name] += 1.0
else:
request_type_counts[name] = 1.0
print "OPTIONS.page_fetchers: %d" % OPTIONS.page_fetchers
print "OPTIONS.static_fetchers: %d" % OPTIONS.static_fetchers
print "OPTIONS.static_content_hitrate: %d%%" % OPTIONS.static_content_hitrate
print "request type breakdown:"
for name, cnt in request_type_counts.iteritems():
print " %4.1f%% - %4d%% cache hitrate - %s" % \
(100.0*cnt/float(len(REQUEST_FREQ)), CACHE_HITRATE[name], name)
def run_tests():
  """Worker-thread loop: issues randomly chosen page requests until
  RUNNING goes False, recording per-request latency."""
  while RUNNING:
    testname = REQUEST_FREQ[random.randint(0, len(REQUEST_FREQ)-1)]
    handler = REQUEST_TYPES[testname]
    cached = random.randint(0, 99) < CACHE_HITRATE[testname]
    started = datetime.now()
    content = handler(testname, cached)
    elapsed = secs_since(started, datetime.now())
    result_name = testname + (" (warm cache)" if cached else " (cold cache)")
    # don't count static content towards latency-- too hard to model
    # CSS/JS execution costs, HTTP pipelining and parallel fetching.
    # But we do want to create load on the servers.
    if content and content != "":
      fetch_static_content(BASE_URL, content)
    else:
      result_name = testname + " (errors)"
    append_results([result_name, elapsed])
def main():
global RUNNING
setup_tests()
start_running()
for i in range(OPTIONS.page_fetchers):
thread.start_new_thread(run_tests, ())
for i in range(OPTIONS.static_fetchers):
thread.start_new_thread(static_fetcher_main, ())
while RUNNING:
time.sleep(2)
pageviews = 0
hit_reqs = len(RESULTS)
# important to look at a snapshot-- RESULTS is appended by other threads
for i in range(0, hit_reqs-1):
if RESULTS[i][0].find(PAGE_NAME_PREFIX) == 0:
pageviews += 1
total_secs_elapsed, hit_qps, pageview_qps = perfstats(hit_reqs, pageviews)
print " %4.1f: %d hits (%.1f hits/sec), %d pageviews (%.1f pv/sec)" % \
(total_secs_elapsed, len(RESULTS), hit_qps, pageviews, pageview_qps)
sum_elapsed_time = {}
counts = {}
for i in range(0, hit_reqs-1):
name, elapsed_time = RESULTS[i]
if name in sum_elapsed_time:
sum_elapsed_time[name] += elapsed_time
counts[name] += 1
else:
sum_elapsed_time[name] = elapsed_time
counts[name] = 1
total_counts = 0
for name in counts:
total_counts += counts[name]
for name in sorted(sum_elapsed_time):
print " %4d requests (%4.1f%%), %6dms avg latency for %s" % \
(counts[name], float(counts[name]*100)/float(total_counts+0.01),
int(1000*sum_elapsed_time[name]/counts[name]), name)
if total_secs_elapsed >= OPTIONS.run_time:
RUNNING = False
# Parsed command-line options; populated by get_options() at startup and
# read by nearly every function in the script.
OPTIONS = None
def get_options():
  """Parses sys.argv into the module-global OPTIONS."""
  global OPTIONS
  parser = optparse.OptionParser(usage="%prog [options]")
  # testing options
  group = parser.add_option_group("Load testing options")
  group.add_option("-r", "--run_time", type="int", default=20,
                   dest="run_time",
                   help="how long to run the test (seconds).")
  group.add_option("-n", "--page_fetchers", type="int", dest="page_fetchers",
                   default=4, help="how many pageview fetchers.")
  group.add_option("--static_fetchers", type="int", dest="static_fetchers",
                   default=3, help="how many static content fetchers.")
  group.add_option("--static_content_hitrate", type="int",
                   dest="static_content_hitrate", default=80,
                   help="client-side hitrate on static content (percent)."+
                   "note: 100 = don't simulate fetching of static content.")
  # server
  group = parser.add_option_group("Quota server options")
  group.add_option("-s", "--server", action="store", dest="server",
                   default="appengine.google.com",
                   metavar="SERVER",
                   help=("The server with the quota info. The format is host[:port]. "
                         "Defaults to 'appengine.google.com'."))
  group.add_option("-e", "--email", action="store", dest="email",
                   metavar="EMAIL", default=None,
                   help="The username to use. Will prompt if omitted.")
  group.add_option("-H", "--host", action="store", dest="host",
                   metavar="HOST", default=None,
                   help="Overrides the Host header sent with all RPCs.")
  group.add_option("--no_cookies", action="store_false",
                   dest="save_cookies", default=True,
                   help="Do not save authentication cookies to local disk.")
  # positional args are ignored
  OPTIONS, args = parser.parse_args(sys.argv[1:])
def get_quota_details():
  """Scrapes the appengine quota dashboard for the app under test.

  Returns:
    Dict of "category.name" -> [current value, quota, units string],
    parsed from the /dashboard/quotadetails HTML.
  """
  global OPTIONS
  rpc_server = GetRpcServer(OPTIONS)
  response_body = rpc_server.Send("/dashboard/quotadetails",
                                  app_id="footprint-loadtest")
  # get everything onto one line for easy parsing
  content = re.sub("\n", " ", response_body)
  content = re.sub("\s+", " ", content)
  content = re.sub("> <", "><", content)
  # ...then re-split so each <h3> quota category starts its own line
  content = re.sub("<h3>", "\n<h3>", content)
  details = {}
  for line in content.split("\n"):
    for header in re.finditer("<h3>(.+?)</h3>", line):
      category = header.group(1)
      # each table row looks like: <tr><td>Name</td><td>...>value of quota units
      for match in re.finditer('<tr><td>([a-zA-Z ]+)</td><td>.+?'+
                               '>\s*([0-9.+-]+) of ([0-9.+-]+)( [a-zA-Z0-9 ]+ )?',
                               line):
        name = match.group(1)
        value = float(match.group(2))
        quota = float(match.group(3))
        units = match.group(4)
        if units == None:
          units = ""
        else:
          units = units.strip()
        # drop a redundant category prefix from the metric name
        if name != category:
          name = re.sub(re.compile(category+"\s*"), r'', name)
        details[category+"."+name] = [value, quota, units]
  return details
def fmtnum(num):
  """Adds thousands separators to a number, e.g. 1234567.891 -> "1,234,567.89".

  Keeps at most two decimal places and drops an all-zero fraction.
  Accepts ints, floats, or numeric strings.
  """
  num = str(num)
  while True:
    oldnum = num
    # insert one comma per pass, working right-to-left to a fixed point.
    # bugfix: the old pattern required a trailing non-digit character, so
    # plain integers (e.g. "1000") never got commas; the lookahead also
    # accepts end-of-string.
    num = re.sub(r'(\d)(\d\d\d)(?=[^\d]|$)', r'\1,\2', oldnum)
    if oldnum == num:
      break
  # truncate the fraction to two decimal places
  num = re.sub(r'([.]\d\d)\d+$', r'\1', num)
  # drop a fraction that is entirely zeros (e.g. "1,000.0" -> "1,000")
  num = re.sub(r'[.]0+$', r'', num)
  return num
if __name__ == "__main__":
  #logging.getLogger().setLevel(logging.DEBUG)
  get_options()
  # snapshot quota usage before and after the run so we can report deltas
  start_details = get_quota_details()
  main()
  end_details = get_quota_details()
  for key in start_details:
    startval = start_details[key][0]
    endval = end_details[key][0]
    quota = end_details[key][1]
    units = end_details[key][2]
    delta = endval - startval
    # extrapolate this run's consumption to a full day's worth of load
    day_delta = 86400.0 / OPTIONS.run_time * delta
    if quota > 0.0:
      delta_pct = "%.1f%%" % (100.0 * day_delta / quota)
    else:
      delta_pct = "0.0%"
    # skip metrics that didn't move during the run
    if delta < 0.0001:
      continue
    print "%45s: %6s of quota: %s used, which scales to %s of %s %s / day." % \
      (key, delta_pct, fmtnum(delta), fmtnum(day_delta), fmtnum(quota), units)
| Python |
#!/usr/bin/python
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: remove silly dependency on dapper.net-- thought I'd need
# it for the full scrape, but ended up not going that way.
"""open source load testing tool for footprint."""
import sys
import os
import urllib
import urlparse
import re
import thread
import time
from datetime import datetime
import socket
import random
import cookielib
import getpass
import logging
import hashlib
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
import tempfile
try:
import readline
except ImportError:
logging.debug("readline not found.")
pass
# match appengine's timeout
# (seconds; set process-wide, so every urllib fetch below inherits it)
DEFAULT_TIMEOUT = 30
socket.setdefaulttimeout(DEFAULT_TIMEOUT)
# to identify pages vs. hits, we prefix page with a given name
# (main() counts a result record as a pageview iff its name starts with this)
PAGE_NAME_PREFIX = "page_"
# The logging verbosity:
#  0: Errors only.
#  1: Status messages.
#  2: Info logs.
#  3: Debug logs.
VERBOSITY = 1
def AreYouSureOrExit(exit_if_no=True):
  """Asks the user for a y/N confirmation on stdin.

  Args:
    exit_if_no: if True, terminate the process unless the answer is "y".
  Returns:
    True iff the user answered "y" (case-insensitive).
  """
  reply = raw_input("Are you sure you want to continue?(y/N) ").strip().lower()
  if exit_if_no and reply != "y":
    ErrorExit("User aborted")
  return reply == "y"
def GetEmail(prompt):
"""Prompts the user for their email address and returns it.
The last used email address is saved to a file and offered up as a suggestion
to the user. If the user presses enter without typing in anything the last
used email address is used. If the user enters a new address, it is saved
for next time we prompt.
"""
last_email_file_name = os.path.expanduser("~/.last_loadtest_email_address")
last_email = ""
if os.path.exists(last_email_file_name):
try:
last_email_file = open(last_email_file_name, "r")
last_email = last_email_file.readline().strip("\n")
last_email_file.close()
prompt += " [%s]" % last_email
except IOError, e:
pass
email = raw_input(prompt + ": ").strip()
if email:
try:
last_email_file = open(last_email_file_name, "w")
last_email_file.write(email)
last_email_file.close()
except IOError, e:
pass
else:
email = last_email
if email.find("@") == -1:
email += "@gmail.com"
print "assuming you mean "+email+"@gmail.com"
return email
def StatusUpdate(msg):
"""Print a status message to stdout.
If 'VERBOSITY' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if VERBOSITY > 0:
print msg
def ErrorExit(msg):
  """Writes msg to stderr and aborts the process with exit status 1."""
  sys.stderr.write("%s\n" % msg)
  sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
  """Raised to indicate there was an error authenticating with ClientLogin."""
  def __init__(self, url, code, msg, headers, args):
    # 'args' is the parsed key=value dict from the ClientLogin error body.
    urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
    # NOTE(review): this shadows BaseException.args (normally a tuple) with a
    # dict; works for this script, but confirm nothing relies on the tuple.
    self.args = args
    # e.g. "BadAuthentication", "CaptchaRequired" -- consumed by _Authenticate.
    self.reason = args["Error"]
class AbstractRpcServer(object):
  """Provides a common interface for a simple RPC server."""
  def __init__(self, host, auth_function, host_override=None, extra_headers={},
               save_cookies=False):
    """Creates a new HttpRpcServer.
    Args:
      host: The host to send requests to.
      auth_function: A function that takes no arguments and returns an
        (email, password) tuple when called. Will be called if authentication
        is required.
      host_override: The host header to send to the server (defaults to host).
      extra_headers: A dict of extra headers to append to every request.
      save_cookies: If True, save the authentication cookies to local disk.
        If False, use an in-memory cookiejar instead. Subclasses must
        implement this functionality. Defaults to False.
    """
    # NOTE(review): extra_headers={} is a shared mutable default; safe today
    # because it is only iterated, never mutated -- confirm before changing.
    self.host = host
    self.host_override = host_override
    self.auth_function = auth_function
    self.authenticated = False
    self.extra_headers = extra_headers
    self.save_cookies = save_cookies
    # subclasses build the opener (cookie handling lives there)
    self.opener = self._GetOpener()
    if self.host_override:
      logging.info("Server: %s; Host: %s", self.host, self.host_override)
    else:
      logging.info("Server: %s", self.host)
  def _GetOpener(self):
    """Returns an OpenerDirector for making HTTP requests.
    Returns:
      A urllib2.OpenerDirector object.
    """
    raise NotImplementedError()
  def _CreateRequest(self, url, data=None):
    """Creates a new urllib request."""
    logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
    req = urllib2.Request(url, data=data)
    if self.host_override:
      req.add_header("Host", self.host_override)
    for key, value in self.extra_headers.iteritems():
      req.add_header(key, value)
    return req
  def _GetAuthToken(self, email, password):
    """Uses ClientLogin to authenticate the user, returning an auth token.
    Args:
      email: The user's email address
      password: The user's password
    Raises:
      ClientLoginError: If there was an error authenticating with ClientLogin.
      HTTPError: If there was some other form of HTTP error.
    Returns:
      The authentication token returned by ClientLogin.
    """
    account_type = "GOOGLE"
    if self.host.endswith(".google.com"):
      # Needed for use inside Google.
      account_type = "HOSTED"
    # NOTE(review): the line below unconditionally resets account_type and
    # makes the HOSTED branch above dead code -- looks like a debugging
    # leftover; confirm intent before removing either line.
    account_type = "GOOGLE"
    req = self._CreateRequest(
        url="https://www.google.com/accounts/ClientLogin",
        data=urllib.urlencode({
            "Email": email,
            "Passwd": password,
            "service": "ah",
            "source": "rietveld-codereview-upload",
            "accountType": account_type,
        }),
    )
    try:
      response = self.opener.open(req)
      response_body = response.read()
      # ClientLogin replies with newline-separated key=value pairs.
      response_dict = dict(x.split("=")
                           for x in response_body.split("\n") if x)
      return response_dict["Auth"]
    except urllib2.HTTPError, e:
      if e.code == 403:
        # a 403 body carries Error=<reason>; surface it as ClientLoginError
        body = e.read()
        response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
        raise ClientLoginError(req.get_full_url(), e.code, e.msg,
                               e.headers, response_dict)
      else:
        raise
  def _GetAuthCookie(self, auth_token):
    """Fetches authentication cookies for an authentication token.
    Args:
      auth_token: The authentication token returned by ClientLogin.
    Raises:
      HTTPError: If there was an error fetching the authentication cookies.
    """
    # This is a dummy value to allow us to identify when we're successful.
    continue_location = "http://localhost/"
    args = {"continue": continue_location, "auth": auth_token}
    req = self._CreateRequest("http://%s/_ah/login?%s" %
                              (self.host, urllib.urlencode(args)))
    try:
      response = self.opener.open(req)
    except urllib2.HTTPError, e:
      # the opener ignores redirects, so a successful login surfaces here
      # as an HTTPError carrying the 302 to continue_location
      response = e
    if (response.code != 302 or
        response.info()["location"] != continue_location):
      raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
                              response.headers, response.fp)
    self.authenticated = True
  def _Authenticate(self):
    """Authenticates the user.
    The authentication process works as follows:
      1) We get a username and password from the user
      2) We use ClientLogin to obtain an AUTH token for the user
         (see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
      3) We pass the auth token to /_ah/login on the server to obtain an
         authentication cookie. If login was successful, it tries to redirect
         us to the URL we provided.
    If we attempt to access the upload API without first obtaining an
    authentication cookie, it returns a 401 response and directs us to
    authenticate ourselves with ClientLogin.
    """
    # up to 3 attempts: only BadAuthentication retries; every other
    # ClientLogin failure either breaks (stays unauthenticated) or raises
    for i in range(3):
      credentials = self.auth_function()
      try:
        auth_token = self._GetAuthToken(credentials[0], credentials[1])
      except ClientLoginError, e:
        if e.reason == "BadAuthentication":
          print >>sys.stderr, "Invalid username or password."
          continue
        if e.reason == "CaptchaRequired":
          print >>sys.stderr, (
            "Please go to\n"
            "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
            "and verify you are a human. Then try again.")
          break
        if e.reason == "NotVerified":
          print >>sys.stderr, "Account not verified."
          break
        if e.reason == "TermsNotAgreed":
          print >>sys.stderr, "User has not agreed to TOS."
          break
        if e.reason == "AccountDeleted":
          print >>sys.stderr, "The user account has been deleted."
          break
        if e.reason == "AccountDisabled":
          print >>sys.stderr, "The user account has been disabled."
          break
        if e.reason == "ServiceDisabled":
          print >>sys.stderr, ("The user's access to the service has been "
                               "disabled.")
          break
        if e.reason == "ServiceUnavailable":
          print >>sys.stderr, "The service is not available; try again later."
          break
        raise
      self._GetAuthCookie(auth_token)
      return
  def Send(self, request_path, payload=None,
           content_type="application/octet-stream",
           timeout=None,
           **kwargs):
    """Sends an RPC and returns the response.
    Args:
      request_path: The path to send the request to, eg /api/appversion/create.
      payload: The body of the request, or None to send an empty request.
      content_type: The Content-Type header to use.
      timeout: timeout in seconds; default None i.e. no timeout.
        (Note: for large requests on OS X, the timeout doesn't work right.)
      kwargs: Any keyword arguments are converted into query string parameters.
    Returns:
      The response body, as a string.
    """
    # TODO: Don't require authentication.  Let the server say
    # whether it is necessary.
    if not self.authenticated:
      self._Authenticate()
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
      tries = 0
      while True:
        tries += 1
        args = dict(kwargs)
        url = "http://%s%s" % (self.host, request_path)
        if args:
          url += "?" + urllib.urlencode(args)
        req = self._CreateRequest(url=url, data=payload)
        req.add_header("Content-Type", content_type)
        try:
          f = self.opener.open(req)
          response = f.read()
          f.close()
          return response
        except urllib2.HTTPError, e:
          # give up after 3 failed attempts of any kind
          if tries > 3:
            raise
          elif e.code == 302 or e.code == 401:
            # auth cookie missing/expired -- re-authenticate and retry
            self._Authenticate()
          elif e.code >= 500 and e.code < 600:
            # Server Error - try again.
            print "server error "+str(e.code)+": sleeping and retrying..."
            time.sleep(1)
            continue
          else:
            raise
    finally:
      # restore the process-wide socket timeout we clobbered above
      socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
  """Provides a simplified RPC-style interface for HTTP requests."""
  def _Authenticate(self):
    """Save the cookie jar after authentication."""
    super(HttpRpcServer, self)._Authenticate()
    if self.save_cookies:
      StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
      self.cookie_jar.save()
  def _GetOpener(self):
    """Returns an OpenerDirector that supports cookies and ignores redirects.
    Returns:
      A urllib2.OpenerDirector object.
    """
    # deliberately NO HTTPRedirectHandler: _GetAuthCookie relies on seeing
    # the raw 302 to detect a successful login
    opener = urllib2.OpenerDirector()
    opener.add_handler(urllib2.ProxyHandler())
    opener.add_handler(urllib2.UnknownHandler())
    opener.add_handler(urllib2.HTTPHandler())
    opener.add_handler(urllib2.HTTPDefaultErrorHandler())
    opener.add_handler(urllib2.HTTPSHandler())
    opener.add_handler(urllib2.HTTPErrorProcessor())
    if self.save_cookies:
      self.cookie_file = os.path.expanduser("~/.loadtest_cookies")
      self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
      if os.path.exists(self.cookie_file):
        try:
          self.cookie_jar.load()
          # pre-existing cookies mean we can likely skip ClientLogin
          self.authenticated = True
          StatusUpdate("Loaded authentication cookies from %s" %
                       self.cookie_file)
        except (cookielib.LoadError, IOError):
          # Failed to load cookies - just ignore them.
          pass
      else:
        # Create an empty cookie file with mode 600
        fd = os.open(self.cookie_file, os.O_CREAT, 0600)
        os.close(fd)
      # Always chmod the cookie file (owner-only: it holds auth material)
      os.chmod(self.cookie_file, 0600)
    else:
      # Don't save cookies across runs of update.py.
      self.cookie_jar = cookielib.CookieJar()
    opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
    return opener
def GetRpcServer(options):
    """Build an HttpRpcServer from the parsed command-line options.

    Returns:
      A new AbstractRpcServer, on which RPC calls can be made.
    """
    if options.server is None:
        options.server = "appengine.google.com"

    def prompt_for_credentials():
        """Prompt the user for a (email, password) pair."""
        user_email = options.email
        if user_email is None:
            user_email = GetEmail("Email (for capturing appengine quota details)")
        return (user_email, getpass.getpass("Password for %s: " % user_email))

    return HttpRpcServer(options.server, prompt_for_credentials,
                         host_override=options.host,
                         save_cookies=options.save_cookies)
# Wall-clock timestamp of the official test start; set by start_running().
START_TS = None
# Global run flag polled by all worker threads; cleared to stop the test.
RUNNING = False
def start_running():
    """official kickoff, i.e. after any interaction commands."""
    global RUNNING, START_TS
    RUNNING = True
    START_TS = datetime.now()
def secs_since(ts1, ts2):
    """Return the elapsed time from ts1 to ts2 as fractional seconds."""
    delta = ts2 - ts1
    return (delta.days * 86400.0 +
            delta.seconds * 1.0 +
            delta.microseconds / 1000000.0)
def perfstats(hits, pageviews):
    """Return (secs_elapsed, hit_qps, pageview_qps) since start_running()."""
    global START_TS
    elapsed = secs_since(START_TS, datetime.now())
    # The +0.01 avoids a divide-by-zero immediately after startup.
    denominator = float(elapsed + 0.01)
    return (elapsed, hits / denominator, pageviews / denominator)
# Shared list of [result_name, elapsed_seconds] samples from all workers.
RESULTS = []
# Guards RESULTS; it is appended to concurrently by the fetcher threads.
RESULTS_lock = thread.allocate_lock()
def append_results(res):
    """Thread-safely append one result sample to the shared RESULTS list.

    Uses 'with' so the lock is released even if append raises; the original
    manual acquire()/release() pair could leave the lock held on error.
    """
    with RESULTS_lock:
        RESULTS.append(res)
# Registered test-case callables, keyed by test-case name.
REQUEST_TYPES = {}
# Cache hitrate (integer percent) per test-case name.
CACHE_HITRATE = {}
# Weighted name list: each name appears 'freq' times, so a uniform random
# pick from this list yields the configured blend of request types.
REQUEST_FREQ = []
def register_request_type(name, func, freq=10, cache_hitrate="50%"):
    """setup a test case. Default to positive hitrate so we get warm vs.
    cold cache stats. Freq is the relative frequency for this type of
    request-- larger numbers = larger percentage for the blended results.

    Args:
      name: unique test-case name.
      func: callable(name, cached) that performs the request.
      freq: relative frequency weight.
      cache_hitrate: percent, as an int or a string like "50%".
    """
    REQUEST_TYPES[name] = func
    # Accept "50%", " 50 " or a plain number; strip a trailing percent sign.
    CACHE_HITRATE[name] = int(str(cache_hitrate).strip().rstrip('%'))
    # Idiomatic replacement for the original manual append loop.
    REQUEST_FREQ.extend([name] * freq)
#BASE_URL = "http://footprint2009dev.appspot.com/"
BASE_URL = "http://footprint-loadtest.appspot.com/"
def disable_caching(url):
    """footprint-specific method to disable caching.

    Appends cache=0 with '&' when the URL already has a query string,
    with '?' otherwise.
    """
    # Bug fix: the original tested url.find("?") > 0, which treats a '?'
    # at index 0 as "no query string" and would produce '?...?cache=0'.
    if "?" in url:
        # note: ?& is harmless
        return url + "&cache=0"
    return url + "?cache=0"
# URLs already announced once, so "fetching ..." is logged per-URL, not
# per-request.
URLS_SEEN = {}
def make_request(cached, url):
    """actually make HTTP request.

    Returns the response body, or "" on any fetch error (best-effort: the
    load test keeps running when individual requests fail).
    """
    if not cached:
        url = disable_caching(url)
    if url not in URLS_SEEN:
        # Log a BASE_URL-relative form of the URL the first time it is seen.
        seen_url = re.sub(re.compile("^" + BASE_URL), '/', url)
        print("fetching " + seen_url)
        URLS_SEEN[url] = True
    try:
        infh = urllib.urlopen(url)
        try:
            content = infh.read()
        finally:
            # Bug fix: the original never closed the connection handle.
            infh.close()
    except Exception:
        # Was a bare 'except:'; narrowed so KeyboardInterrupt/SystemExit can
        # still stop the load test while fetch errors remain best-effort.
        print("error reading " + url)
        content = ""
    return content
def search_url(base, loc="Chicago,IL", keyword="park"):
    """construct FP search URL, defaulting to [park] near [Chicago,IL]"""
    # Avoid a doubled slash when both BASE_URL and base contribute one.
    if BASE_URL[-1] == '/' and base[0] == '/':
        url = BASE_URL + base[1:]
    else:
        url = BASE_URL + base
    # The original's 'loc and loc != ""' was redundant: "" is already falsy.
    if loc:
        url += "&vol_loc=" + loc
    if keyword:
        url += "&q=" + keyword
    return url
def error_request(name, cached=False):
    """requests for 404 junk on the site. Here mostly to prove that
    the framework does catch errors."""
    body = make_request(cached, BASE_URL + "foo")
    if body == "":
        return ""
    return "no content"
register_request_type("error", error_request, freq=5)
def static_url():
    """all static requests are roughly equivalent."""
    return "%simages/background-gradient.png" % BASE_URL
def fp_find_embedded_objects(base_url, content):
    """cheesy little HTML parser, which also approximates browser caching
    of items on both / and /ui_snippets.

    Returns a de-duplicated list of absolute URLs for the img/script/link
    resources referenced by 'content'.
    """
    objs = []
    # strip newlines/etc. used in formatting
    content = re.sub(r'\s+', ' ', content)
    # one HTML element per line
    content = re.sub(r'>', '>\n', content)
    for line in content.split('\n'):
        match = re.search(r'<(?:img[^>]+src|script[^>]+src|link[^>]+href)\s*=\s*(.+)',
                          line)
        if match:
            match2 = re.search(r'^["\'](.+?)["\']', match.group(1))
            if not match2:
                # Bug fix: an unquoted attribute value used to raise
                # AttributeError (match2 is None); skip it instead.
                continue
            url = match2.group(1)
            # Collapse the ../images/ form used by snippet pages.
            url = re.sub(r'[.][.]/images/', 'images/', url)
            url = urlparse.urljoin(base_url, url)
            if url not in objs:
                objs.append(url)
    return objs
# FIFO of static-content URLs awaiting fetch by the static fetcher threads.
static_content_request_queue = []
static_content_request_lock = thread.allocate_lock()
def fetch_static_content(base_url, content):
    """find the embedded JS/CSS/images and request them.

    Only enqueues the URLs; static_fetcher_main() performs the fetches.
    """
    urls = fp_find_embedded_objects(base_url, content)
    # 'with' guarantees the lock is released even if extend raises; the
    # original manual acquire()/release() pair could leak the lock.
    with static_content_request_lock:
        static_content_request_queue.extend(urls)
def static_fetcher_main():
    """thread for fetching static content.

    Loops until the global RUNNING flag is cleared, draining the shared
    static_content_request_queue and recording per-fetch latencies.
    """
    while RUNNING:
        if len(static_content_request_queue) == 0:
            time.sleep(1)
            continue
        url = None
        # Pop under the lock; 'with' releases it even if pop raises (the
        # original manual acquire/release could leak the lock on error).
        with static_content_request_lock:
            if len(static_content_request_queue) > 0:
                url = static_content_request_queue.pop(0)
        if url:
            # for static content, caching means client/proxy-side
            cached = (random.randint(0, 99) < OPTIONS.static_content_hitrate)
            if cached:
                continue
            ts1 = datetime.now()
            content = make_request(False, url)
            elapsed = secs_since(ts1, datetime.now())
            result_name = "static content requests"
            if content == "":
                result_name += " (errors)"
            append_results([result_name, elapsed])
def homepage_request(name, cached=False):
    """request to FP homepage."""
    body = make_request(cached, BASE_URL)
    body += make_request(cached, search_url("/ui_snippets?", keyword=""))
    return body
register_request_type("page_home", homepage_request)
def initial_serp_request(name, cached=False):
    """request for an initial search-results page plus its snippets."""
    return (make_request(cached, search_url("/search#")) +
            make_request(cached, search_url("/ui_snippets?")))
# don't expect much caching-- use 10% hitrate so we can see warm vs. cold stats
register_request_type("page_serp_initial", initial_serp_request, cache_hitrate="10%")
def nextpage_serp_request(name, cached=False):
    """request for a paginated (next-page) search-results fetch."""
    # statistically, nextpage is page 2
    # 50% hitrate due to the overfetch algorithm
    body = make_request(cached, search_url("/ui_snippets?start=11"))
    if body == "":
        return ""
    # we expect next-page static content to be 100% cacheable
    # so don't return content
    return "no content"
# nextpage is relatively rare, but this includes all pagination requests
register_request_type("page_serp_next", nextpage_serp_request, freq=5)
def api_request(name, cached=False):
    """request against the /api/volopps endpoint."""
    # API calls are probably more likely to ask for more results and/or paginate
    body = make_request(cached, search_url("/api/volopps?num=20&key=testkey"))
    if body == "":
        return ""
    # API requests don't create static content requests
    return "no content"
# until we have more apps, API calls will be rare
register_request_type("page_api", api_request, freq=2)
def setup_tests():
    """Print the configured options and the blended request-type mix."""
    type_counts = {}
    for name in REQUEST_FREQ:
        type_counts[name] = type_counts.get(name, 0.0) + 1.0
    print("OPTIONS.page_fetchers: %d" % OPTIONS.page_fetchers)
    print("OPTIONS.static_fetchers: %d" % OPTIONS.static_fetchers)
    print("OPTIONS.static_content_hitrate: %d%%" % OPTIONS.static_content_hitrate)
    print("request type breakdown:")
    total = float(len(REQUEST_FREQ))
    for name, cnt in type_counts.items():
        print(" %4.1f%% - %4d%% cache hitrate - %s" %
              (100.0 * cnt / total, CACHE_HITRATE[name], name))
def run_tests():
    """Worker loop: issue randomly chosen requests until RUNNING is cleared."""
    # give the threading system a chance to startup
    while RUNNING:
        testname = random.choice(REQUEST_FREQ)
        func = REQUEST_TYPES[testname]
        cached = (random.randint(0, 99) < CACHE_HITRATE[testname])
        started = datetime.now()
        content = func(testname, cached)
        elapsed = secs_since(started, datetime.now())
        if cached:
            result_name = testname + " (warm cache)"
        else:
            result_name = testname + " (cold cache)"
        # don't count static content towards latency--
        # too hard to model CSS/JS execution costs, HTTP pipelining
        # and parallel fetching. But we do want to create load on the
        # servers
        if content and content != "":
            fetch_static_content(BASE_URL, content)
        else:
            result_name = testname + " (errors)"
        append_results([result_name, elapsed])
def main():
    """Run the load test: spawn fetcher threads, then report stats every
    two seconds until OPTIONS.run_time has elapsed."""
    global RUNNING
    setup_tests()
    start_running()
    for _ in range(OPTIONS.page_fetchers):
        thread.start_new_thread(run_tests, ())
    for _ in range(OPTIONS.static_fetchers):
        thread.start_new_thread(static_fetcher_main, ())
    while RUNNING:
        time.sleep(2)
        pageviews = 0
        hit_reqs = len(RESULTS)
        # important to look at a snapshot-- RESULTS is appended by other threads
        # Bug fix: range(0, hit_reqs-1) skipped the last snapshot entry.
        for i in range(hit_reqs):
            # NOTE(review): PAGE_NAME_PREFIX is not defined in this chunk;
            # presumably a module-level constant like "page_" -- confirm.
            if RESULTS[i][0].find(PAGE_NAME_PREFIX) == 0:
                pageviews += 1
        total_secs_elapsed, hit_qps, pageview_qps = perfstats(hit_reqs, pageviews)
        print(" %4.1f: %d hits (%.1f hits/sec), %d pageviews (%.1f pv/sec)" %
              (total_secs_elapsed, len(RESULTS), hit_qps, pageviews, pageview_qps))
        sum_elapsed_time = {}
        counts = {}
        # Bug fix: same off-by-one as above; include index hit_reqs-1.
        for i in range(hit_reqs):
            name, elapsed_time = RESULTS[i]
            if name in sum_elapsed_time:
                sum_elapsed_time[name] += elapsed_time
                counts[name] += 1
            else:
                sum_elapsed_time[name] = elapsed_time
                counts[name] = 1
        total_counts = 0
        for name in counts:
            total_counts += counts[name]
        for name in sorted(sum_elapsed_time):
            print(" %4d requests (%4.1f%%), %6dms avg latency for %s" %
                  (counts[name], float(counts[name]*100)/float(total_counts+0.01),
                   int(1000*sum_elapsed_time[name]/counts[name]), name))
        if total_secs_elapsed >= OPTIONS.run_time:
            RUNNING = False
# Parsed command-line options; populated by get_options() at startup and
# read by the rest of the script.
OPTIONS = None
def get_options():
    """Parse command-line arguments into the module-global OPTIONS."""
    global OPTIONS
    parser = optparse.OptionParser(usage="%prog [options]")
    # testing options
    group = parser.add_option_group("Load testing options")
    group.add_option("-r", "--run_time", type="int", default=20,
                     dest="run_time",
                     help="how long to run the test (seconds).")
    group.add_option("-n", "--page_fetchers", type="int", dest="page_fetchers",
                     default=4, help="how many pageview fetchers.")
    group.add_option("--static_fetchers", type="int", dest="static_fetchers",
                     default=3, help="how many static content fetchers.")
    group.add_option("--static_content_hitrate", type="int",
                     dest="static_content_hitrate", default=80,
                     help="client-side hitrate on static content (percent)."+
                     "note: 100 = don't simulate fetching of static content.")
    # server (quota-dashboard) options, consumed by GetRpcServer()
    group = parser.add_option_group("Quota server options")
    group.add_option("-s", "--server", action="store", dest="server",
                     default="appengine.google.com",
                     metavar="SERVER",
                     help=("The server with the quota info. The format is host[:port]. "
                           "Defaults to 'appengine.google.com'."))
    group.add_option("-e", "--email", action="store", dest="email",
                     metavar="EMAIL", default=None,
                     help="The username to use. Will prompt if omitted.")
    group.add_option("-H", "--host", action="store", dest="host",
                     metavar="HOST", default=None,
                     help="Overrides the Host header sent with all RPCs.")
    group.add_option("--no_cookies", action="store_false",
                     dest="save_cookies", default=True,
                     help="Do not save authentication cookies to local disk.")
    # Positional arguments are ignored; only the parsed options are kept.
    OPTIONS, args = parser.parse_args(sys.argv[1:])
def get_quota_details():
    """Scrape the App Engine quota dashboard into a details dict.

    Returns:
      dict mapping "Category.Name" -> [current_value, quota, units],
      parsed from the /dashboard/quotadetails HTML.
    """
    global OPTIONS
    rpc_server = GetRpcServer(OPTIONS)
    response_body = rpc_server.Send("/dashboard/quotadetails",
                                    app_id="footprint-loadtest")
    # get everything onto one line for easy parsing
    content = re.sub("\n", " ", response_body)
    content = re.sub("\s+", " ", content)
    content = re.sub("> <", "><", content)
    # ...then re-split so each <h3> category heading starts its own line.
    content = re.sub("<h3>", "\n<h3>", content)
    details = {}
    for line in content.split("\n"):
        for header in re.finditer("<h3>(.+?)</h3>", line):
            category = header.group(1)
            # Each table row looks like "<name> ... <value> of <quota> [units]".
            for match in re.finditer('<tr><td>([a-zA-Z ]+)</td><td>.+?'+
                                     '>\s*([0-9.+-]+) of ([0-9.+-]+)( [a-zA-Z0-9 ]+ )?',
                                     line):
                name = match.group(1)
                value = float(match.group(2))
                quota = float(match.group(3))
                units = match.group(4)  # optional; None when the row has no unit text
                if units == None:
                    units = ""
                else:
                    units = units.strip()
                # Strip the category name embedded in the metric name so the
                # key does not repeat it (e.g. "CPU.CPU Time" -> "CPU.Time").
                if name != category:
                    name = re.sub(re.compile(category+"\s*"), r'', name)
                details[category+"."+name] = [value, quota, units]
    return details
def fmtnum(num):
    """add commas to a float."""
    text = str(num)
    # Insert thousands separators one group at a time until stable.
    while True:
        grouped = re.sub(r'(\d)(\d\d\d[^\d])', r'\1,\2', text)
        if grouped == text:
            break
        text = grouped
    # Truncate the fraction to two places, then drop a trailing ".0...".
    text = re.sub(r'([.]\d\d)\d+$', r'\1', text)
    text = re.sub(r'[.]0+$', r'', text)
    return text
if __name__ == "__main__":
    #logging.getLogger().setLevel(logging.DEBUG)
    get_options()
    # Snapshot quota usage before and after the run, then report each
    # metric's delta extrapolated to a full day.
    start_details = get_quota_details()
    main()
    end_details = get_quota_details()
    for key in start_details:
        startval = start_details[key][0]
        endval = end_details[key][0]
        quota = end_details[key][1]
        units = end_details[key][2]
        delta = endval - startval
        # Scale this run's usage up to 24 hours of sustained load.
        day_delta = 86400.0 / OPTIONS.run_time * delta
        if quota > 0.0:
            delta_pct = "%.1f%%" % (100.0 * day_delta / quota)
        else:
            delta_pct = "0.0%"
        # Skip metrics that did not measurably change during the run.
        if delta < 0.0001:
            continue
        print "%45s: %6s of quota: %s used, which scales to %s of %s %s / day." % \
            (key, delta_pct, fmtnum(delta), fmtnum(day_delta), fmtnum(quota), units)
# | Python |
#!/usr/bin/env python
import os, os.path, sys, re, string, logging, subprocess
import time, shutil, threading
# ---------
# Globals
# ---------
# Serializes console output produced by concurrent build threads.
PrintLock = threading.Lock()
# ----------
# Exceptions
# ----------
class InterruptException (Exception):
    """Marker exception for interruptions; raise sites are outside this chunk."""
    pass
# ---------
# Logging
# ---------
# Root logger defaults to WARNING, so the logging.debug() calls throughout
# this tool are silent unless the level is lowered elsewhere.
logging.getLogger('').setLevel(logging.WARNING)
# ---------
# Functions
# ---------
def ExpandShellArgs(argsString):
    """Expand shell substitutions (globs, $VARS, backticks) in argsString.

    WARNING(security): argsString is interpolated unquoted into a shell
    command, so untrusted input could execute arbitrary commands. Only call
    this with trusted, locally-configured strings.
    """
    result = os.popen('echo %s' % (argsString)).read() # Perform any shell substitutions
    result = result[:-1] # Chomp newline
    return result
def MostRecentDate(date1, date2):
    "Return the later of the two dates; either (or both) may be None."
    if date1 and date2:
        # Ties resolve to date1, matching the original comparison.
        return max(date1, date2)
    return date1 or date2
def RunSubprocess(command):
    """Run 'command' through the shell, capturing stdout and stderr.

    Returns:
      (success, output, error): success is False when the process could not
      be started, was killed by a signal, or exited non-zero. output/error
      are the captured streams (None when the process never started).
    """
    # The redundant function-local 'import subprocess' was removed; the
    # module is imported at the top of the file.
    # Bug fix: on OSError the original returned the never-assigned locals
    # 'output'/'error', raising UnboundLocalError instead of reporting the
    # failure; initialize them first.
    output = error = None
    try:
        process = subprocess.Popen(command, shell=True,
                                   stderr=subprocess.PIPE,
                                   stdout=subprocess.PIPE)
        output, error = process.communicate()
    except OSError:
        # The process could not be launched at all.
        return (False, output, error)
    if process.returncode < 0:
        # A negative return code means the child was killed by a signal.
        print('Build Interrupted')
        return (False, output, error)
    return (process.returncode == 0, output, error)
def FindFileInAncestorDirs(dir, filename):
    """Search dir and each of its ancestor directories for filename.

    Returns the path of the first match found while walking upward, or
    None when no ancestor directory contains the file.
    """
    # The redundant function-local 'import os.path' was removed; the module
    # is imported at the top of the file.
    ancestor = dir
    while os.path.exists(ancestor):
        candidate = os.path.join(ancestor, filename)
        if os.path.isfile(candidate):
            return candidate
        # dirname('/') is '/', which rstrip(os.sep) turns into '' so the
        # loop terminates once the filesystem root has been checked.
        ancestor = os.path.dirname(ancestor).rstrip(os.sep)
    return None
# ---------
# Classes
# ---------
class SourceFile:
    """One source file in the project, with its scan and build metadata.

    Tracks two kinds of state: configuration-independent scan metadata
    (modules used/contained, includes, checksums) and per-build-config
    metadata (build times, compile commands, build checksums).

    NOTE(review): preprocessFuncs is a single dict on this base class;
    subclasses' configure() methods mutate it via cls.preprocessFuncs[...],
    so ALL SourceFile subclasses share one preprocessor hook table --
    confirm that sharing is intended.
    """
    preprocessFuncs = {}

    def __init__(self, path, projectRoot):
        self._path = path
        self._fileName = os.path.basename(path)
        self._directDependencies = []
        self._isMainProgram = False
        self._projectRoot = projectRoot
        self._dependencyCyclesChecked = False
        self._buildConfig = None
        self._target = None
        self._checksum = None
        self._verboseOutput = False
        self.resetMetadata()
        self.resetConfigDepMetadata()

    def resetMetadata(self):
        """Clear the configuration-independent (scan) metadata."""
        self._lastScan = None
        self._lastScanChecksum = None
        self._usedModules = None
        self._containedModules = None
        self._includeFileNames = None

    def updateWithMetadata(self, metadata):
        """Restore scan metadata from the persisted tuple (see metadata())."""
        self.resetMetadata()
        if metadata:
            lastScan, lastScanChecksum, usedMods, containedMods, includeFiles = metadata
            self.setUsedModules(usedMods)
            self.setConstainedModules(containedMods)
            self.setIncludedFileNames(includeFiles)
            self.setLastScan(lastScan)
            self.setLastScanChecksum(lastScanChecksum)

    def metadata(self):
        """Scan metadata as the tuple consumed by updateWithMetadata()."""
        return (self.lastScan(), self.lastScanChecksum(), self.usedModules(),
                self.containedModules(), self.includedFileNames())

    def resetConfigDepMetadata(self):
        """Clear the per-build-configuration metadata."""
        self._lastBuilt = None
        self._mostRecentBuildOfDependency = None
        self._buildTime = None
        self._buildFailed = False
        self._markedForBuilding = False
        self._needsBuilding = None
        self._lastCompileCommand = None
        self._buildCompileCommand = None
        self._lastChecksum = None
        self._buildChecksum = None

    def updateWithConfigDepMetadata(self, metadata):
        "Metadata is a tuple that is used for persisting the file to disk."
        self.resetConfigDepMetadata()
        if metadata:
            timestamp, compileCommand, checksum = metadata
            self.setLastBuilt(timestamp)
            self.setBuildTime(timestamp)
            self.setLastCompileCommand(compileCommand)
            self.setBuildCompileCommand(compileCommand)
            self.setLastChecksum(checksum)
            self.setBuildChecksum(checksum)

    def configDepMetadata(self):
        """Per-config metadata as the tuple consumed by updateWithConfigDepMetadata()."""
        return (self.buildTime(), self.buildCompileCommand(), self.buildChecksum())

    def requiresPreprocessing(self):
        """Base files need no extra preprocessing; subclasses override."""
        return False

    # NOTE(review): a duplicate early definition of preprocessedFilePath()
    # (which simply returned self._path) was removed here: it was dead code,
    # unconditionally shadowed by the later definition below.

    def path(self):
        "Relative to the project root"
        return self._path

    def fileName(self):
        return self._fileName

    def absolutePath(self):
        return os.path.join(self._projectRoot, self.path())

    def generatesObjectFile(self):
        """Whether compiling this file yields a .o; subclasses override for headers."""
        return True

    def generatesModuleFile(self):
        return (len(self.containedModules()) > 0)

    def objectFileName(self):
        """Basename of the object file produced from this source file."""
        pathWithoutExt = os.path.splitext(self.path())[0]
        return os.path.basename(pathWithoutExt) + '.o'

    def lastScanChecksum(self):
        return self._lastScanChecksum

    def setLastScanChecksum(self, last):
        self._lastScanChecksum = last

    def lastScan(self):
        return self._lastScan

    def setLastScan(self, last):
        self._lastScan = last

    def needsRescan(self):
        """True when the file changed (mtime or checksum) since its last scan."""
        if not self._lastScan:
            return True
        else:
            return (self._lastScan < self._lastModified) or (self._lastScanChecksum != self._checksum)

    def usedModules(self):
        return self._usedModules

    def setUsedModules(self, usedMods):
        self._usedModules = usedMods

    def containedModules(self):
        return self._containedModules

    def setConstainedModules(self, mods):
        # (sic: "Constained" typo kept -- callers elsewhere use this name.)
        self._containedModules = mods

    def includedFileNames(self):
        return self._includeFileNames

    def setIncludedFileNames(self, names):
        self._includeFileNames = names

    def setBuildConfig(self, config):
        # weakref.proxy avoids a reference cycle between files and configs.
        import weakref
        self._needsBuilding = None
        self._buildConfig = weakref.proxy(config)

    def buildConfig(self):
        return self._buildConfig

    def setTarget(self, target):
        import weakref
        self._needsBuilding = None
        self._target = weakref.proxy(target)

    def target(self):
        return self._target

    def setLastModified(self, lastModified):
        # NOTE(review): _lastModified only exists after this setter runs.
        self._needsBuilding = None
        self._lastModified = lastModified

    def lastModified(self):
        return self._lastModified

    def setLastBuilt(self, lastBuilt):
        self._needsBuilding = None
        self._lastBuilt = lastBuilt

    def lastBuilt(self):
        return self._lastBuilt

    def setLastCompileCommand(self, flags):
        self._needsBuilding = None
        self._lastCompileCommand = flags

    def buildCompileCommand(self):
        return self._buildCompileCommand

    def setBuildCompileCommand(self, flags):
        self._buildCompileCommand = flags

    def lastCompileCommand(self):
        return self._lastCompileCommand

    def compileCommand(self):
        "Depends on build config, so created on the fly, and not stored."
        return None

    def setLastChecksum(self, checksum):
        self._needsBuilding = None
        self._lastChecksum = checksum

    def lastChecksum(self):
        return self._lastChecksum

    def setChecksum(self, checksum):
        self._needsBuilding = None
        self._checksum = checksum

    def checksum(self):
        return self._checksum

    def setBuildChecksum(self, checksum):
        self._buildChecksum = checksum

    def buildChecksum(self):
        return self._buildChecksum

    def setBuildTime(self, buildTime):
        "Most recent build time, including the current build"
        self._buildTime = buildTime

    def buildTime(self):
        return self._buildTime

    def buildFailed(self):
        return self._buildFailed

    def setIsMainProgram(self, yn):
        self._isMainProgram = yn

    def isMainProgram(self):
        return self._isMainProgram

    def setVerboseOutput(self, verbose):
        self._verboseOutput = verbose

    def verboseOutput(self):
        return self._verboseOutput

    def checksumOfFile(self):
        """Return the MD5 digest of the file's current on-disk contents."""
        import hashlib
        fl = open(self.absolutePath(), 'r')
        try:
            m = hashlib.md5()
            m.update(fl.read())
            checksum = m.digest()
        finally:
            # Bug fix: close the handle even when read()/update() raises.
            fl.close()
        return checksum

    def build(self):
        """Preprocess and compile this file; returns True on success."""
        self._buildFailed = False
        intermediateProductsDir = self._target.intermediateProductsDirectory(self._buildConfig)
        os.chdir(intermediateProductsDir)
        if self.preprocess():
            if self.buildPreprocessedFile():
                self.setBuildTime(time.time())
                self.setBuildChecksum(self.checksum())
                self.setBuildCompileCommand(self.compileCommand())
            else:
                # print(...) with one argument behaves identically on
                # Python 2 and 3.
                print('Failed to compile %s' % (self.fileName()))
                self._buildFailed = True
        else:
            print('Failed to preprocess %s' % (self.fileName()))
            self._buildFailed = True
        return not self._buildFailed

    def buildPreprocessedFile(self):
        return self.runCompileCommand(self.compileCommand())

    def runCompileCommand(self, compileCommand):
        """Run compileCommand via the shell; returns True on success.

        A None command is treated as trivially successful (nothing to do).
        """
        if compileCommand == None: return True
        PrintLock.acquire()
        print('Compiling %s' % (self.path()))
        if self._verboseOutput:
            print('%s\n' % (compileCommand))
        logging.debug('Compile Command: %s' % (compileCommand))
        PrintLock.release()
        success, output, error = RunSubprocess(compileCommand + ' 2>&1')
        if not success:
            # Check if preprocessed file was empty. If so, ignore error
            f = open(self.preprocessedFilePath(), 'r')
            try:
                empty = (f.read().strip() == '')
            finally:
                # Bug fix: the original only closed the handle on one branch.
                f.close()
            if empty:
                success = True
            else:
                PrintLock.acquire()
                print(output)
                PrintLock.release()
        else:
            # If the compile succeeded, check the output for any warnings to print
            if re.search('warn', output, re.IGNORECASE):
                print(output)
        return success

    def isBuilt(self):
        "Whether file has been built in this build."
        if None == self._buildTime:
            return False
        else:
            return self._buildTime > self._lastBuilt

    def isModified(self):
        """True when the file's content changed since it was last built."""
        if not self._lastChecksum:
            return True
        else:
            return (self._checksum != self._buildChecksum) or (self._lastModified > self._lastBuilt)

    def compileCommandHasChanged(self):
        return self._lastCompileCommand != self.compileCommand()

    def addDirectDependency(self, sourceFile):
        self._directDependencies.append(sourceFile)

    def dependenciesAreCyclic(self, fileStack, checkedFiles):
        """Depth-first cycle check over direct dependencies.

        Returns (True, [paths on the cycle]) or (False, None). Each call
        leaves self on fileStack; the caller pops it after the call.
        NOTE(review): a file whose subtree was already checked short-circuits
        to (False, None) even if it appears on the current stack -- confirm
        that behavior is intended.
        """
        checkedFiles.add(self)
        if self._dependencyCyclesChecked:
            fileStack.append(self)
            return False, None
        self._dependencyCyclesChecked = True
        if self in fileStack:
            return True, [f.path() for f in fileStack]
        else:
            fileStack.append(self)
            for d in self._directDependencies:
                areCyclic, failingFiles = d.dependenciesAreCyclic(fileStack, checkedFiles)
                fileStack.pop()
                if areCyclic:
                    return True, failingFiles
        return False, None

    def mostRecentBuildOfDependency(self):
        """Most recent lastBuilt() anywhere in this file's dependency tree."""
        if self._mostRecentBuildOfDependency:
            return self._mostRecentBuildOfDependency
        mostRecent = None
        for dep in self._directDependencies:
            mostRecent = MostRecentDate(mostRecent, dep.lastBuilt())
            mostRecent = MostRecentDate(mostRecent, dep.mostRecentBuildOfDependency())
        self._mostRecentBuildOfDependency = mostRecent # Cache value
        return mostRecent

    def needsBuilding(self):
        """
        Checks whether a dependent was compiled more recently than
        this file, or needs to be compiled.
        """
        if None != self._needsBuilding:
            return self._needsBuilding # Use cached result for performance
        needsBuilding = False
        if self.isModified() or self.compileCommandHasChanged():
            needsBuilding = True
        elif self.mostRecentBuildOfDependency() and self.lastBuilt() and \
                (self.mostRecentBuildOfDependency() > self.lastBuilt()):
            needsBuilding = True
        else:
            for dep in self._directDependencies:
                if dep.needsBuilding():
                    needsBuilding = True
                    break
        self._needsBuilding = needsBuilding # Cache result for performance
        return needsBuilding

    def canBuild(self):
        """
        Whether or not all the dependencies are satisfied to allow the file
        to be built.
        """
        if self._buildFailed: return False
        canBuild = True
        for dep in self._directDependencies:
            if dep.needsBuilding() and not dep.isBuilt():
                canBuild = False
                break
        return canBuild

    def preprocessedFilePath(self):
        """Path of the (possibly renamed) preprocessed copy in the products dir."""
        f = self.preprocessFuncs.get('preprocessedFileNameFunction')
        if f:
            filename = f(self.fileName())
        else:
            filename = self.fileName()
        return os.path.join(self.target().intermediateProductsDirectory(self.buildConfig()), filename)

    def preprocess(self):
        """Run the configured preprocessor, or hard-link the source in place."""
        if self.requiresPreprocessing():
            f = self.preprocessFuncs['preprocessorFunction']
            return f(self.absolutePath(), self.target().intermediateProductsDirectory(self.buildConfig()))
        else:
            # Hard link to the source file
            preprocessedPath = self.preprocessedFilePath()
            if os.path.exists(preprocessedPath): os.remove(preprocessedPath)
            os.link(self.absolutePath(), preprocessedPath)
            return True

    TreeStringIndentLevel = 4

    def dependencyString(self, indent):
        """Recursive, indented listing of the dependency tree."""
        aString = ''
        for dependentFile in self._directDependencies:
            aString += '\n' + indent * ' ' + dependentFile.path()
            aString += dependentFile.dependencyString(indent + self.TreeStringIndentLevel)
        return aString

    def __str__(self):
        s = '%s %s\n' % (str(self.__class__), self.path())
        s += 'Last Built: %s\n' % (self.lastBuilt())
        s += 'Last Modified: %s\n' % (self.lastModified())
        s += 'Can Build: %s\n' % (self.canBuild())
        s += 'Needs Building: %s\n' % (self.needsBuilding())
        # Bug fix: there is no markedForBuilding() accessor on this class,
        # so the original call raised AttributeError; read the attribute.
        s += 'Marked for Building: %s\n' % (self._markedForBuilding)
        s += 'Dependencies'
        s += self.dependencyString(self.TreeStringIndentLevel) + '\n'
        return s
class FortranSourceFile (SourceFile):
    """SourceFile subclass for fixed- and free-form Fortran sources."""
    freeFormRegEx = '.*\.(F|f90|F90)$'
    fixedFormRegEx = '.*\.f$'
    freeFormPreprocessRegEx = None
    fixedFormPreprocessRegEx = None
    includeFileRegEx = '.*\.(inc|fh)$'
    f90defaultCompileGroup = 'default'
    f77defaultCompileGroup = 'default'

    @classmethod
    def configure(cls, infoDict):
        """Override the class regexes and preprocess hooks from a config dict."""
        if not infoDict: return
        # setdefault both reads the configured value and writes our default
        # back into infoDict, matching the original behaviour.
        for key, attr in (('freeformregex', 'freeFormRegEx'),
                          ('fixedformregex', 'fixedFormRegEx'),
                          ('freeformpreprocessregex', 'freeFormPreprocessRegEx'),
                          ('fixedformpreprocessregex', 'fixedFormPreprocessRegEx'),
                          ('includefileregex', 'includeFileRegEx'),
                          ('f90defaultCompileGroup', 'f90defaultCompileGroup'),
                          ('f77defaultCompileGroup', 'f77defaultCompileGroup')):
            setattr(cls, attr, infoDict.setdefault(key, getattr(cls, attr)))
        if 'preprocessfunc' in infoDict:
            cls.preprocessFuncs['preprocessorFunction'] = infoDict.get('preprocessfunc')
        if 'preprocessednamefunc' in infoDict:
            cls.preprocessFuncs['preprocessedFileNameFunction'] = infoDict.get('preprocessednamefunc')

    @classmethod
    def fileNameMatchesType(cls, fileName):
        """True when fileName matches any known Fortran source-file pattern."""
        for pattern in (cls.freeFormRegEx, cls.fixedFormRegEx,
                        cls.freeFormPreprocessRegEx, cls.fixedFormPreprocessRegEx):
            if pattern and re.match(pattern, fileName):
                return True
        return False

    @classmethod
    def allFileRegExs(cls):
        """All non-None filename regexes, include-file pattern included."""
        candidates = [cls.freeFormRegEx, cls.fixedFormRegEx,
                      cls.freeFormPreprocessRegEx, cls.fixedFormPreprocessRegEx,
                      cls.includeFileRegEx]
        return [rx for rx in candidates if rx]

    def _nameMatches(self, pattern):
        """True when this file's name matches the (possibly None) pattern."""
        return bool(pattern and re.match(pattern, self.fileName()))

    def requiresPreprocessing(self):
        return (self._nameMatches(self.freeFormPreprocessRegEx) or
                self._nameMatches(self.fixedFormPreprocessRegEx))

    def isFreeForm(self):
        return (self._nameMatches(self.freeFormPreprocessRegEx) or
                self._nameMatches(self.freeFormRegEx))

    def isFixedForm(self):
        return (self._nameMatches(self.fixedFormPreprocessRegEx) or
                self._nameMatches(self.fixedFormRegEx))

    def generatesObjectFile(self):
        # Include files (.inc/.fh) do not produce an object file.
        return not self._nameMatches(self.includeFileRegEx)

    def compileCommand(self):
        """Pick the F77 or F90 compile command based on the source form."""
        if self.isFixedForm():
            return self.buildConfig().fortran77CompileCommand(self.target(), self)
        if self.isFreeForm():
            return self.buildConfig().fortran90CompileCommand(self.target(), self)
        return None

    def moduleFilePaths(self):
        #FIXME: this works for gfortran and ifort, but other fortran
        # compilers might use other naming schemes for module files
        return [m.lower() + '.mod' for m in self.containedModules()]
class CSourceFile (SourceFile):
    """SourceFile subclass for C sources and headers."""
    fileNameRegEx = '.*\.c$'
    includeFileRegEx = '.*\.h$'
    preprocessFileNameRegEx = None
    defaultCompileGroup = 'default'

    @classmethod
    def configure(cls, infoDict):
        """Override the class regexes and preprocess hooks from a config dict."""
        if not infoDict: return
        # setdefault both reads the configured value and writes our default
        # back into infoDict, matching the original behaviour.
        for key, attr in (('fileregex', 'fileNameRegEx'),
                          ('includefileregex', 'includeFileRegEx'),
                          ('preprocessfileregex', 'preprocessFileNameRegEx'),
                          ('defaultCompileGroup', 'defaultCompileGroup')):
            setattr(cls, attr, infoDict.setdefault(key, getattr(cls, attr)))
        if 'preprocessfunc' in infoDict:
            cls.preprocessFuncs['preprocessorFunction'] = infoDict.get('preprocessfunc')
        if 'preprocessednamefunc' in infoDict:
            cls.preprocessFuncs['preprocessedFileNameFunction'] = infoDict.get('preprocessednamefunc')

    @classmethod
    def fileNameMatchesType(cls, fileName):
        """True for C sources, headers, or files needing extra preprocessing."""
        for pattern in (cls.fileNameRegEx, cls.includeFileRegEx,
                        cls.preprocessFileNameRegEx):
            if pattern and re.match(pattern, fileName):
                return True
        return False

    @classmethod
    def allFileRegExs(cls):
        """All non-None filename regexes for this file type."""
        candidates = [cls.fileNameRegEx, cls.preprocessFileNameRegEx,
                      cls.includeFileRegEx]
        return [rx for rx in candidates if rx]

    def _nameMatches(self, pattern):
        """True when this file's name matches the (possibly None) pattern."""
        return bool(pattern and re.match(pattern, self.fileName()))

    def requiresPreprocessing(self):
        "Whether an extra preprocessor has to be run (on top of the standard C preprocessor)"
        return self._nameMatches(self.preprocessFileNameRegEx)

    def generatesObjectFile(self):
        # Headers produce no object file of their own.
        return not self._nameMatches(self.includeFileRegEx)

    def compileCommand(self):
        if self._nameMatches(self.fileNameRegEx) or self._nameMatches(self.preprocessFileNameRegEx):
            return self.buildConfig().cCompileCommand(self.target(), self)
        return None
class SourceTree:
def __init__(self, rootDirs, sourceTreesDependedOn, metadata, projectRoot, skipdirs, skipfiles,
mainProgramFile = None, noDependencies = False, verboseOutput = False,
includeKeywords = [r'use\s+', r'module\s+', r'\*copy\s+', r'include\s*[\'\"]', r'\#include\s*[\'\"]']):
self.rootDirs= rootDirs
self.projectRoot = projectRoot
self.skipfiles = set(skipfiles)
self.skipdirs = set(skipdirs)
self.metadata = metadata
self.mainProgramFile = mainProgramFile
regExStr = r'^\s*(%s)([\d\w_]+)' % (string.join(includeKeywords,'|'),)
self.moduleUseRegEx = re.compile(regExStr, re.IGNORECASE | re.MULTILINE)
self.sourceTreesDependedOn = sourceTreesDependedOn
self.noDependencies = noDependencies
self.verboseOutput = verboseOutput
self.sourceFiles = self.createSourceFiles()
def sourceFiles(self):
return self.sourceFiles
def sourceFileWithName(self, name):
matchingFiles = [f for f in self.sourceFiles if self.mainProgramFile == os.path.basename(f.path())]
if len(matchingFiles) == 1:
return matchingFiles[0]
else:
return None
def containedModulesDict(self):
"Module names contained in each file in tree, with file path as key"
return self.containedModsDict
def createSourceFiles(self):
"""
Create source file objects representing source files in the file
system.
"""
sourceFiles = []
def addFiles(regExStrings, sourceFileClasses):
for rootDir in self.rootDirs:
listOfFileLists = self.locateFiles(regExStrings, rootDir, True)
for files, sourceFileClass in zip(listOfFileLists, sourceFileClasses):
for path, modDate, checksum in files:
newFile = sourceFileClass(path, self.projectRoot)
newFile.setVerboseOutput(self.verboseOutput)
newFile.updateWithMetadata(self.metadata.setdefault(newFile.path(), {}).get('configindep'))
newFile.setLastModified(modDate)
newFile.setChecksum(checksum)
if os.path.basename(path) == self.mainProgramFile:
newFile.setIsMainProgram(True)
sourceFiles.append(newFile)
logging.debug('Searching for fortran source files')
addFiles([FortranSourceFile.allFileRegExs()], [FortranSourceFile])
if not self.noDependencies: self.setupFortranDependencies(sourceFiles)
addFiles([CSourceFile.allFileRegExs()], [CSourceFile])
return sourceFiles
def createSourceFileForPath(self, path):
"Factory method to create a SourceFile object for the path given."
fileName = os.path.basename(path)
f = None
if FortranSourceFile.fileNameMatchesType(fileName):
f = FortranSourceFile(path, self.projectRoot)
elif CSourceFile.fileNameMatchesType(fileName):
f = CSourceFile(path, self.projectRoot)
else:
raise Exception, 'Unknown file type in sourceFileForPath'
f.setVerboseOutput(self.verboseOutput)
return f
    def locateFiles(self, fileNameRegExLists, rootDir, calcChecksum):
        """
        Locates files matching reg exs passed. Returns lists of lists of tuples,
        containing file path and modification date.
        """
        import hashlib
        # MD5 digest of the file's full contents, used for change detection.
        def genChecksum(filePath):
            fl = open(filePath,'r')
            m = hashlib.md5()
            m.update(fl.read())
            checksum = m.digest()
            fl.close()
            return checksum
        logging.debug('locating files in directory %s' % (rootDir))
        # One compiled-regex list (and one result list) per input regex list;
        # results stay aligned with the input lists by position.
        listOfListOfRegExes = [[re.compile(regEx) for regEx in regExList] for regExList in fileNameRegExLists]
        # Paths are made relative to the project root; chdir so later
        # getmtime/open calls resolve those relative paths.
        os.chdir(self.projectRoot)
        checksum = None
        listOfListOfFileTuples = [[] for r in listOfListOfRegExes]
        for root, dirs, files in os.walk(rootDir):
            # Prune skipped directories in place so os.walk does not descend into them.
            for skipdir in self.skipdirs:
                if skipdir in dirs: dirs.remove(skipdir)
            for f in files:
                if os.path.basename(f) in self.skipfiles: continue
                for listOfRegExs, listOfFileTuples in zip(listOfListOfRegExes, listOfListOfFileTuples):
                    for regEx in listOfRegExs:
                        if regEx.match(f):
                            filePath = os.path.join(root,f)
                            # Strip the project-root prefix and any leading separator.
                            prefix = os.path.commonprefix([filePath, self.projectRoot])
                            filePath = filePath[len(prefix):]
                            if filePath[0] == os.sep: filePath = filePath[1:]
                            if calcChecksum: checksum = genChecksum(filePath)
                            listOfFileTuples.append( (filePath, os.path.getmtime(filePath), checksum) )
                            break
        return listOfListOfFileTuples
def updateMetadata(self):
pathsToRemove = self.removedFilePaths()
for p in pathsToRemove:
del self.metadata[p]
for f in self.sourceFiles: self.metadata[f.path()]['configindep'] = f.metadata()
def prepareForNewBuildCombo(self, buildConfig, target, clean):
logging.debug('Updating file status')
for f in self.sourceFiles:
f.setTarget(target)
f.setBuildConfig(buildConfig)
metadata = None
if not clean: metadata = self.metadata.get(f.path())
if metadata: metadata = metadata.setdefault('configdep', {}).get(buildConfig.name())
f.updateWithConfigDepMetadata(metadata)
def updateConfigDependMetadata(self, buildConfig):
logging.debug('Updating file metadata')
for f in self.sourceFiles:
configsDict = self.metadata.setdefault(f.path(), {}).setdefault('configdep', {})
configsDict[buildConfig.name()] = f.configDepMetadata()
def removedFilePaths(self):
"Returns set of files removed since last build. Paths are project root relative."
timestampPaths = set(self.metadata.keys())
sourceFilePaths = set([f.path() for f in self.sourceFiles])
pathsRemoved = timestampPaths.difference(sourceFilePaths)
return pathsRemoved
def removedSourceFiles(self):
"Returns set of source files removed since last build."
pathsRemoved = self.removedFilePaths()
filesRemoved = []
for p in pathsRemoved :
f = self.createSourceFileForPath(p)
f.updateWithMetadata(self.metadata[p].get('configindep'))
filesRemoved.append(f)
return filesRemoved
def buildableSourceFiles(self):
"""
Returns a list of source files that need building, and for
which dependencies are satisfied.
"""
logging.debug('Getting buildable source files')
files = []
for s in self.sourceFiles:
if self.noDependencies:
if s.isModified() and not s.isBuilt():
files.append(s)
else:
if s.needsBuilding() and s.canBuild() and not s.isBuilt():
files.append(s)
return files
def scanFileForModules(self, filePath):
usedModules = set()
containedModules = set()
includedFiles = set()
f = open(filePath,'r')
fileContent = f.read()
f.close()
matches = self.moduleUseRegEx.findall(fileContent)
for m in matches:
if m[0].lower().strip() == 'use':
usedModules.add(m[1].lower())
elif m[0].lower().strip() == 'module':
containedModules.add(m[1].lower())
else:
includedFiles.add(m[1])
return list(usedModules), list(containedModules), list(includedFiles)
    def setupFortranDependencies(self, fortranSourceFiles):
        """Scan fortran files for module use/definition and includes, wire up
        inter-file dependencies, and reject cyclic dependency graphs."""
        logging.debug('Setting fortran dependencies')
        # Map of fortran module name -> source file that defines it.
        self.containedModsDict = {}
        usedModsDict = {}
        includedFilesDict = {}
        scanTime = time.time()
        for f in fortranSourceFiles:
            if f.needsRescan():
                usedMods, containedMods, includedFiles = self.scanFileForModules(f.path())
                f.setUsedModules(usedMods)
                # NOTE(review): 'setConstainedModules' (sic) is the setter's actual name.
                f.setConstainedModules(containedMods)
                f.setIncludedFileNames(includedFiles)
                f.setLastScan(scanTime)
                f.setLastScanChecksum(f.checksum())
            else:
                # File unchanged since the last scan; reuse cached results.
                usedMods, containedMods, includedFiles = f.usedModules(), f.containedModules(), f.includedFileNames()
            usedModsDict[f] = usedMods
            includedFilesDict[f] = includedFiles
            for m in containedMods:
                self.containedModsDict[m] = f
        # Include files may be referenced with or without their extension.
        fileBases = [os.path.splitext(f.fileName())[0] for f in fortranSourceFiles]
        for f in fortranSourceFiles:
            for usedMod in usedModsDict[f]:
                fileWithUsedMod = self.containedModsDict.get(usedMod)
                if not fileWithUsedMod:
                    # Search for dependency in other source trees
                    for sourceTree in self.sourceTreesDependedOn:
                        fileWithUsedMod = sourceTree.containedModulesDict().get(usedMod)
                        if fileWithUsedMod: break
                if fileWithUsedMod and f != fileWithUsedMod: f.addDirectDependency(fileWithUsedMod)
            for includeFile in includedFilesDict[f]:
                found = False
                for sourceFile, base in zip(fortranSourceFiles, fileBases):
                    if (sourceFile.fileName() == includeFile) or (base == includeFile):
                        f.addDirectDependency(sourceFile)
                        found = True
                        break
                if not found:
                    raise Exception, 'Could not find include file %s from %s' % (includeFile, f.fileName())
        # Check for cycles
        print 'Checking for cyclic dependencies'
        remainingFiles = set(fortranSourceFiles)
        while len(remainingFiles) > 0:
            checkedFiles = set()
            fileStack = []
            f = remainingFiles.pop()
            areCyclic, failingFiles = f.dependenciesAreCyclic(fileStack, checkedFiles)
            if areCyclic:
                raise Exception('The following files have a cyclic dependency: %s' % (failingFiles))
            else:
                # Every file reached in this check is proven acyclic; skip it next round.
                remainingFiles.difference_update(checkedFiles)
def __iter__(self):
return iter(self.sourceFiles)
class Target:
    """A buildable unit: a static library plus, optionally, an executable.

    Wraps one target dictionary from the buildinfo file, owns the target's
    SourceTree, and builds itself after its dependency targets, once per
    build configuration.
    """
    def __init__(self, targetInfoDict, buildRootDir, projectRoot, metadata, setupFunc = None, verboseOutput = False):
        self.targetInfoDict = targetInfoDict
        self.isBuilt = False
        self.lastConfig = None
        self._sourceTree = None
        self.buildRootDir = buildRootDir
        # NOTE(review): this instance attribute shadows the targetDependencies()
        # method defined below, making that method unreachable on instances.
        self.targetDependencies = None
        self.buildShouldStop = False
        self.buildQueue = None
        self.projectRoot = projectRoot
        self.metadata = metadata
        self.verboseOutput = verboseOutput
        self.setupFuncTuple = (setupFunc,) # Using a tuple to avoid binding function to class
    def mainProgramFile(self):
        # Base name of the main program source, or None for library-only targets.
        return self.targetInfoDict.get('mainprogramfile')
    def sourceTree(self):
        # Lazily created in compileSources(); None before the first build.
        return self._sourceTree
    def rootSourceDirectories(self):
        # Absolute paths of the target's source root directories.
        return [os.path.join(self.projectRoot, d) for d in self.targetInfoDict['rootdirs']]
    def buildSubDirectory(self):
        return self.targetInfoDict['buildsubdir']
    def name(self):
        return self.targetInfoDict['name']
    def executableName(self):
        return self.targetInfoDict['exename']
    def targetDependencies(self):
        # NOTE(review): dead code -- shadowed by the instance attribute of the
        # same name assigned in __init__, so this method is never reached.
        return self.targetDependencies
    def moduleFilePath(self, buildConfig):
        # Module search path: this target's module dir first, then each dependency's.
        modulePath = [self.moduleFileDirectory(buildConfig)]
        for t in self.targetDependencies:
            modulePath.append(t.moduleFileDirectory(buildConfig))
        return modulePath
    def dependentLibraryNames(self):
        # This target's library name plus those of its dependency targets.
        names = [self.libraryName()]
        for t in self.targetDependencies:
            names.append(t.libraryName())
        return names
    def dependentLibraryPaths(self, buildConfig):
        paths = [self.productLibraryPath(buildConfig)]
        for t in self.targetDependencies:
            paths.append(t.productLibraryPath(buildConfig))
        return paths
    def moduleFileDirectory(self, buildConfig):
        # Module files live alongside the other intermediate products.
        return self.intermediateProductsDirectory(buildConfig)
    def libraryName(self):
        return self.targetInfoDict['libraryname']
    def fullLibraryName(self):
        # Conventional static library file name: lib<name>.a
        return 'lib' + self.targetInfoDict['libraryname'] + '.a'
    def compileGroups(self):
        return self.targetInfoDict['compilegroups']
    def buildRootDirectory(self):
        "Absolute path to build root."
        return self.buildRootDir
    def productInstallDirectory(self, buildConfig):
        return buildConfig.installDirectory()
    def productLibraryDirectory(self, buildConfig):
        return os.path.join(self.buildRootDirectory(), buildConfig.buildSubDirectory(), 'lib')
    def productLibraryPath(self, buildConfig):
        return os.path.join(self.productLibraryDirectory(buildConfig), self.fullLibraryName())
    def productExecutableDirectory(self, buildConfig):
        return os.path.join(self.buildRootDirectory(), buildConfig.buildSubDirectory(), 'bin')
    def intermediateProductsDirectory(self, buildConfig):
        # Per-target, per-config directory for object and module files.
        return os.path.join(self.buildRootDir, self.buildSubDirectory() + '.build',
            buildConfig.buildSubDirectory())
    def updateTargetDependencies(self, allTargets):
        # Resolve the 'dependson' names into actual Target objects.
        self.targetDependencies = []
        for targetName in self.targetInfoDict['dependson']:
            target = [t for t in allTargets if t.name() == targetName][0]
            self.targetDependencies.append(target)
    def updateMetadata(self):
        if self._sourceTree: self._sourceTree.updateMetadata()
    def isFirstBuild(self, buildConfig):
        # NOTE(review): returns True when the intermediates directory already
        # exists, which looks inverted for a method named isFirstBuild --
        # confirm intended semantics before changing.
        return os.path.exists(self.intermediateProductsDirectory(buildConfig))
    def build(self, buildConfig, clean, numThreads = 1, noDependencies = False):
        """Build dependency targets, then this target's library and (if
        configured) executable. Returns (success, numFilesBuilt)."""
        # Already built with this config in this session: nothing to do.
        if self.isBuilt and self.lastConfig == buildConfig: return True, 0
        self.isBuilt = False
        self.lastConfig = buildConfig
        self.buildShouldStop = False
        intermediatesDir = self.intermediateProductsDirectory(buildConfig)
        if not os.path.exists(intermediatesDir):
            os.makedirs(intermediatesDir)
        dependenciesBuilt = True
        numFilesBuilt = 0
        # Depth-first: dependency targets build themselves (and theirs) first.
        for t in self.targetDependencies:
            logging.debug('Building dependency target %s' % (t.name()))
            dependenciesBuilt, n = t.build(buildConfig, clean, numThreads, noDependencies)
            numFilesBuilt += n
            if not dependenciesBuilt: break
        if dependenciesBuilt and not self.buildShouldStop:
            self.setBuildEnvironment(buildConfig)
            self.isBuilt, n = self.compileSources(buildConfig, numThreads, clean, noDependencies)
            numFilesBuilt += n
            # Only targets declaring an executable name get linked.
            if self.isBuilt and 'exename' in self.targetInfoDict:
                self.isBuilt = self.compileExecutable(buildConfig)
                if not self.isBuilt:
                    print 'Failed to link executable for target %s' % (self.name())
        return self.isBuilt, numFilesBuilt
    def install(self, buildConfig):
        # Copy the built executable (if any) into the configured install directory.
        import shutil
        if 'exename' not in self.targetInfoDict: return
        print 'Installing %s' % (self.name())
        exeDir = os.path.join(self.projectRoot, self.productExecutableDirectory(buildConfig))
        exePath = os.path.join(exeDir, self.executableName())
        binDir = self.productInstallDirectory(buildConfig)
        shutil.copy(exePath, binDir)
    def stopBuild(self):
        # Propagate the stop request to dependency targets and the active queue.
        self.buildShouldStop = True
        for t in self.targetDependencies:
            t.stopBuild()
        if self.buildQueue:
            self.buildQueue.stopBuild()
    def setBuildEnvironment(self, buildConfig):
        # Export target/config paths so setup hooks and compile commands can use them.
        os.environ['FORAY_TARGET_ROOT_DIRS'] = string.join([r'"%s"' % (d) for d in self.rootSourceDirectories()])
        os.environ['FORAY_INTERMEDIATE_PRODUCTS_DIR'] = self.intermediateProductsDirectory(buildConfig)
        os.environ['FORAY_LIBRARY_PRODUCTS_DIR'] = self.productLibraryDirectory(buildConfig)
        os.environ['FORAY_EXECUTABLE_PRODUCTS_DIR'] = self.productExecutableDirectory(buildConfig)
        os.environ['FORAY_INSTALL_DIR'] = self.productInstallDirectory(buildConfig)
    def compileSources(self, buildConfig, numThreads, clean, noDependencies):
        """Compile this target's sources into its static library.
        Returns (success, numFilesBuilt)."""
        print 'Starting build for target "%s" with config "%s"' % (self.name(), buildConfig.name())
        libDirPath = self.productLibraryDirectory(buildConfig)
        if not os.path.exists(libDirPath):
            os.makedirs(libDirPath)
        # Optional user hook from the buildinfo file, run before compiling.
        if self.setupFuncTuple[0]:
            self.setupFuncTuple[0](self.projectRoot,
                self.rootSourceDirectories(),
                self.intermediateProductsDirectory(buildConfig),
                self.productLibraryDirectory(buildConfig),
                self.productExecutableDirectory(buildConfig),
                self.productInstallDirectory(buildConfig) )
        if not self._sourceTree:
            mainProgramFile = self.targetInfoDict.get('mainprogramfile')
            self._sourceTree = SourceTree(self.rootSourceDirectories(),
                [t.sourceTree() for t in self.targetDependencies],
                self.metadata,
                self.projectRoot,
                self.targetInfoDict['skipdirs'],
                self.targetInfoDict['skipfiles'],
                mainProgramFile,
                noDependencies,
                self.verboseOutput)
            # Purge build products belonging to files deleted since the last run.
            self.unarchiveBuildProducts(buildConfig, self._sourceTree.removedFilePaths())
            self.removeModuleFiles(buildConfig, self._sourceTree.removedSourceFiles())
            self.updateMetadata()
        logging.debug('Updating file status')
        self._sourceTree.prepareForNewBuildCombo(buildConfig, self, clean)
        libFilePath = os.path.join(self.productLibraryDirectory(buildConfig), self.fullLibraryName())
        self.buildQueue = BuildQueue(self._sourceTree, buildConfig, self, libFilePath, numThreads)
        success = False
        numFilesBuilt = 0
        if not self.buildShouldStop:
            success, numFilesBuilt = self.buildQueue.buildSource()
        if success and numFilesBuilt > 0:
            # Run ranlib
            indexLibCommand = buildConfig.indexLibraryCommand(self)
            logging.debug('Indexing library: ' + indexLibCommand)
            success, output, error = RunSubprocess(indexLibCommand)
            if not success:
                print 'ranlib failed'
                print output
                print error
        self.buildQueue = None
        self._sourceTree.updateConfigDependMetadata(buildConfig)
        if success:
            statusString = 'Compiled library'
        elif self.buildShouldStop:
            statusString = 'Compiling interrupted'
        else:
            statusString = 'Failed to build library'
        print statusString + ' for target "%s" and config "%s"' % (self.name(), buildConfig.name())
        return success, numFilesBuilt
    def compileExecutable(self, buildConfig):
        # Link the main program's object file against the built libraries.
        exeDirPath = self.productExecutableDirectory(buildConfig)
        if not os.path.exists(exeDirPath): os.makedirs(exeDirPath)
        os.chdir(exeDirPath)
        exeCommand = buildConfig.linkExecutableCommand(self)
        print 'Compiling executable for %s' % (self.name())
        logging.debug('Compile command: %s' % (exeCommand))
        success, output, error = RunSubprocess(exeCommand)
        if not success:
            if output: print output
            if error: print error
        return success
    def archiveBuildProducts(self, buildConfig, sourceFiles):
        # Move freshly built object files (except the main program's) into the library.
        print 'Archiving object files'
        sourceFilesToArchive = [s for s in sourceFiles if not s.isMainProgram()]
        if len(sourceFilesToArchive) == 0: return
        command = buildConfig.archiveCommand(self, sourceFilesToArchive)
        logging.debug('Archiving command: %s' % (command))
        if command:
            success, output, error = RunSubprocess(command)
            if not success:
                if output: print output
                if error: print error
    def unarchiveBuildProducts(self, buildConfig, sourceFilePaths):
        "Removes object files corresponding to the project relative paths passed."
        print 'Removing object files for which source files no longer exist'
        sourceFiles = [self._sourceTree.createSourceFileForPath(p) for p in sourceFilePaths]
        sourceFiles = [f for f in sourceFiles if f.generatesObjectFile()]
        if len(sourceFiles) == 0: return
        command = buildConfig.unarchiveCommand(self, sourceFiles)
        logging.debug('Unarchiving command: %s' % (command))
        if command:
            success, output, error = RunSubprocess(command)
            if not success:
                if output: print output
                if error: print error
    def removeModuleFiles(self, buildConfig, removedSourceFiles):
        "Removes module files for the given source files."
        sourceFiles = [f for f in removedSourceFiles if f.generatesModuleFile()]
        if len(sourceFiles) == 0: return
        print 'Removing module files for which source files no longer exist'
        moduleFiles = []
        for f in sourceFiles :
            moduleFiles.extend(f.moduleFilePaths())
        for f in moduleFiles :
            fn = os.path.join(self.intermediateProductsDirectory(buildConfig), f)
            # NOTE(review): looks like leftover debugging output -- confirm and remove.
            print self.buildRootDir
            if os.path.exists(fn) :
                os.remove(fn)
class BuildConfig:
    """A named build configuration.

    Thin wrapper around a flattened config dictionary from the buildinfo
    file; assembles the concrete compile, archive and link shell commands.
    """
    def __init__(self, configDict, projectRoot):
        self.configDict = configDict
        self.projectRoot = projectRoot
    def name(self):
        return self.configDict['name']
    def installDirectory(self):
        # Shell variables in the configured path are expanded here.
        return ExpandShellArgs(self.configDict['installdir'])
    def buildSubDirectory(self):
        return self.configDict['buildsubdir']
    def compileGroupForFile(self, target, sourceFile):
        """Pick the compile group for a file: an explicit group listing the
        file name wins over the language-based default group."""
        fileName = os.path.split(sourceFile.path())[1]
        compileGroups = target.compileGroups()
        if isinstance(sourceFile, FortranSourceFile):
            if sourceFile.isFixedForm():
                fileGroup = sourceFile.f77defaultCompileGroup
            else:
                fileGroup = sourceFile.f90defaultCompileGroup
        elif isinstance(sourceFile, CSourceFile):
            fileGroup = sourceFile.defaultCompileGroup
        else:
            fileGroup = 'default'
        for groupName, fileNames in compileGroups.iteritems():
            if fileName in fileNames:
                fileGroup = groupName
                break
        return fileGroup
    def modulePathOptions(self, target):
        # Builds e.g. '<opt> "dir1" <opt> "dir2"' using the configured module path option.
        modulePath = target.moduleFilePath(self)
        optionString = self.configDict['compileroptions']['modpathoption']
        moduleString = ''
        if len(modulePath) > 0:
            moduleString = reduce( lambda x, y: '%s %s "%s"' % (x, optionString, y), modulePath, '' )
        return moduleString
    def linkLibraryOptions(self, target):
        # -L<libdir> plus one -l<name> for each dependency library present on disk.
        libraryPath = '-L"%s" ' % (target.productLibraryDirectory(self))
        dependentLibraryNames = target.dependentLibraryNames()
        dependentLibraryPaths = target.dependentLibraryPaths(self)
        dependentLibraryNames = [l[0] for l in zip(dependentLibraryNames, dependentLibraryPaths) \
            if os.path.exists(l[1])] # Filter non-existent libraries out
        optionsString = ''
        if len(dependentLibraryNames) > 0:
            optionsString = reduce( lambda x, y: '%s -l%s' % (x, y), dependentLibraryNames, libraryPath )
        return optionsString
    def fortranCompileCommand(self, target, sourceFile, compilerKey, flagsKey):
        # Shared implementation for both fixed-form (f77) and free-form (f90) files.
        compilerOptionsDict = self.configDict['compileroptions']
        compileGroup = self.compileGroupForFile(target, sourceFile)
        compileGroupFlags = compilerOptionsDict['compilegroupflags'][compileGroup]
        compiler = compilerOptionsDict[compilerKey]
        flags = compilerOptionsDict[flagsKey]
        modPathFlags = self.modulePathOptions(target)
        sourceFilePath = os.path.join(self.projectRoot, sourceFile.preprocessedFilePath())
        return '%s %s %s %s "%s"' % (compiler, flags, compileGroupFlags, modPathFlags, sourceFilePath)
    def fortran77CompileCommand(self, target, sourceFile):
        return self.fortranCompileCommand(target, sourceFile, 'f77compiler', 'f77flags')
    def fortran90CompileCommand(self, target, sourceFile):
        return self.fortranCompileCommand(target, sourceFile, 'f90compiler', 'f90flags')
    def cCompileCommand(self, target, sourceFile):
        compilerOptionsDict = self.configDict['compileroptions']
        compileGroup = self.compileGroupForFile(target, sourceFile)
        compileGroupFlags = compilerOptionsDict['compilegroupflags'][compileGroup]
        compiler = compilerOptionsDict['ccompiler']
        flags = compilerOptionsDict['cflags']
        sourceFilePath = os.path.join(self.projectRoot, sourceFile.preprocessedFilePath())
        return '%s %s %s "%s"' % (compiler, flags, compileGroupFlags, sourceFilePath)
    def archiveCommand(self, target, sourceFiles):
        """
        Should only be called once object files have been created, because
        it checks for their existence.
        """
        libPath = target.productLibraryPath(self)
        intermedPath = target.intermediateProductsDirectory(self)
        paths = [s.objectFileName() for s in sourceFiles if
            s.generatesObjectFile() and
            os.path.exists(os.path.join(intermedPath,s.objectFileName()))]
        paths = string.join(paths)
        if len(paths) == 0: return None
        # cd into the intermediates dir, archive the objects, then delete them.
        changeDirCommand = 'cd "%s"' % (intermedPath)
        arCommand = self.configDict['compileroptions']['archivecommand']
        arCommand = '%s "%s" %s' % (arCommand, libPath, paths)
        removeCommand = 'rm ' + paths
        return string.join([changeDirCommand, arCommand, removeCommand], ' ; ')
    def unarchiveCommand(self, target, sourceFiles):
        # Command that deletes the given files' objects from the static library.
        libPath = target.productLibraryPath(self)
        objects = string.join([s.objectFileName() for s in sourceFiles if s.generatesObjectFile()])
        if len(objects) == 0: return None
        unarchCommand = self.configDict['compileroptions']['unarchivecommand']
        unarchCommand = '%s "%s" %s' % (unarchCommand, libPath, objects)
        return unarchCommand
    def indexLibraryCommand(self, target):
        # ranlib invocation, guarded so it only runs when the library file exists.
        libPath = target.productLibraryPath(self)
        ranlibCommand = self.configDict['compileroptions']['ranlibcommand']
        return 'if [ -e "%s" ]; then %s "%s" ; fi' % (libPath, ranlibCommand, libPath)
    def linkExecutableCommand(self, target):
        # Links the main program object with priority libs, target libs, then other libs.
        mainProgramFileName = target.mainProgramFile()
        mainSourceFile = target.sourceTree().sourceFileWithName(mainProgramFileName)
        mainObjectName = mainSourceFile.objectFileName()
        intermedPath = target.intermediateProductsDirectory(self)
        mainObjectPath = os.path.join(intermedPath, mainObjectName)
        exeName = target.executableName()
        libs = self.linkLibraryOptions(target)
        c = self.configDict['compileroptions']
        linkCommand = '%s %s -o %s "%s" %s %s %s' % \
            (c['link'], c['linkflags'], exeName, mainObjectPath, c['prioritylibs'], libs, c['otherlibs'])
        return linkCommand
class BuildQueue:
    """
    This class schedules file compilations. It takes account of dependencies, and
    is optimized for working on parallel systems.
    """
    def __init__(self, sourceTree, buildConfig, target, libFilePath, numThreads):
        import threading
        self.sourceTree = sourceTree
        self.buildConfig = buildConfig
        self.target = target
        self.libFilePath = libFilePath
        self.numParallelThreads = numThreads
        # These locks guard the shared work list and results list used below.
        self.buildableSourceFilesLock = threading.Lock()
        self.builtSourceFilesLock = threading.Lock()
        # NOTE(review): read and written from worker threads without a lock.
        self.buildShouldStop = False
    def buildSourceFilesInThread(self):
        """Worker loop: pop files off the shared queue and build them until
        the queue empties, a build fails, or a stop is requested."""
        def getNextSourceFile():
            self.buildableSourceFilesLock.acquire()
            if len(self.buildableSourceFiles) > 0:
                f = self.buildableSourceFiles.pop()
            else:
                f = None
            self.buildableSourceFilesLock.release()
            return f
        try:
            f = getNextSourceFile()
            while f and not self.buildShouldStop:
                success = f.build()
                if success:
                    self.builtSourceFilesLock.acquire()
                    self.builtSourceFiles.append(f)
                    self.builtSourceFilesLock.release()
                    f = getNextSourceFile()
                else:
                    # One failed compilation stops all workers.
                    self.buildShouldStop = True
        except Exception, e:
            print 'An error occurred: ', e
            self.buildShouldStop = True
    # Archive (and release) finished object files after this many compilations.
    ArchiveThreshold = 100
    def buildSource(self):
        """Repeatedly compile all currently-buildable files with a pool of
        threads until nothing more can be built. Returns (success, count)."""
        import threading
        self.buildShouldStop = False
        self.buildableSourceFiles = self.sourceTree.buildableSourceFiles()
        numFilesBuilt = 0
        numFilesBuiltSinceLastArchive = 0
        self.builtSourceFiles = []
        while len(self.buildableSourceFiles) > 0 and not self.buildShouldStop:
            numBuiltBefore = len(self.builtSourceFiles)
            threads = []
            # Fresh threads each round; join() waits for the round to finish.
            for threadIndex in range(self.numParallelThreads):
                threads.append( threading.Thread(target=self.buildSourceFilesInThread) )
                threads[-1].start()
            for thread in threads:
                thread.join()
            numBuiltThisRound = len(self.builtSourceFiles) - numBuiltBefore
            numFilesBuilt += numBuiltThisRound
            numFilesBuiltSinceLastArchive += numBuiltThisRound
            # Periodically move finished objects into the static library.
            if numFilesBuiltSinceLastArchive >= BuildQueue.ArchiveThreshold:
                self.target.archiveBuildProducts(self.buildConfig, self.builtSourceFiles)
                numFilesBuiltSinceLastArchive = 0
                self.builtSourceFiles = []
            if not self.buildShouldStop:
                # Newly built files may have unblocked their dependents.
                self.buildableSourceFiles = self.sourceTree.buildableSourceFiles()
        self.target.archiveBuildProducts(self.buildConfig, self.builtSourceFiles)
        return (not self.buildShouldStop), numFilesBuilt
    def stopBuild(self):
        self.buildShouldStop = True
class Builder:
    """Top-level driver: loads persisted metadata, resolves the requested
    configs and targets from the buildinfo dictionary, and builds every
    (config, target) combination."""
    def __init__(self, buildInfo, targetNames, configNames, projectRoot, numThreads, clean, noDependencies, debug, verbose):
        # Store ivars
        self.buildInfo = buildInfo
        self.projectRoot = projectRoot
        self.numThreads = numThreads
        self.clean = clean
        self.noDependencies = noDependencies
        self.buildCancelled = False
        self.debug = debug
        self.verbose = verbose
        # read old metadata
        import cPickle
        metadataFilePath = self.metadataFilePath()
        self.allFileMetadata = None
        if os.path.exists(metadataFilePath):
            f = open(metadataFilePath, 'rb')
            self.allFileMetadata = cPickle.load(f)
            f.close()
        if not self.allFileMetadata: self.allFileMetadata = {}
        # Setup build configurations
        self.buildConfigsToBuild = []
        if not configNames or len(configNames) == 0:
            configNames = [self.buildInfo['defaultconfig']] # default
        for configName in configNames:
            configDicts = [d for d in self.buildInfo['configs'] if d['name'] == configName]
            if len(configDicts) != 1:
                raise Exception, 'Invalid configuration %s' % (configName)
            configDict = configDicts[0]
            # Resolve the config's 'inherits' chain into a single flat dict.
            flattenedConfigDict = self.flattenConfigInheritance(configDict)
            logging.debug('Flattened config dict for %s: %s' % (configDict['name'], str(flattenedConfigDict)))
            buildConfig = BuildConfig(flattenedConfigDict, self.projectRoot)
            self.buildConfigsToBuild.append(buildConfig)
        # Setup targets
        self.currentTarget = None
        self.allTargets = []
        self.targetsToBuild = []
        if not targetNames or len(targetNames) == 0:
            targetNames = [t['name'] for t in self.buildInfo['targets']] # use all
        for targetDict in self.buildInfo['targets']:
            # Each target gets its own metadata sub-dictionary, keyed by name.
            targetMetadata = self.allFileMetadata.setdefault(targetDict['name'], {})
            target = Target(targetDict, ExpandShellArgs(self.buildInfo['builddir']), self.projectRoot,
                targetMetadata, self.buildInfo.get('firstbuildfunc'), verboseOutput=verbose)
            self.allTargets.append(target)
            if targetDict['name'] in targetNames:
                self.targetsToBuild.append(target)
        # All targets are created (even unrequested ones) so dependencies resolve.
        for target in self.allTargets:
            target.updateTargetDependencies(self.allTargets)
    def flattenConfigInheritance(self, configDict):
        """Collapse a config's 'inherits' chain into one dictionary; the
        inheriting config's values override the inherited ones."""
        import copy
        def recursiveUpdate(dictToUpdate, dictToUpdateWith):
            "Recursively update a tree of dictionaries"
            for key,value in dictToUpdateWith.iteritems():
                if key in dictToUpdate and isinstance(value, dict):
                    recursiveUpdate(dictToUpdate[key], dictToUpdateWith[key])
                else:
                    dictToUpdate[key] = copy.deepcopy(value)
        def inheritFromDict(inheritingDict, resultDict):
            "Inherit the contents of one dictionary in another"
            if 'inherits' in inheritingDict:
                configName = inheritingDict['inherits']
                inheritedDict = [d for d in self.buildInfo['configs'] if d['name'] == configName][0]
                inheritFromDict(inheritedDict, resultDict)
            recursiveUpdate(resultDict, inheritingDict)
        flattenedDict = {}
        inheritFromDict(configDict, flattenedDict)
        return flattenedDict
    def metadataFilePath(self):
        # The pickled metadata file lives in the configured build directory.
        return os.path.join(ExpandShellArgs(self.buildInfo['builddir']), 'build.foraymetadata')
    def build(self):
        """Build every requested (config, target) combination, then persist
        the collected file metadata. Returns overall success."""
        import pprint, cPickle
        # Build each combination of target and config
        allFileMetadata = self.allFileMetadata
        success = True
        allTargetsDict = dict([(target.name(), target) for target in self.allTargets])
        for config in self.buildConfigsToBuild:
            if 'prepareconfigfunc' in self.buildInfo:
                self.buildInfo['prepareconfigfunc'](config.name())
            for target in self.targetsToBuild:
                self.currentTarget = target
                success, numFilesBuilt = target.build(config, self.clean, self.numThreads, self.noDependencies)
                if not success: break
            if not success: break
        # Metadata is stored even after a failed build, so files that did
        # compile are not rebuilt next time.
        print 'Storing file meta data'
        metadataFilePath = self.metadataFilePath()
        if self.debug:
            # Human-readable dump alongside the pickle when debugging.
            f = open(metadataFilePath + '.debug', 'w')
            pprint.pprint(allFileMetadata, f)
            f.close()
        f = open(metadataFilePath, 'wb')
        cPickle.dump(allFileMetadata, f)
        f.close()
        self.currentTarget = None
        return success
    def install(self):
        "Installs the target products in the respective bin directories."
        for t in self.targetsToBuild:
            for b in self.buildConfigsToBuild:
                t.install(b)
    def handleStopSignal(self, signalNum, frame):
        # Signal handler (SIGINT/SIGQUIT/SIGABRT): ask the active target to stop.
        self.buildCancelled = True
        if self.currentTarget: self.currentTarget.stopBuild()
# -----------------
# Main program
# -----------------
# Read environment variables
# (setdefault also exports FORAY_NUM_THREADS=1 into the environment if unset)
numThreads = os.environ.setdefault('FORAY_NUM_THREADS', '1')
# Parse input arguments
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-d", "--debug", action="store_true", dest="debug", help="Print debug info.",
    default=False)
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", help="Print verbose output.",
    default=False)
parser.add_option("-c", "--clean", action="store_true", dest="clean", help="Whether to rebuild completely.",
    default=False)
parser.add_option("-j", "--threads", type="int", dest="numthreads", help="The number of threads to use.",
    default=int(numThreads))
parser.add_option("-i", "--buildinfo", type="string", dest="buildinfofile", help="The build info file name.",
    default='buildinfo')
parser.add_option("-b", "--buildconfig", type="string", action="append", dest="configs",
    help="The configuration to build.")
parser.add_option("-n", "--nodepends", action="store_true", dest="nodepends", help="Do not account for file dependencies.",
    default=False)
(options, targets) = parser.parse_args()
# Debugging
if options.debug: logging.getLogger('').setLevel(logging.DEBUG)
# Build info
import signal
buildinfoGlobals = {}
# The buildinfo file is searched for from the working directory upward.
buildinfoPath = FindFileInAncestorDirs(os.getcwd(), options.buildinfofile)
if not buildinfoPath:
    sys.exit('Could not locate buildinfo file')
# The buildinfo file is plain python; executing it must define a
# 'buildinfo' dictionary in its globals.
execfile(buildinfoPath, buildinfoGlobals)
if 'buildinfo' not in buildinfoGlobals:
    sys.exit('No buildinfo dict found in buildinfo file')
buildInfo = buildinfoGlobals['buildinfo']
# File types
FortranSourceFile.configure(buildInfo.get('fortranfiles'))
CSourceFile.configure(buildInfo.get('cfiles'))
# Project root
if 'projectroot' in buildInfo:
    projectRoot = ExpandShellArgs(buildInfo['projectroot'])
else:
    # Default: the directory containing the buildinfo file.
    projectRoot = os.path.dirname(buildinfoPath)
os.environ['FORAY_PROJECT_ROOT'] = projectRoot
# Create builder and build
builder = Builder(buildInfo, targets, options.configs, projectRoot, options.numthreads,
    options.clean, options.nodepends, options.debug, options.verbose)
signal.signal(signal.SIGINT, builder.handleStopSignal)
signal.signal(signal.SIGQUIT, builder.handleStopSignal)
signal.signal(signal.SIGABRT, builder.handleStopSignal)
# In debug mode exceptions propagate with full tracebacks; otherwise they
# are reported as a single-line Foray error.
if options.debug:
    if builder.build(): builder.install()
else:
    try:
        if builder.build():
            builder.install()
    except Exception, e:
        print 'Foray Error: ' + str(e.args[0])
# ------------------------------------------------------------------------------------
# Copyright (c) 2008, Drew McCormack
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
# Neither the name of the Vrije Universiteit (Amsterdam) nor the names of its
# contributors may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#!/usr/bin/env python
import os, os.path, sys, re, string, logging, subprocess
import time, shutil, threading
# ---------
# Globals
# ---------
# Module-wide lock; presumably serializes console output across build
# threads (its acquire/release sites are not visible in this chunk -- confirm).
PrintLock = threading.Lock()
# ----------
# Exceptions
# ----------
class InterruptException(Exception):
    "Exception type used to signal that processing was interrupted."
# ---------
# Logging
# ---------
# Quiet by default; the -d/--debug command line flag lowers this to DEBUG.
logging.getLogger('').setLevel(logging.WARNING)
# ---------
# Functions
# ---------
def ExpandShellArgs(argsString):
    """Run argsString through the shell's echo, so any variables or
    substitutions it contains are expanded. Returns the result without the
    trailing newline that echo appends."""
    # NOTE(review): the string is interpolated into a shell command unescaped,
    # so callers must only pass trusted input.
    expanded = os.popen('echo %s' % (argsString)).read()
    return expanded[:-1]
def MostRecentDate(date1, date2):
    "Return the later of two dates; either argument may be None."
    if date1 is None:
        return date2
    if date2 is None:
        return date1
    return max(date1, date2)
def RunSubprocess(command):
    """Run a shell command, capturing its stdout and stderr.

    Returns a (success, output, error) tuple. success is False when the
    command could not be launched (OSError), exited with a non-zero code,
    or was killed by a signal (negative return code). output/error are
    None when the command never started.
    """
    import subprocess
    success = True
    # Bug fix: output/error must be defined even when Popen itself raises,
    # otherwise the return statement below raised a NameError.
    output, error = None, None
    try:
        process = subprocess.Popen(command, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        output, error = process.communicate()
    except OSError:
        # The command could not be started at all.
        success = False
    if success:
        if process.returncode < 0:
            # Negative return codes indicate termination by a signal.
            print('Build Interrupted')
            success = False
        elif process.returncode > 0:
            success = False
    return (success, output, error)
def FindFileInAncestorDirs(dir, filename):
    """Search 'dir' and each of its ancestor directories for 'filename'.
    Returns the full path of the first match, or None if no ancestor
    contains it."""
    import os.path
    current = dir
    while os.path.exists(current):
        candidate = os.path.join(current, filename)
        if os.path.isfile(candidate):
            return candidate
        # Step up one level; stripping the separator ends the loop at the root.
        current = os.path.dirname(current).rstrip(os.sep)
    return None
# ---------
# Classes
# ---------
class SourceFile:
    """A single source file in the project tree.

    Tracks config-independent scan state (modules used/defined, include
    files) and per-build-configuration state (build timestamps, compile
    commands, checksums).
    """
    # Registry of preprocessing callables; populated outside this class
    # (usage not visible in this chunk -- TODO confirm).
    preprocessFuncs = {}
    def __init__(self, path, projectRoot):
        """Create a record for the source file at 'path' (relative to projectRoot)."""
        self._path = path
        self._fileName = os.path.basename(path)
        # Other SourceFile objects this file directly depends on.
        self._directDependencies = []
        self._isMainProgram = False
        self._projectRoot = projectRoot
        self._dependencyCyclesChecked = False
        self._buildConfig = None
        self._target = None
        self._checksum = None
        self._verboseOutput = False
        # Scan state and per-config build state start at their defaults.
        self.resetMetadata()
        self.resetConfigDepMetadata()
def resetMetadata(self):
self._lastScan = None
self._lastScanChecksum = None
self._usedModules = None
self._containedModules = None
self._includeFileNames = None
def updateWithMetadata(self, metadata):
self.resetMetadata()
if metadata:
lastScan, lastScanChecksum, usedMods, containedMods, includeFiles = metadata
self.setUsedModules(usedMods)
self.setConstainedModules(containedMods)
self.setIncludedFileNames(includeFiles)
self.setLastScan(lastScan)
self.setLastScanChecksum(lastScanChecksum)
def metadata(self):
return (self.lastScan(), self.lastScanChecksum(), self.usedModules(),
self.containedModules(), self.includedFileNames())
def resetConfigDepMetadata(self):
self._lastBuilt = None
self._mostRecentBuildOfDependency = None
self._buildTime = None
self._buildFailed = False
self._markedForBuilding = False
self._needsBuilding = None
self._lastCompileCommand = None
self._buildCompileCommand = None
self._lastChecksum = None
self._buildChecksum = None
def updateWithConfigDepMetadata(self, metadata):
"Metadata is a tuple that is used for persisting the file to disk."
self.resetConfigDepMetadata()
if metadata:
timestamp, compileCommand, checksum = metadata
self.setLastBuilt(timestamp)
self.setBuildTime(timestamp)
self.setLastCompileCommand(compileCommand)
self.setBuildCompileCommand(compileCommand)
self.setLastChecksum(checksum)
self.setBuildChecksum(checksum)
def configDepMetadata(self):
return (self.buildTime(), self.buildCompileCommand(), self.buildChecksum())
def requiresPreprocessing(self):
return False
def preprocessedFilePath(self):
return self._path
def path(self):
"Relative to the project root"
return self._path
def fileName(self):
return self._fileName
def absolutePath(self):
return os.path.join(self._projectRoot, self.path())
def generatesObjectFile(self):
return True
def generatesModuleFile(self):
return (len(self.containedModules()) > 0)
def objectFileName(self):
pathWithoutExt = os.path.splitext(self.path())[0]
return os.path.basename(pathWithoutExt) + '.o'
def lastScanChecksum(self):
return self._lastScanChecksum
def setLastScanChecksum(self, last):
self._lastScanChecksum = last
def lastScan(self):
return self._lastScan
def setLastScan(self, last):
self._lastScan = last
def needsRescan(self):
if not self._lastScan:
return True
else:
return (self._lastScan < self._lastModified) or (self._lastScanChecksum != self._checksum)
def usedModules(self):
return self._usedModules
def setUsedModules(self, usedMods):
self._usedModules = usedMods
def containedModules(self):
return self._containedModules
def setConstainedModules(self, mods):
self._containedModules = mods
def includedFileNames(self):
return self._includeFileNames
def setIncludedFileNames(self, names):
self._includeFileNames = names
def setBuildConfig(self, config):
import weakref
self._needsBuilding = None
self._buildConfig = weakref.proxy(config)
def buildConfig(self):
return self._buildConfig
def setTarget(self, target):
import weakref
self._needsBuilding = None
self._target = weakref.proxy(target)
def target(self):
return self._target
def setLastModified(self, lastModified):
self._needsBuilding = None
self._lastModified = lastModified
def lastModified(self):
return self._lastModified
def setLastBuilt(self, lastBuilt):
self._needsBuilding = None
self._lastBuilt = lastBuilt
def lastBuilt(self):
return self._lastBuilt
def setLastCompileCommand(self, flags):
self._needsBuilding = None
self._lastCompileCommand = flags
def buildCompileCommand(self):
return self._buildCompileCommand
def setBuildCompileCommand(self, flags):
self._buildCompileCommand = flags
def lastCompileCommand(self):
return self._lastCompileCommand
def compileCommand(self):
"Depends on build config, so created on the fly, and not stored."
return None
def setLastChecksum(self, checksum):
self._needsBuilding = None
self._lastChecksum = checksum
def lastChecksum(self):
return self._lastChecksum
def setChecksum(self, checksum):
self._needsBuilding = None
self._checksum = checksum
def checksum(self):
return self._checksum
def setBuildChecksum(self, checksum):
self._buildChecksum = checksum
def buildChecksum(self):
return self._buildChecksum
def setBuildTime(self, buildTime):
"Most recent build time, including the current build"
self._buildTime = buildTime
def buildTime(self):
return self._buildTime
def buildFailed(self):
return self._buildFailed
def setIsMainProgram(self, yn):
self._isMainProgram = yn
def isMainProgram(self):
return self._isMainProgram
def setVerboseOutput(self, verbose):
self._verboseOutput = verbose
def verboseOutput(self):
return self._verboseOutput
def checksumOfFile(self):
import hashlib
fl = open(self.absolutePath(),'r')
m = hashlib.md5()
m.update(fl.read())
checksum = m.digest()
fl.close()
return checksum
def build(self):
self._buildFailed = False
intermediateProductsDir = self._target.intermediateProductsDirectory(self._buildConfig)
os.chdir( intermediateProductsDir )
if self.preprocess():
if self.buildPreprocessedFile():
self.setBuildTime(time.time())
self.setBuildChecksum(self.checksum())
self.setBuildCompileCommand(self.compileCommand())
else:
print 'Failed to compile %s' % (self.fileName())
self._buildFailed = True
else:
print 'Failed to preprocess %s' % (self.fileName())
self._buildFailed = True
return not self._buildFailed
def buildPreprocessedFile(self):
return self.runCompileCommand(self.compileCommand())
def runCompileCommand(self, compileCommand):
if compileCommand == None: return True
PrintLock.acquire()
print 'Compiling %s' % (self.path())
if self._verboseOutput:
print '%s\n' % (compileCommand)
logging.debug('Compile Command: %s' % (compileCommand))
PrintLock.release()
success, output, error = RunSubprocess(compileCommand + ' 2>&1')
if not success:
# Check if preprocessed file was empty. If so, ignore error
f = open(self.preprocessedFilePath(), 'r')
if f.read().strip() == '':
success = True
else:
PrintLock.acquire()
print output
PrintLock.release()
f.close()
else:
# If the compile succeeded, check the output for any warnings to print
if re.search('warn', output, re.IGNORECASE):
print output
return success
def isBuilt(self):
"Whether file has been built in this build."
if None == self._buildTime:
return False
else:
return self._buildTime > self._lastBuilt
def isModified(self):
if not self._lastChecksum:
return True
else:
return (self._checksum != self._buildChecksum) or (self._lastModified > self._lastBuilt)
def compileCommandHasChanged(self):
return self._lastCompileCommand != self.compileCommand()
def addDirectDependency(self, sourceFile):
self._directDependencies.append(sourceFile)
def dependenciesAreCyclic(self, fileStack, checkedFiles):
checkedFiles.add(self)
if self._dependencyCyclesChecked:
fileStack.append(self)
return False, None
self._dependencyCyclesChecked = True
if self in fileStack:
return True, [f.path() for f in fileStack]
else:
fileStack.append(self)
for d in self._directDependencies:
areCyclic, failingFiles = d.dependenciesAreCyclic(fileStack, checkedFiles)
fileStack.pop()
if areCyclic:
return True, failingFiles
return False, None
def mostRecentBuildOfDependency(self):
if self._mostRecentBuildOfDependency:
return self._mostRecentBuildOfDependency
mostRecent = None
for dep in self._directDependencies:
mostRecent = MostRecentDate(mostRecent, dep.lastBuilt())
mostRecent = MostRecentDate(mostRecent, dep.mostRecentBuildOfDependency())
self._mostRecentBuildOfDependency = mostRecent # Cache value
return mostRecent
def needsBuilding(self):
"""
Checks whether a dependent was compiled more recently than
this file, or needs to be compiled.
"""
if None != self._needsBuilding:
return self._needsBuilding # Use cached result for performance
needsBuilding = False
if self.isModified() or self.compileCommandHasChanged():
needsBuilding = True
elif self.mostRecentBuildOfDependency() and self.lastBuilt() and \
(self.mostRecentBuildOfDependency() > self.lastBuilt()):
needsBuilding = True
else:
for dep in self._directDependencies:
if dep.needsBuilding():
needsBuilding = True
break
self._needsBuilding = needsBuilding # Cache result for performance
return needsBuilding
def canBuild(self):
"""
Whether or not all the dependencies are satisfied to allow the file
to be built.
"""
if self._buildFailed: return False
canBuild = True
for dep in self._directDependencies:
if dep.needsBuilding() and not dep.isBuilt():
canBuild = False
break
return canBuild
def preprocessedFilePath(self):
f = self.preprocessFuncs.get('preprocessedFileNameFunction')
if f:
filename = f(self.fileName())
else:
filename = self.fileName()
return os.path.join(self.target().intermediateProductsDirectory(self.buildConfig()), filename)
def preprocess(self):
if self.requiresPreprocessing():
f = self.preprocessFuncs['preprocessorFunction']
return f(self.absolutePath(), self.target().intermediateProductsDirectory(self.buildConfig()))
else:
# Hard link to the source file
preprocessedPath = self.preprocessedFilePath()
if os.path.exists(preprocessedPath): os.remove(preprocessedPath)
os.link(self.absolutePath(), preprocessedPath)
return True
TreeStringIndentLevel = 4
def dependencyString(self, indent):
aString = ''
for dependentFile in self._directDependencies:
aString += '\n' + indent * ' ' + dependentFile.path()
aString += dependentFile.dependencyString(indent + self.TreeStringIndentLevel)
return aString
def __str__(self):
s = '%s %s\n' %(str(self.__class__), self.path())
s += 'Last Built: %s\n' % (self.lastBuilt())
s += 'Last Modified: %s\n' % (self.lastModified())
s += 'Can Build: %s\n' % (self.canBuild())
s += 'Needs Building: %s\n' % (self.needsBuilding())
s += 'Marked for Building: %s\n' % (self.markedForBuilding())
s += 'Dependencies'
s += self.dependencyString(self.TreeStringIndentLevel) + '\n'
return s
class FortranSourceFile (SourceFile):
    """A Fortran source file (fixed-form F77 or free-form F90/95).

    Class attributes hold regex patterns distinguishing free-form,
    fixed-form, preprocessed, and include files; configure() overrides
    them from project configuration.
    """
    freeFormRegEx = '.*\.(F|f90|F90)$'
    fixedFormRegEx = '.*\.f$'
    freeFormPreprocessRegEx = None
    fixedFormPreprocessRegEx = None
    includeFileRegEx = '.*\.(inc|fh)$'
    # Compile-group names used when a file is not listed in any explicit
    # group in the target configuration.
    f90defaultCompileGroup = 'default'
    f77defaultCompileGroup = 'default'
    @classmethod
    def configure(cls, infoDict):
        """Overrides class-level patterns/hooks from a config dict.
        NOTE: setdefault also writes the defaults back into infoDict."""
        if not infoDict: return
        cls.freeFormRegEx = infoDict.setdefault('freeformregex', cls.freeFormRegEx)
        cls.fixedFormRegEx = infoDict.setdefault('fixedformregex', cls.fixedFormRegEx)
        cls.freeFormPreprocessRegEx = infoDict.setdefault('freeformpreprocessregex', cls.freeFormPreprocessRegEx)
        cls.fixedFormPreprocessRegEx = infoDict.setdefault('fixedformpreprocessregex', cls.fixedFormPreprocessRegEx)
        cls.includeFileRegEx = infoDict.setdefault('includefileregex', cls.includeFileRegEx)
        if 'preprocessfunc' in infoDict:
            cls.preprocessFuncs['preprocessorFunction'] = infoDict.get('preprocessfunc')
        if 'preprocessednamefunc' in infoDict:
            cls.preprocessFuncs['preprocessedFileNameFunction'] = infoDict.get('preprocessednamefunc')
        cls.f90defaultCompileGroup = infoDict.setdefault('f90defaultCompileGroup', cls.f90defaultCompileGroup)
        cls.f77defaultCompileGroup = infoDict.setdefault('f77defaultCompileGroup', cls.f77defaultCompileGroup)
    @classmethod
    def fileNameMatchesType(cls, fileName):
        # Truthy (a match object) when any configured Fortran pattern matches.
        return ( cls.freeFormRegEx and re.match(cls.freeFormRegEx, fileName) ) or \
               ( cls.fixedFormRegEx and re.match(cls.fixedFormRegEx, fileName) ) or \
               ( cls.freeFormPreprocessRegEx and re.match(cls.freeFormPreprocessRegEx, fileName) ) or \
               ( cls.fixedFormPreprocessRegEx and re.match(cls.fixedFormPreprocessRegEx, fileName) )
    @classmethod
    def allFileRegExs(cls):
        "All non-None filename patterns, for directory scanning."
        all = []
        if cls.freeFormRegEx: all.append(cls.freeFormRegEx)
        if cls.fixedFormRegEx: all.append(cls.fixedFormRegEx)
        if cls.freeFormPreprocessRegEx: all.append(cls.freeFormPreprocessRegEx)
        if cls.fixedFormPreprocessRegEx: all.append(cls.fixedFormPreprocessRegEx)
        if cls.includeFileRegEx: all.append(cls.includeFileRegEx)
        return all
    def requiresPreprocessing(self):
        # Truthy when the filename matches either preprocess pattern.
        return \
            ( self.freeFormPreprocessRegEx and
              re.match(self.freeFormPreprocessRegEx, self.fileName()) ) or \
            ( self.fixedFormPreprocessRegEx and
              re.match(self.fixedFormPreprocessRegEx, self.fileName()) )
    def isFreeForm(self):
        return \
            ( self.freeFormPreprocessRegEx and
              re.match(self.freeFormPreprocessRegEx, self.fileName()) ) or \
            ( self.freeFormRegEx and
              re.match(self.freeFormRegEx, self.fileName()) )
    def isFixedForm(self):
        return \
            ( self.fixedFormPreprocessRegEx and
              re.match(self.fixedFormPreprocessRegEx, self.fileName()) ) or \
            ( self.fixedFormRegEx and
              re.match(self.fixedFormRegEx, self.fileName()) )
    def generatesObjectFile(self):
        # Include files (.inc/.fh) produce no object file.
        return not ( self.includeFileRegEx and
                     re.match(self.includeFileRegEx, self.fileName()) )
    def compileCommand(self):
        "F77 or F90 compile command from the build config; None for includes."
        if self.isFixedForm():
            compileCommand = self.buildConfig().fortran77CompileCommand(self.target(), self)
        elif self.isFreeForm():
            compileCommand = self.buildConfig().fortran90CompileCommand(self.target(), self)
        else:
            compileCommand = None
        return compileCommand
    def moduleFilePaths(self):
        "Names of the .mod files produced for the modules this file defines."
        filepaths = []
        for m in self.containedModules() :
            #FIXME: this works for gfortran and ifort,
            # but other fortran compilers might use other naming schemes for module files
            filepaths.append(m.lower()+'.mod')
        return filepaths
class CSourceFile (SourceFile):
    """A C source (.c) or header (.h) file.

    Patterns are class attributes overridable via configure().
    """
    fileNameRegEx = '.*\.c$'
    includeFileRegEx = '.*\.h$'
    preprocessFileNameRegEx = None
    # Compile-group name used when a file is not in any explicit group.
    defaultCompileGroup = 'default'
    @classmethod
    def configure(cls, infoDict):
        """Overrides class-level patterns/hooks from a config dict.
        NOTE: setdefault also writes the defaults back into infoDict."""
        if not infoDict: return
        cls.fileNameRegEx = infoDict.setdefault('fileregex', cls.fileNameRegEx)
        cls.includeFileRegEx = infoDict.setdefault('includefileregex', cls.includeFileRegEx)
        cls.preprocessFileNameRegEx = infoDict.setdefault('preprocessfileregex', cls.preprocessFileNameRegEx)
        if 'preprocessfunc' in infoDict:
            cls.preprocessFuncs['preprocessorFunction'] = infoDict.get('preprocessfunc')
        if 'preprocessednamefunc' in infoDict:
            cls.preprocessFuncs['preprocessedFileNameFunction'] = infoDict.get('preprocessednamefunc')
        cls.defaultCompileGroup = infoDict.setdefault('defaultCompileGroup', cls.defaultCompileGroup)
    @classmethod
    def fileNameMatchesType(cls, fileName):
        # Truthy (a match object) when any configured C pattern matches.
        return (cls.fileNameRegEx and re.match(cls.fileNameRegEx, fileName)) or \
               (cls.includeFileRegEx and re.match(cls.includeFileRegEx, fileName)) or \
               (cls.preprocessFileNameRegEx and re.match(cls.preprocessFileNameRegEx, fileName))
    @classmethod
    def allFileRegExs(cls):
        "All non-None filename patterns, for directory scanning."
        all = []
        if cls.fileNameRegEx: all.append(cls.fileNameRegEx)
        if cls.preprocessFileNameRegEx: all.append(cls.preprocessFileNameRegEx)
        if cls.includeFileRegEx: all.append(cls.includeFileRegEx)
        return all
    def requiresPreprocessing(self):
        "Whether an extra preprocessor has to be run (on top of the standard C preprocessor)"
        return self.preprocessFileNameRegEx and re.match(self.preprocessFileNameRegEx, self.fileName())
    def generatesObjectFile(self):
        # Header files produce no object file.
        return not ( self.includeFileRegEx and re.match(self.includeFileRegEx, self.fileName()) )
    def compileCommand(self):
        "C compile command from the build config; None for headers."
        if (self.fileNameRegEx and re.match(self.fileNameRegEx, self.fileName()) ) or \
           (self.preprocessFileNameRegEx and re.match(self.preprocessFileNameRegEx, self.fileName())):
            compileCommand = self.buildConfig().cCompileCommand(self.target(), self)
        else:
            compileCommand = None
        return compileCommand
class SourceTree:
    """The set of source files under a target's root directories.

    Scans the file system for Fortran/C sources, extracts Fortran
    module/include dependencies, detects dependency cycles, and maintains
    the persisted per-file metadata dictionary.
    """
    def __init__(self, rootDirs, sourceTreesDependedOn, metadata, projectRoot, skipdirs, skipfiles,
                 mainProgramFile = None, noDependencies = False, verboseOutput = False,
                 # NOTE(review): mutable default argument — safe only because
                 # it is never mutated here.
                 includeKeywords = [r'use\s+', r'module\s+', r'\*copy\s+', r'include\s*[\'\"]', r'\#include\s*[\'\"]']):
        self.rootDirs= rootDirs
        self.projectRoot = projectRoot
        self.skipfiles = set(skipfiles)
        self.skipdirs = set(skipdirs)
        # Persisted dict keyed by project-relative path; mutated in place so
        # the caller's copy stays current.
        self.metadata = metadata
        self.mainProgramFile = mainProgramFile
        # Matches 'use X', 'module X', include directives etc. at line start;
        # group 1 is the keyword, group 2 the name.
        regExStr = r'^\s*(%s)([\d\w_]+)' % (string.join(includeKeywords,'|'),)
        self.moduleUseRegEx = re.compile(regExStr, re.IGNORECASE | re.MULTILINE)
        self.sourceTreesDependedOn = sourceTreesDependedOn
        self.noDependencies = noDependencies
        self.verboseOutput = verboseOutput
        # NOTE(review): this attribute assignment shadows the sourceFiles()
        # method defined below, making that method unreachable on instances.
        self.sourceFiles = self.createSourceFiles()
    def sourceFiles(self):
        # NOTE(review): dead code — shadowed by the instance attribute of the
        # same name assigned in __init__.
        return self.sourceFiles
    def sourceFileWithName(self, name):
        # NOTE(review): compares against self.mainProgramFile instead of the
        # 'name' argument — looks like a bug; confirm intended behavior.
        matchingFiles = [f for f in self.sourceFiles if self.mainProgramFile == os.path.basename(f.path())]
        if len(matchingFiles) == 1:
            return matchingFiles[0]
        else:
            return None
    def containedModulesDict(self):
        "Module names contained in each file in tree, with file path as key"
        return self.containedModsDict
    def createSourceFiles(self):
        """
        Create source file objects representing source files in the file
        system.
        """
        sourceFiles = []
        # Helper: locate files for each (regex list, class) pair under every
        # root dir, build SourceFile objects, and restore their metadata.
        def addFiles(regExStrings, sourceFileClasses):
            for rootDir in self.rootDirs:
                listOfFileLists = self.locateFiles(regExStrings, rootDir, True)
                for files, sourceFileClass in zip(listOfFileLists, sourceFileClasses):
                    for path, modDate, checksum in files:
                        newFile = sourceFileClass(path, self.projectRoot)
                        newFile.setVerboseOutput(self.verboseOutput)
                        newFile.updateWithMetadata(self.metadata.setdefault(newFile.path(), {}).get('configindep'))
                        newFile.setLastModified(modDate)
                        newFile.setChecksum(checksum)
                        if os.path.basename(path) == self.mainProgramFile:
                            newFile.setIsMainProgram(True)
                        sourceFiles.append(newFile)
        logging.debug('Searching for fortran source files')
        addFiles([FortranSourceFile.allFileRegExs()], [FortranSourceFile])
        # Fortran dependencies are wired before C files are added, so only
        # the Fortran subset is scanned for modules/includes.
        if not self.noDependencies: self.setupFortranDependencies(sourceFiles)
        addFiles([CSourceFile.allFileRegExs()], [CSourceFile])
        return sourceFiles
    def createSourceFileForPath(self, path):
        "Factory method to create a SourceFile object for the path given."
        fileName = os.path.basename(path)
        f = None
        if FortranSourceFile.fileNameMatchesType(fileName):
            f = FortranSourceFile(path, self.projectRoot)
        elif CSourceFile.fileNameMatchesType(fileName):
            f = CSourceFile(path, self.projectRoot)
        else:
            raise Exception, 'Unknown file type in sourceFileForPath'
        f.setVerboseOutput(self.verboseOutput)
        return f
    def locateFiles(self, fileNameRegExLists, rootDir, calcChecksum):
        """
        Locates files matching reg exs passed. Returns lists of lists of tuples,
        containing file path and modification date.
        """
        import hashlib
        # MD5 of the file contents, used to detect modification reliably.
        def genChecksum(filePath):
            fl = open(filePath,'r')
            m = hashlib.md5()
            m.update(fl.read())
            checksum = m.digest()
            fl.close()
            return checksum
        logging.debug('locating files in directory %s' % (rootDir))
        listOfListOfRegExes = [[re.compile(regEx) for regEx in regExList] for regExList in fileNameRegExLists]
        # chdir so the project-relative paths computed below resolve.
        os.chdir(self.projectRoot)
        checksum = None
        listOfListOfFileTuples = [[] for r in listOfListOfRegExes]
        for root, dirs, files in os.walk(rootDir):
            # Prune skipped directories in place so os.walk never descends.
            for skipdir in self.skipdirs:
                if skipdir in dirs: dirs.remove(skipdir)
            for f in files:
                if os.path.basename(f) in self.skipfiles: continue
                for listOfRegExs, listOfFileTuples in zip(listOfListOfRegExes, listOfListOfFileTuples):
                    for regEx in listOfRegExs:
                        if regEx.match(f):
                            filePath = os.path.join(root,f)
                            # Strip the project root to get a relative path.
                            prefix = os.path.commonprefix([filePath, self.projectRoot])
                            filePath = filePath[len(prefix):]
                            if filePath[0] == os.sep: filePath = filePath[1:]
                            if calcChecksum: checksum = genChecksum(filePath)
                            listOfFileTuples.append( (filePath, os.path.getmtime(filePath), checksum) )
                            break
        return listOfListOfFileTuples
    def updateMetadata(self):
        "Drops metadata for deleted files and records current scan metadata."
        pathsToRemove = self.removedFilePaths()
        for p in pathsToRemove:
            del self.metadata[p]
        for f in self.sourceFiles: self.metadata[f.path()]['configindep'] = f.metadata()
    def prepareForNewBuildCombo(self, buildConfig, target, clean):
        "Points every file at the given target/config and restores (or, when clean, resets) its per-config build state."
        logging.debug('Updating file status')
        for f in self.sourceFiles:
            f.setTarget(target)
            f.setBuildConfig(buildConfig)
            metadata = None
            if not clean: metadata = self.metadata.get(f.path())
            if metadata: metadata = metadata.setdefault('configdep', {}).get(buildConfig.name())
            f.updateWithConfigDepMetadata(metadata)
    def updateConfigDependMetadata(self, buildConfig):
        "Records each file's per-config build state under the config's name."
        logging.debug('Updating file metadata')
        for f in self.sourceFiles:
            configsDict = self.metadata.setdefault(f.path(), {}).setdefault('configdep', {})
            configsDict[buildConfig.name()] = f.configDepMetadata()
    def removedFilePaths(self):
        "Returns set of files removed since last build. Paths are project root relative."
        timestampPaths = set(self.metadata.keys())
        sourceFilePaths = set([f.path() for f in self.sourceFiles])
        pathsRemoved = timestampPaths.difference(sourceFilePaths)
        return pathsRemoved
    def removedSourceFiles(self):
        "Returns set of source files removed since last build."
        pathsRemoved = self.removedFilePaths()
        filesRemoved = []
        for p in pathsRemoved :
            f = self.createSourceFileForPath(p)
            f.updateWithMetadata(self.metadata[p].get('configindep'))
            filesRemoved.append(f)
        return filesRemoved
    def buildableSourceFiles(self):
        """
        Returns a list of source files that need building, and for
        which dependencies are satisfied.
        """
        logging.debug('Getting buildable source files')
        files = []
        for s in self.sourceFiles:
            if self.noDependencies:
                if s.isModified() and not s.isBuilt():
                    files.append(s)
            else:
                if s.needsBuilding() and s.canBuild() and not s.isBuilt():
                    files.append(s)
        return files
    def scanFileForModules(self, filePath):
        """Scan one file for Fortran 'use'/'module'/include statements.
        Returns (usedModules, containedModules, includedFiles) as lists;
        module names are lower-cased, include names keep their case."""
        usedModules = set()
        containedModules = set()
        includedFiles = set()
        f = open(filePath,'r')
        fileContent = f.read()
        f.close()
        matches = self.moduleUseRegEx.findall(fileContent)
        for m in matches:
            if m[0].lower().strip() == 'use':
                usedModules.add(m[1].lower())
            elif m[0].lower().strip() == 'module':
                containedModules.add(m[1].lower())
            else:
                includedFiles.add(m[1])
        return list(usedModules), list(containedModules), list(includedFiles)
    def setupFortranDependencies(self, fortranSourceFiles):
        """Scan (or reuse cached scans of) the Fortran files, wire up
        module/include dependencies, and raise on unresolved includes or
        dependency cycles."""
        logging.debug('Setting fortran dependencies')
        self.containedModsDict = {}
        usedModsDict = {}
        includedFilesDict = {}
        scanTime = time.time()
        for f in fortranSourceFiles:
            if f.needsRescan():
                usedMods, containedMods, includedFiles = self.scanFileForModules(f.path())
                f.setUsedModules(usedMods)
                f.setConstainedModules(containedMods)
                f.setIncludedFileNames(includedFiles)
                f.setLastScan(scanTime)
                f.setLastScanChecksum(f.checksum())
            else:
                usedMods, containedMods, includedFiles = f.usedModules(), f.containedModules(), f.includedFileNames()
            usedModsDict[f] = usedMods
            includedFilesDict[f] = includedFiles
            for m in containedMods:
                self.containedModsDict[m] = f
        # Include directives may reference a file by full name or base name.
        fileBases = [os.path.splitext(f.fileName())[0] for f in fortranSourceFiles]
        for f in fortranSourceFiles:
            for usedMod in usedModsDict[f]:
                fileWithUsedMod = self.containedModsDict.get(usedMod)
                if not fileWithUsedMod:
                    # Search for dependency in other source trees
                    for sourceTree in self.sourceTreesDependedOn:
                        fileWithUsedMod = sourceTree.containedModulesDict().get(usedMod)
                        if fileWithUsedMod: break
                if fileWithUsedMod and f != fileWithUsedMod: f.addDirectDependency(fileWithUsedMod)
            for includeFile in includedFilesDict[f]:
                found = False
                for sourceFile, base in zip(fortranSourceFiles, fileBases):
                    if (sourceFile.fileName() == includeFile) or (base == includeFile):
                        f.addDirectDependency(sourceFile)
                        found = True
                        break
                if not found:
                    raise Exception, 'Could not find include file %s from %s' % (includeFile, f.fileName())
        # Check for cycles
        print 'Checking for cyclic dependencies'
        remainingFiles = set(fortranSourceFiles)
        while len(remainingFiles) > 0:
            checkedFiles = set()
            fileStack = []
            f = remainingFiles.pop()
            areCyclic, failingFiles = f.dependenciesAreCyclic(fileStack, checkedFiles)
            if areCyclic:
                raise Exception('The following files have a cyclic dependency: %s' % (failingFiles))
            else:
                # Everything reached from f is cycle-free; skip it next pass.
                remainingFiles.difference_update(checkedFiles)
    def __iter__(self):
        return iter(self.sourceFiles)
class Target:
    """A build target (library, optionally plus a linked executable).

    Owns a SourceTree, depends on other targets, and drives compile,
    archive (static library), link, and install steps for a given
    BuildConfig.
    """
    def __init__(self, targetInfoDict, buildRootDir, projectRoot, metadata, setupFunc = None, verboseOutput = False):
        # Configuration dict (name, rootdirs, libraryname, exename, ...).
        self.targetInfoDict = targetInfoDict
        self.isBuilt = False
        self.lastConfig = None
        self._sourceTree = None
        self.buildRootDir = buildRootDir
        # NOTE(review): this attribute shadows the targetDependencies()
        # method defined below; all internal code iterates the attribute.
        self.targetDependencies = None
        self.buildShouldStop = False
        self.buildQueue = None
        self.projectRoot = projectRoot
        self.metadata = metadata
        self.verboseOutput = verboseOutput
        self.setupFuncTuple = (setupFunc,) # Using a tuple to avoid binding function to class
    def mainProgramFile(self):
        return self.targetInfoDict.get('mainprogramfile')
    def sourceTree(self):
        return self._sourceTree
    def rootSourceDirectories(self):
        "Absolute paths of this target's source root directories."
        return [os.path.join(self.projectRoot, d) for d in self.targetInfoDict['rootdirs']]
    def buildSubDirectory(self):
        return self.targetInfoDict['buildsubdir']
    def name(self):
        return self.targetInfoDict['name']
    def executableName(self):
        return self.targetInfoDict['exename']
    def targetDependencies(self):
        # NOTE(review): dead code — shadowed by the instance attribute of the
        # same name assigned in __init__.
        return self.targetDependencies
    def moduleFilePath(self, buildConfig):
        "Module search path: own module dir plus those of dependency targets."
        modulePath = [self.moduleFileDirectory(buildConfig)]
        for t in self.targetDependencies:
            modulePath.append(t.moduleFileDirectory(buildConfig))
        return modulePath
    def dependentLibraryNames(self):
        "Own library name followed by the names of dependency libraries."
        names = [self.libraryName()]
        for t in self.targetDependencies:
            names.append(t.libraryName())
        return names
    def dependentLibraryPaths(self, buildConfig):
        "Own library path followed by the paths of dependency libraries."
        paths = [self.productLibraryPath(buildConfig)]
        for t in self.targetDependencies:
            paths.append(t.productLibraryPath(buildConfig))
        return paths
    def moduleFileDirectory(self, buildConfig):
        # Fortran .mod files land in the intermediates directory.
        return self.intermediateProductsDirectory(buildConfig)
    def libraryName(self):
        return self.targetInfoDict['libraryname']
    def fullLibraryName(self):
        "Conventional static-library filename: lib<name>.a"
        return 'lib' + self.targetInfoDict['libraryname'] + '.a'
    def compileGroups(self):
        return self.targetInfoDict['compilegroups']
    def buildRootDirectory(self):
        "Absolute path to build root."
        return self.buildRootDir
    def productInstallDirectory(self, buildConfig):
        return buildConfig.installDirectory()
    def productLibraryDirectory(self, buildConfig):
        return os.path.join(self.buildRootDirectory(), buildConfig.buildSubDirectory(), 'lib')
    def productLibraryPath(self, buildConfig):
        return os.path.join(self.productLibraryDirectory(buildConfig), self.fullLibraryName())
    def productExecutableDirectory(self, buildConfig):
        return os.path.join(self.buildRootDirectory(), buildConfig.buildSubDirectory(), 'bin')
    def intermediateProductsDirectory(self, buildConfig):
        "Per-target, per-config directory for objects/mod files."
        return os.path.join(self.buildRootDir, self.buildSubDirectory() + '.build',
                            buildConfig.buildSubDirectory())
    def updateTargetDependencies(self, allTargets):
        "Resolves the 'dependson' names to Target objects from allTargets."
        self.targetDependencies = []
        for targetName in self.targetInfoDict['dependson']:
            target = [t for t in allTargets if t.name() == targetName][0]
            self.targetDependencies.append(target)
    def updateMetadata(self):
        if self._sourceTree: self._sourceTree.updateMetadata()
    def isFirstBuild(self, buildConfig):
        # NOTE(review): returns True when the intermediates directory already
        # exists, which reads as the opposite of "first build" — confirm.
        return os.path.exists(self.intermediateProductsDirectory(buildConfig))
    def build(self, buildConfig, clean, numThreads = 1, noDependencies = False):
        """Build dependency targets, then this target's sources and
        (if configured) its executable.
        Returns (success, numFilesBuilt)."""
        # Skip work if this target was already built for the same config.
        if self.isBuilt and self.lastConfig == buildConfig: return True, 0
        self.isBuilt = False
        self.lastConfig = buildConfig
        self.buildShouldStop = False
        intermediatesDir = self.intermediateProductsDirectory(buildConfig)
        if not os.path.exists(intermediatesDir):
            os.makedirs(intermediatesDir)
        dependenciesBuilt = True
        numFilesBuilt = 0
        for t in self.targetDependencies:
            logging.debug('Building dependency target %s' % (t.name()))
            dependenciesBuilt, n = t.build(buildConfig, clean, numThreads, noDependencies)
            numFilesBuilt += n
            if not dependenciesBuilt: break
        if dependenciesBuilt and not self.buildShouldStop:
            self.setBuildEnvironment(buildConfig)
            self.isBuilt, n = self.compileSources(buildConfig, numThreads, clean, noDependencies)
            numFilesBuilt += n
            if self.isBuilt and 'exename' in self.targetInfoDict:
                self.isBuilt = self.compileExecutable(buildConfig)
                if not self.isBuilt:
                    print 'Failed to link executable for target %s' % (self.name())
        return self.isBuilt, numFilesBuilt
    def install(self, buildConfig):
        "Copies the linked executable (if any) to the install directory."
        import shutil
        if 'exename' not in self.targetInfoDict: return
        print 'Installing %s' % (self.name())
        exeDir = os.path.join(self.projectRoot, self.productExecutableDirectory(buildConfig))
        exePath = os.path.join(exeDir, self.executableName())
        binDir = self.productInstallDirectory(buildConfig)
        shutil.copy(exePath, binDir)
    def stopBuild(self):
        "Requests that this target, its dependencies, and any running build queue stop."
        self.buildShouldStop = True
        for t in self.targetDependencies:
            t.stopBuild()
        if self.buildQueue:
            self.buildQueue.stopBuild()
    def setBuildEnvironment(self, buildConfig):
        "Exports FORAY_* environment variables for setup/preprocess scripts."
        os.environ['FORAY_TARGET_ROOT_DIRS'] = string.join([r'"%s"' % (d) for d in self.rootSourceDirectories()])
        os.environ['FORAY_INTERMEDIATE_PRODUCTS_DIR'] = self.intermediateProductsDirectory(buildConfig)
        os.environ['FORAY_LIBRARY_PRODUCTS_DIR'] = self.productLibraryDirectory(buildConfig)
        os.environ['FORAY_EXECUTABLE_PRODUCTS_DIR'] = self.productExecutableDirectory(buildConfig)
        os.environ['FORAY_INSTALL_DIR'] = self.productInstallDirectory(buildConfig)
    def compileSources(self, buildConfig, numThreads, clean, noDependencies):
        """Compile this target's sources into its static library.
        Lazily creates the SourceTree, runs the BuildQueue, then indexes
        the library. Returns (success, numFilesBuilt)."""
        print 'Starting build for target "%s" with config "%s"' % (self.name(), buildConfig.name())
        libDirPath = self.productLibraryDirectory(buildConfig)
        if not os.path.exists(libDirPath):
            os.makedirs(libDirPath)
        # Optional per-project setup hook (stored unbound in a 1-tuple).
        if self.setupFuncTuple[0]:
            self.setupFuncTuple[0](self.projectRoot,
                                   self.rootSourceDirectories(),
                                   self.intermediateProductsDirectory(buildConfig),
                                   self.productLibraryDirectory(buildConfig),
                                   self.productExecutableDirectory(buildConfig),
                                   self.productInstallDirectory(buildConfig) )
        if not self._sourceTree:
            mainProgramFile = self.targetInfoDict.get('mainprogramfile')
            self._sourceTree = SourceTree(self.rootSourceDirectories(),
                                          [t.sourceTree() for t in self.targetDependencies],
                                          self.metadata,
                                          self.projectRoot,
                                          self.targetInfoDict['skipdirs'],
                                          self.targetInfoDict['skipfiles'],
                                          mainProgramFile,
                                          noDependencies,
                                          self.verboseOutput)
            # Clean up products whose source files have been deleted.
            self.unarchiveBuildProducts(buildConfig, self._sourceTree.removedFilePaths())
            self.removeModuleFiles(buildConfig, self._sourceTree.removedSourceFiles())
            self.updateMetadata()
        logging.debug('Updating file status')
        self._sourceTree.prepareForNewBuildCombo(buildConfig, self, clean)
        libFilePath = os.path.join(self.productLibraryDirectory(buildConfig), self.fullLibraryName())
        self.buildQueue = BuildQueue(self._sourceTree, buildConfig, self, libFilePath, numThreads)
        success = False
        numFilesBuilt = 0
        if not self.buildShouldStop:
            success, numFilesBuilt = self.buildQueue.buildSource()
            if success and numFilesBuilt > 0:
                # Run ranlib
                indexLibCommand = buildConfig.indexLibraryCommand(self)
                logging.debug('Indexing library: ' + indexLibCommand)
                success, output, error = RunSubprocess(indexLibCommand)
                if not success:
                    print 'ranlib failed'
                    print output
                    print error
        self.buildQueue = None
        self._sourceTree.updateConfigDependMetadata(buildConfig)
        if success:
            statusString = 'Compiled library'
        elif self.buildShouldStop:
            statusString = 'Compiling interrupted'
        else:
            statusString = 'Failed to build library'
        print statusString + ' for target "%s" and config "%s"' % (self.name(), buildConfig.name())
        return success, numFilesBuilt
    def compileExecutable(self, buildConfig):
        "Links the target executable; returns True on success."
        exeDirPath = self.productExecutableDirectory(buildConfig)
        if not os.path.exists(exeDirPath): os.makedirs(exeDirPath)
        os.chdir(exeDirPath)
        exeCommand = buildConfig.linkExecutableCommand(self)
        print 'Compiling executable for %s' % (self.name())
        logging.debug('Compile command: %s' % (exeCommand))
        success, output, error = RunSubprocess(exeCommand)
        if not success:
            if output: print output
            if error: print error
        return success
    def archiveBuildProducts(self, buildConfig, sourceFiles):
        "Adds the given files' objects to the static library (main program excluded)."
        print 'Archiving object files'
        sourceFilesToArchive = [s for s in sourceFiles if not s.isMainProgram()]
        if len(sourceFilesToArchive) == 0: return
        command = buildConfig.archiveCommand(self, sourceFilesToArchive)
        logging.debug('Archiving command: %s' % (command))
        if command:
            success, output, error = RunSubprocess(command)
            if not success:
                if output: print output
                if error: print error
    def unarchiveBuildProducts(self, buildConfig, sourceFilePaths):
        "Removes object files corresponding to the project relative paths passed."
        print 'Removing object files for which source files no longer exist'
        sourceFiles = [self._sourceTree.createSourceFileForPath(p) for p in sourceFilePaths]
        sourceFiles = [f for f in sourceFiles if f.generatesObjectFile()]
        if len(sourceFiles) == 0: return
        command = buildConfig.unarchiveCommand(self, sourceFiles)
        logging.debug('Unarchiving command: %s' % (command))
        if command:
            success, output, error = RunSubprocess(command)
            if not success:
                if output: print output
                if error: print error
    def removeModuleFiles(self, buildConfig, removedSourceFiles):
        "Removes module files for the given source files."
        sourceFiles = [f for f in removedSourceFiles if f.generatesModuleFile()]
        if len(sourceFiles) == 0: return
        print 'Removing module files for which source files no longer exist'
        moduleFiles = []
        for f in sourceFiles :
            moduleFiles.extend(f.moduleFilePaths())
        for f in moduleFiles :
            fn = os.path.join(self.intermediateProductsDirectory(buildConfig), f)
            # NOTE(review): stray debug print — consider removing.
            print self.buildRootDir
            if os.path.exists(fn) :
                os.remove(fn)
class BuildConfig:
    """Wraps one (already flattened) configuration dict from the build info and
    derives compile, archive, and link shell commands for a Target.

    Inheritance between configs is resolved before construction (see
    Builder.flattenConfigInheritance), so configDict is self-contained here.
    """
    def __init__(self, configDict, projectRoot):
        # configDict: flattened configuration dictionary from the build info.
        # projectRoot: path to the project root directory.
        self.configDict = configDict
        self.projectRoot = projectRoot
    def name(self):
        """Configuration name as given in the build info."""
        return self.configDict['name']
    def installDirectory(self):
        """Install directory with shell variables/substitutions expanded."""
        return ExpandShellArgs(self.configDict['installdir'])
    def buildSubDirectory(self):
        """Per-config subdirectory name under the build directory."""
        return self.configDict['buildsubdir']
    def compileGroupForFile(self, target, sourceFile):
        """Return the name of the compile group that governs sourceFile's flags.

        Starts from a per-file-type default and then overrides it when the
        file name is explicitly listed in one of the target's compile groups.
        """
        fileName = os.path.split(sourceFile.path())[1]
        compileGroups = target.compileGroups()
        if isinstance(sourceFile, FortranSourceFile):
            # NOTE(review): f77defaultCompileGroup / f90defaultCompileGroup /
            # defaultCompileGroup are assumed to be set on the source file
            # classes elsewhere — they are not visible in this part of the file.
            if sourceFile.isFixedForm():
                fileGroup = sourceFile.f77defaultCompileGroup
            else:
                fileGroup = sourceFile.f90defaultCompileGroup
        elif isinstance(sourceFile, CSourceFile):
            fileGroup = sourceFile.defaultCompileGroup
        else:
            fileGroup = 'default'
        # An explicit listing in a compile group wins over the default.
        for groupName, fileNames in compileGroups.iteritems():
            if fileName in fileNames:
                fileGroup = groupName
                break
        return fileGroup
    def modulePathOptions(self, target):
        """Build the compiler option string listing module search paths."""
        modulePath = target.moduleFilePath(self)
        optionString = self.configDict['compileroptions']['modpathoption']
        moduleString = ''
        if len(modulePath) > 0:
            # Emits one '<modpathoption> "<dir>"' pair per module path entry.
            moduleString = reduce( lambda x, y: '%s %s "%s"' % (x, optionString, y), modulePath, '' )
        return moduleString
    def linkLibraryOptions(self, target):
        """Build the '-L<dir> -l<name> ...' linker option string for the target,
        including only dependent libraries whose archives actually exist."""
        libraryPath = '-L"%s" ' % (target.productLibraryDirectory(self))
        dependentLibraryNames = target.dependentLibraryNames()
        dependentLibraryPaths = target.dependentLibraryPaths(self)
        dependentLibraryNames = [l[0] for l in zip(dependentLibraryNames, dependentLibraryPaths) \
            if os.path.exists(l[1])] # Filter non-existent libraries out
        optionsString = ''
        if len(dependentLibraryNames) > 0:
            optionsString = reduce( lambda x, y: '%s -l%s' % (x, y), dependentLibraryNames, libraryPath )
        return optionsString
    def fortranCompileCommand(self, target, sourceFile, compilerKey, flagsKey):
        """Assemble the full Fortran compile command for sourceFile.

        compilerKey/flagsKey select the f77 or f90 compiler entries from the
        config's 'compileroptions' dict.
        """
        compilerOptionsDict = self.configDict['compileroptions']
        compileGroup = self.compileGroupForFile(target, sourceFile)
        compileGroupFlags = compilerOptionsDict['compilegroupflags'][compileGroup]
        compiler = compilerOptionsDict[compilerKey]
        flags = compilerOptionsDict[flagsKey]
        modPathFlags = self.modulePathOptions(target)
        # The (possibly preprocessed) source path is resolved relative to the project root.
        sourceFilePath = os.path.join(self.projectRoot, sourceFile.preprocessedFilePath())
        return '%s %s %s %s "%s"' % (compiler, flags, compileGroupFlags, modPathFlags, sourceFilePath)
    def fortran77CompileCommand(self, target, sourceFile):
        """Compile command for fixed-form (F77) source."""
        return self.fortranCompileCommand(target, sourceFile, 'f77compiler', 'f77flags')
    def fortran90CompileCommand(self, target, sourceFile):
        """Compile command for free-form (F90) source."""
        return self.fortranCompileCommand(target, sourceFile, 'f90compiler', 'f90flags')
    def cCompileCommand(self, target, sourceFile):
        """Assemble the full C compile command for sourceFile."""
        compilerOptionsDict = self.configDict['compileroptions']
        compileGroup = self.compileGroupForFile(target, sourceFile)
        compileGroupFlags = compilerOptionsDict['compilegroupflags'][compileGroup]
        compiler = compilerOptionsDict['ccompiler']
        flags = compilerOptionsDict['cflags']
        sourceFilePath = os.path.join(self.projectRoot, sourceFile.preprocessedFilePath())
        return '%s %s %s "%s"' % (compiler, flags, compileGroupFlags, sourceFilePath)
    def archiveCommand(self, target, sourceFiles):
        """
        Should only be called once object files have been created, because
        it checks for their existence.

        Returns a compound shell command ('cd ; ar ; rm') that archives the
        existing object files into the product library and then deletes them,
        or None when no object files are present.
        """
        libPath = target.productLibraryPath(self)
        intermedPath = target.intermediateProductsDirectory(self)
        paths = [s.objectFileName() for s in sourceFiles if
            s.generatesObjectFile() and
            os.path.exists(os.path.join(intermedPath,s.objectFileName()))]
        paths = string.join(paths)
        if len(paths) == 0: return None
        changeDirCommand = 'cd "%s"' % (intermedPath)
        arCommand = self.configDict['compileroptions']['archivecommand']
        arCommand = '%s "%s" %s' % (arCommand, libPath, paths)
        removeCommand = 'rm ' + paths
        return string.join([changeDirCommand, arCommand, removeCommand], ' ; ')
    def unarchiveCommand(self, target, sourceFiles):
        """Return the shell command that deletes the given files' objects from
        the product library, or None when there are no object files."""
        libPath = target.productLibraryPath(self)
        objects = string.join([s.objectFileName() for s in sourceFiles if s.generatesObjectFile()])
        if len(objects) == 0: return None
        unarchCommand = self.configDict['compileroptions']['unarchivecommand']
        unarchCommand = '%s "%s" %s' % (unarchCommand, libPath, objects)
        return unarchCommand
    def indexLibraryCommand(self, target):
        """Return a shell command that runs ranlib on the product library,
        guarded so it is a no-op when the library does not exist."""
        libPath = target.productLibraryPath(self)
        ranlibCommand = self.configDict['compileroptions']['ranlibcommand']
        return 'if [ -e "%s" ]; then %s "%s" ; fi' % (libPath, ranlibCommand, libPath)
    def linkExecutableCommand(self, target):
        """Assemble the link command producing the target's executable from the
        main program's object file plus the configured libraries."""
        mainProgramFileName = target.mainProgramFile()
        mainSourceFile = target.sourceTree().sourceFileWithName(mainProgramFileName)
        mainObjectName = mainSourceFile.objectFileName()
        intermedPath = target.intermediateProductsDirectory(self)
        mainObjectPath = os.path.join(intermedPath, mainObjectName)
        exeName = target.executableName()
        libs = self.linkLibraryOptions(target)
        c = self.configDict['compileroptions']
        linkCommand = '%s %s -o %s "%s" %s %s %s' % \
            (c['link'], c['linkflags'], exeName, mainObjectPath, c['prioritylibs'], libs, c['otherlibs'])
        return linkCommand
class BuildQueue:
    """
    This class schedules file compilations. It takes account of dependencies, and
    is optimized for working on parallel systems.
    """
    def __init__(self, sourceTree, buildConfig, target, libFilePath, numThreads):
        # sourceTree supplies the buildable files; numThreads is the number of
        # worker threads spawned per build round.
        import threading
        self.sourceTree = sourceTree
        self.buildConfig = buildConfig
        self.target = target
        self.libFilePath = libFilePath
        self.numParallelThreads = numThreads
        # Locks guarding the shared work/result lists accessed by worker threads.
        self.buildableSourceFilesLock = threading.Lock()
        self.builtSourceFilesLock = threading.Lock()
        # Plain flag read across threads to request an early stop.
        self.buildShouldStop = False
    def buildSourceFilesInThread(self):
        """Worker body: repeatedly pop a buildable file and build it until the
        queue is empty, a build fails, or a stop is requested."""
        def getNextSourceFile():
            # Pop one file from the shared queue under the lock; None when empty.
            self.buildableSourceFilesLock.acquire()
            if len(self.buildableSourceFiles) > 0:
                f = self.buildableSourceFiles.pop()
            else:
                f = None
            self.buildableSourceFilesLock.release()
            return f
        try:
            f = getNextSourceFile()
            while f and not self.buildShouldStop:
                success = f.build()
                if success:
                    self.builtSourceFilesLock.acquire()
                    self.builtSourceFiles.append(f)
                    self.builtSourceFilesLock.release()
                    f = getNextSourceFile()
                else:
                    # Any failure stops all workers.
                    self.buildShouldStop = True
        except Exception, e:
            print 'An error occurred: ', e
            self.buildShouldStop = True
    # Number of files to build before object files are archived mid-build.
    ArchiveThreshold = 100
    def buildSource(self):
        """Build all buildable files in rounds of worker threads.

        Each round builds the files whose dependencies are currently satisfied;
        the buildable set is then recomputed. Returns (success, numFilesBuilt).
        """
        import threading
        self.buildShouldStop = False
        self.buildableSourceFiles = self.sourceTree.buildableSourceFiles()
        numFilesBuilt = 0
        numFilesBuiltSinceLastArchive = 0
        self.builtSourceFiles = []
        while len(self.buildableSourceFiles) > 0 and not self.buildShouldStop:
            numBuiltBefore = len(self.builtSourceFiles)
            threads = []
            for threadIndex in range(self.numParallelThreads):
                threads.append( threading.Thread(target=self.buildSourceFilesInThread) )
                threads[-1].start()
            for thread in threads:
                thread.join()
            numBuiltThisRound = len(self.builtSourceFiles) - numBuiltBefore
            numFilesBuilt += numBuiltThisRound
            numFilesBuiltSinceLastArchive += numBuiltThisRound
            # Periodically archive finished objects so they don't accumulate.
            if numFilesBuiltSinceLastArchive >= BuildQueue.ArchiveThreshold:
                self.target.archiveBuildProducts(self.buildConfig, self.builtSourceFiles)
                numFilesBuiltSinceLastArchive = 0
                self.builtSourceFiles = []
            if not self.buildShouldStop:
                # Recompute: files may have become buildable this round.
                self.buildableSourceFiles = self.sourceTree.buildableSourceFiles()
        # Archive whatever was built since the last intermediate archive.
        self.target.archiveBuildProducts(self.buildConfig, self.builtSourceFiles)
        return (not self.buildShouldStop), numFilesBuilt
    def stopBuild(self):
        """Request that the current build stop as soon as possible."""
        self.buildShouldStop = True
class Builder:
    """Top-level driver: resolves configs and targets from the build info,
    builds each (target, config) combination, persists per-file metadata
    between runs, and installs the products."""
    def __init__(self, buildInfo, targetNames, configNames, projectRoot, numThreads, clean, noDependencies, debug, verbose):
        # Store ivars
        self.buildInfo = buildInfo
        self.projectRoot = projectRoot
        self.numThreads = numThreads
        self.clean = clean
        self.noDependencies = noDependencies
        self.buildCancelled = False
        self.debug = debug
        self.verbose = verbose
        # read old metadata
        # The metadata file is a pickled dict keyed by target name; it records
        # per-file build times/commands/checksums from the previous run.
        import cPickle
        metadataFilePath = self.metadataFilePath()
        self.allFileMetadata = None
        if os.path.exists(metadataFilePath):
            f = open(metadataFilePath, 'rb')
            self.allFileMetadata = cPickle.load(f)
            f.close()
        if not self.allFileMetadata: self.allFileMetadata = {}
        # Setup build configurations
        self.buildConfigsToBuild = []
        if not configNames or len(configNames) == 0:
            configNames = [self.buildInfo['defaultconfig']] # default
        for configName in configNames:
            configDicts = [d for d in self.buildInfo['configs'] if d['name'] == configName]
            if len(configDicts) != 1:
                raise Exception, 'Invalid configuration %s' % (configName)
            configDict = configDicts[0]
            # Resolve the 'inherits' chain into one self-contained dict.
            flattenedConfigDict = self.flattenConfigInheritance(configDict)
            logging.debug('Flattened config dict for %s: %s' % (configDict['name'], str(flattenedConfigDict)))
            buildConfig = BuildConfig(flattenedConfigDict, self.projectRoot)
            self.buildConfigsToBuild.append(buildConfig)
        # Setup targets
        self.currentTarget = None
        self.allTargets = []
        self.targetsToBuild = []
        if not targetNames or len(targetNames) == 0:
            targetNames = [t['name'] for t in self.buildInfo['targets']] # use all
        for targetDict in self.buildInfo['targets']:
            # Each target gets its own metadata sub-dict, created on demand.
            targetMetadata = self.allFileMetadata.setdefault(targetDict['name'], {})
            target = Target(targetDict, ExpandShellArgs(self.buildInfo['builddir']), self.projectRoot,
                targetMetadata, self.buildInfo.get('firstbuildfunc'), verboseOutput=verbose)
            self.allTargets.append(target)
            if targetDict['name'] in targetNames:
                self.targetsToBuild.append(target)
        # All targets (not just those built) are needed to resolve dependencies.
        for target in self.allTargets:
            target.updateTargetDependencies(self.allTargets)
    def flattenConfigInheritance(self, configDict):
        """Return a new dict with configDict's 'inherits' chain resolved:
        ancestor settings are applied first, then recursively overridden by
        each descendant (deep-copied, so the originals are untouched)."""
        import copy
        def recursiveUpdate(dictToUpdate, dictToUpdateWith):
            "Recursively update a tree of dictionaries"
            for key,value in dictToUpdateWith.iteritems():
                if key in dictToUpdate and isinstance(value, dict):
                    recursiveUpdate(dictToUpdate[key], dictToUpdateWith[key])
                else:
                    dictToUpdate[key] = copy.deepcopy(value)
        def inheritFromDict(inheritingDict, resultDict):
            "Inherit the contents of one dictionary in another"
            if 'inherits' in inheritingDict:
                configName = inheritingDict['inherits']
                inheritedDict = [d for d in self.buildInfo['configs'] if d['name'] == configName][0]
                inheritFromDict(inheritedDict, resultDict)
            recursiveUpdate(resultDict, inheritingDict)
        flattenedDict = {}
        inheritFromDict(configDict, flattenedDict)
        return flattenedDict
    def metadataFilePath(self):
        """Path of the persisted build-metadata pickle in the build directory."""
        return os.path.join(ExpandShellArgs(self.buildInfo['builddir']), 'build.foraymetadata')
    def build(self):
        """Build every selected target under every selected config; stops at
        the first failure. Persists file metadata afterwards either way.
        Returns the overall success flag."""
        import pprint, cPickle
        # Build each combination of target and config
        allFileMetadata = self.allFileMetadata
        success = True
        allTargetsDict = dict([(target.name(), target) for target in self.allTargets])
        for config in self.buildConfigsToBuild:
            # Optional per-config hook supplied by the build info.
            if 'prepareconfigfunc' in self.buildInfo:
                self.buildInfo['prepareconfigfunc'](config.name())
            for target in self.targetsToBuild:
                self.currentTarget = target
                success, numFilesBuilt = target.build(config, self.clean, self.numThreads, self.noDependencies)
                if not success: break
            if not success: break
        print 'Storing file meta data'
        metadataFilePath = self.metadataFilePath()
        if self.debug:
            # Also dump a human-readable copy alongside the pickle.
            f = open(metadataFilePath + '.debug', 'w')
            pprint.pprint(allFileMetadata, f)
            f.close()
        f = open(metadataFilePath, 'wb')
        cPickle.dump(allFileMetadata, f)
        f.close()
        self.currentTarget = None
        return success
    def install(self):
        "Installs the target products in the respective bin directories."
        for t in self.targetsToBuild:
            for b in self.buildConfigsToBuild:
                t.install(b)
    def handleStopSignal(self, signalNum, frame):
        """Signal handler: cancel the build and stop the in-progress target."""
        self.buildCancelled = True
        if self.currentTarget: self.currentTarget.stopBuild()
# -----------------
# Main program
# -----------------
# Read environment variables
# Default thread count comes from FORAY_NUM_THREADS (written back if unset).
numThreads = os.environ.setdefault('FORAY_NUM_THREADS', '1')
# Parse input arguments
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-d", "--debug", action="store_true", dest="debug", help="Print debug info.",
    default=False)
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", help="Print verbose output.",
    default=False)
parser.add_option("-c", "--clean", action="store_true", dest="clean", help="Whether to rebuild completely.",
    default=False)
parser.add_option("-j", "--threads", type="int", dest="numthreads", help="The number of threads to use.",
    default=int(numThreads))
parser.add_option("-i", "--buildinfo", type="string", dest="buildinfofile", help="The build info file name.",
    default='buildinfo')
parser.add_option("-b", "--buildconfig", type="string", action="append", dest="configs",
    help="The configuration to build.")
parser.add_option("-n", "--nodepends", action="store_true", dest="nodepends", help="Do not account for file dependencies.",
    default=False)
# Positional args are target names.
(options, targets) = parser.parse_args()
# Debugging
if options.debug: logging.getLogger('').setLevel(logging.DEBUG)
# Build info
# The buildinfo file is an executable Python script that must define a
# 'buildinfo' dict; it is searched for upwards from the current directory.
import signal
buildinfoGlobals = {}
buildinfoPath = FindFileInAncestorDirs(os.getcwd(), options.buildinfofile)
if not buildinfoPath:
    sys.exit('Could not locate buildinfo file')
execfile(buildinfoPath, buildinfoGlobals)
if 'buildinfo' not in buildinfoGlobals:
    sys.exit('No buildinfo dict found in buildinfo file')
buildInfo = buildinfoGlobals['buildinfo']
# File types
FortranSourceFile.configure(buildInfo.get('fortranfiles'))
CSourceFile.configure(buildInfo.get('cfiles'))
# Project root
# Defaults to the directory containing the buildinfo file.
if 'projectroot' in buildInfo:
    projectRoot = ExpandShellArgs(buildInfo['projectroot'])
else:
    projectRoot = os.path.dirname(buildinfoPath)
os.environ['FORAY_PROJECT_ROOT'] = projectRoot
# Create builder and build
builder = Builder(buildInfo, targets, options.configs, projectRoot, options.numthreads,
    options.clean, options.nodepends, options.debug, options.verbose)
# Allow interrupt/quit/abort signals to stop the build cleanly.
signal.signal(signal.SIGINT, builder.handleStopSignal)
signal.signal(signal.SIGQUIT, builder.handleStopSignal)
signal.signal(signal.SIGABRT, builder.handleStopSignal)
if options.debug:
    # In debug mode, let exceptions propagate with full tracebacks.
    if builder.build(): builder.install()
else:
    try:
        if builder.build():
            builder.install()
    except Exception, e:
        print 'Foray Error: ' + str(e.args[0])
# ------------------------------------------------------------------------------------
# Copyright (c) 2008, Drew McCormack
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
# Neither the name of the Vrije Universiteit (Amsterdam) nor the names of its
# contributors may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| Python |
#!/usr/bin/env python
import os, os.path, sys, re, string, logging, subprocess
import time, shutil, threading
# ---------
# Globals
# ---------
PrintLock = threading.Lock()
# ----------
# Exceptions
# ----------
class InterruptException (Exception):
    """Project-specific exception type; adds no behavior beyond Exception."""
# ---------
# Logging
# ---------
logging.getLogger('').setLevel(logging.WARNING)
# ---------
# Functions
# ---------
def ExpandShellArgs(argsString):
    """Expand shell variables/substitutions in argsString by echoing it
    through the shell; the trailing newline echo appends is chomped."""
    expanded = os.popen('echo %s' % argsString).read()
    return expanded[:-1]
def MostRecentDate(date1, date2):
    "Return the later of the two dates; a falsy date (None, 0) counts as missing."
    if not (date1 and date2):
        # At most one real value: prefer date1 when present, else date2.
        return date1 or date2
    return max(date1, date2)
def RunSubprocess(command):
    """Run 'command' through the shell, capturing stdout and stderr.

    Returns (success, output, error). success is False when the process could
    not be started (OSError), was terminated by a signal (negative return
    code), or exited with a non-zero status.

    Fix: output and error are now initialized up front; previously an OSError
    left them unbound and the return statement raised NameError.
    """
    import subprocess
    success = True
    output = error = None
    try:
        process = subprocess.Popen(command, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        output, error = process.communicate()
    except OSError:
        success = False
    if success:
        if process.returncode < 0:
            # Negative return code means the child was killed by a signal.
            print('Build Interrupted')
            success = False
        elif process.returncode > 0:
            success = False
    return (success, output, error)
# ---------
# Classes
# ---------
class SourceFile:
    """Represents one source file in the tree: its path, dependencies, and the
    build bookkeeping (timestamps, checksums, compile commands) used to decide
    whether it needs rebuilding. Subclasses implement per-language behavior.

    Fixes vs. the original:
      - __str__ referenced a nonexistent method markedForBuilding() (only the
        _markedForBuilding attribute exists), raising AttributeError.
      - A duplicate early definition of preprocessedFilePath was removed; it
        was dead code, silently overridden by the later definition.
      - checksumOfFile uses hashlib (same MD5 digest) instead of the
        deprecated md5 module.
    """
    # Class-level registry of preprocessing hooks, filled by subclass configure().
    preprocessFuncs = {}
    def __init__(self, path, projectRoot):
        # path is relative to projectRoot.
        self._path = path
        self._directDependencies = []
        self._isMainProgram = False
        self._projectRoot = projectRoot
        self._dependencyCyclesChecked = False
        self._buildConfig = None
        self._target = None
        self.resetBuildMetadata()
        self._checksum = None
    def resetBuildMetadata(self):
        """Clear all per-build bookkeeping (timestamps, commands, checksums)."""
        self._lastBuilt = None
        self._mostRecentBuildOfDependency = None
        self._lastModified = None
        self._buildTime = None
        self._buildFailed = False
        self._markedForBuilding = False
        self._needsBuilding = None          # None means "not yet computed" (cache)
        self._lastCompileCommand = None
        self._buildCompileCommand = None
        self._lastChecksum = None
        self._buildChecksum = None
    def updateWithMetadata(self, metadata):
        "Metadata is a tuple that is used for persisting the file to disk."
        if metadata:
            timestamp, compileCommand, checksum = metadata
            self.setLastBuilt(timestamp)
            self.setBuildTime(timestamp)
            self.setLastCompileCommand(compileCommand)
            self.setBuildCompileCommand(compileCommand)
            self.setLastChecksum(checksum)
            self.setBuildChecksum(checksum)
        else: # Reset
            self.resetBuildMetadata()
    def metadata(self):
        """Tuple persisted to disk: (build time, compile command, checksum)."""
        return (self.buildTime(), self.buildCompileCommand(), self.buildChecksum())
    def requiresPreprocessing(self):
        """Whether a custom preprocessor must run; subclasses override."""
        return False
    def path(self):
        "Relative to the project root"
        return self._path
    def fileName(self):
        """Base file name without directories."""
        return os.path.basename(self._path)
    def absolutePath(self):
        return os.path.join(self._projectRoot, self.path())
    def generatesObjectFile(self):
        """Whether compiling this file produces an object file; subclasses override."""
        return True
    def objectFileName(self):
        """Object file name: base name with the extension replaced by '.o'."""
        pathWithoutExt = os.path.splitext(self.path())[0]
        return os.path.basename(pathWithoutExt) + '.o'
    def setBuildConfig(self, config):
        # Held as a weak proxy to avoid reference cycles with the config/target graph.
        import weakref
        self._needsBuilding = None
        self._buildConfig = weakref.proxy(config)
    def buildConfig(self):
        return self._buildConfig
    def setTarget(self, target):
        import weakref
        self._needsBuilding = None
        self._target = weakref.proxy(target)
    def target(self):
        return self._target
    def setLastModified(self, lastModified):
        self._needsBuilding = None
        self._lastModified = lastModified
    def lastModified(self):
        return self._lastModified
    def setLastBuilt(self, lastBuilt):
        self._needsBuilding = None
        self._lastBuilt = lastBuilt
    def lastBuilt(self):
        return self._lastBuilt
    def setLastCompileCommand(self, flags):
        self._needsBuilding = None
        self._lastCompileCommand = flags
    def buildCompileCommand(self):
        return self._buildCompileCommand
    def setBuildCompileCommand(self, flags):
        self._buildCompileCommand = flags
    def lastCompileCommand(self):
        return self._lastCompileCommand
    def compileCommand(self):
        "Depends on build config, so created on the fly, and not stored."
        return None
    def setLastChecksum(self, checksum):
        self._needsBuilding = None
        self._lastChecksum = checksum
    def lastChecksum(self):
        return self._lastChecksum
    def setChecksum(self, checksum):
        self._needsBuilding = None
        self._checksum = checksum
    def checksum(self):
        return self._checksum
    def setBuildChecksum(self, checksum):
        self._buildChecksum = checksum
    def buildChecksum(self):
        return self._buildChecksum
    def setBuildTime(self, buildTime):
        "Most recent build time, including the current build"
        self._buildTime = buildTime
    def buildTime(self):
        return self._buildTime
    def buildFailed(self):
        return self._buildFailed
    def setIsMainProgram(self, yn):
        self._isMainProgram = yn
    def isMainProgram(self):
        return self._isMainProgram
    def checksumOfFile(self):
        """MD5 digest of the file's current on-disk contents."""
        import hashlib
        fl = open(self.absolutePath(), 'r')
        checksum = hashlib.md5(fl.read()).digest()
        fl.close()
        return checksum
    def build(self):
        """Preprocess and compile this file inside the intermediate products
        directory; records build time/checksum/command on success.
        Returns True on success."""
        self._buildFailed = False
        intermediateProductsDir = self._target.intermediateProductsDirectory(self._buildConfig)
        os.chdir(intermediateProductsDir)
        if self.preprocess():
            if self.buildPreprocessedFile():
                self.setBuildTime(time.time())
                self.setBuildChecksum(self.checksum())
                self.setBuildCompileCommand(self.compileCommand())
            else:
                self._buildFailed = True
        else:
            self._buildFailed = True
        return not self._buildFailed
    def buildPreprocessedFile(self):
        return self.runCompileCommand(self.compileCommand())
    def runCompileCommand(self, compileCommand):
        """Run the compiler; a None command is treated as a trivial success.
        An error on an empty preprocessed file is deliberately ignored."""
        if compileCommand == None: return True
        PrintLock.acquire()
        print('Compiling %s' % self.path())
        logging.debug('Compile Command: %s' % compileCommand)
        PrintLock.release()
        success, output, error = RunSubprocess(compileCommand + ' 2>&1')
        if not success:
            # Check if preprocessed file was empty. If so, ignore error
            f = open(self.preprocessedFilePath(), 'r')
            if f.read().strip() == '':
                success = True
            else:
                PrintLock.acquire()
                print(output)
                PrintLock.release()
            f.close()
        else:
            # If the compile succeeded, check the output for any warnings to print
            if re.search('warn', output, re.IGNORECASE):
                print(output)
        return success
    def isBuilt(self):
        "Whether file has been built in this build."
        if None == self._buildTime:
            return False
        else:
            return self._buildTime > self._lastBuilt
    def isModified(self):
        """Whether the file changed since it was last built (by checksum, or
        by modification time when checksums match)."""
        if not self._lastChecksum:
            return True
        else:
            return (self._checksum != self._buildChecksum) or (self._lastModified > self._lastBuilt)
    def compileCommandHasChanged(self):
        return self._lastCompileCommand != self.compileCommand()
    def addDirectDependency(self, sourceFile):
        self._directDependencies.append(sourceFile)
    def dependenciesAreCyclic(self, fileStack, checkedFiles):
        """Depth-first cycle check over the dependency graph.

        Returns (True, [paths on the failing stack]) when a cycle is found,
        (False, None) otherwise. Files already fully checked are skipped.
        """
        checkedFiles.add(self)
        if self._dependencyCyclesChecked:
            fileStack.append(self)
            return False, None
        self._dependencyCyclesChecked = True
        if self in fileStack:
            return True, [f.path() for f in fileStack]
        else:
            fileStack.append(self)
        for d in self._directDependencies:
            areCyclic, failingFiles = d.dependenciesAreCyclic(fileStack, checkedFiles)
            fileStack.pop()
            if areCyclic:
                return True, failingFiles
        return False, None
    def mostRecentBuildOfDependency(self):
        """Latest build time among all (transitive) dependencies; cached."""
        if self._mostRecentBuildOfDependency:
            return self._mostRecentBuildOfDependency
        mostRecent = None
        for dep in self._directDependencies:
            mostRecent = MostRecentDate(mostRecent, dep.lastBuilt())
            mostRecent = MostRecentDate(mostRecent, dep.mostRecentBuildOfDependency())
        self._mostRecentBuildOfDependency = mostRecent # Cache value
        return mostRecent
    def needsBuilding(self):
        """
        Checks whether a dependent was compiled more recently than
        this file, or needs to be compiled.
        """
        if None != self._needsBuilding:
            return self._needsBuilding # Use cached result for performance
        needsBuilding = False
        if self.isModified() or self.compileCommandHasChanged():
            needsBuilding = True
        elif self.mostRecentBuildOfDependency() and self.lastBuilt() and \
            (self.mostRecentBuildOfDependency() > self.lastBuilt()):
            needsBuilding = True
        else:
            for dep in self._directDependencies:
                if dep.needsBuilding():
                    needsBuilding = True
                    break
        self._needsBuilding = needsBuilding # Cache result for performance
        return needsBuilding
    def canBuild(self):
        """
        Whether or not all the dependencies are satisfied to allow the file
        to be built.
        """
        if self._buildFailed: return False
        canBuild = True
        for dep in self._directDependencies:
            if dep.needsBuilding() and not dep.isBuilt():
                canBuild = False
                break
        return canBuild
    def preprocessedFilePath(self):
        """Path of the (possibly renamed) preprocessed copy in the
        intermediate products directory."""
        f = self.preprocessFuncs.get('preprocessedFileNameFunction')
        if f:
            filename = f(self.fileName())
        else:
            filename = self.fileName()
        return os.path.join(self.target().intermediateProductsDirectory(self.buildConfig()), filename)
    def preprocess(self):
        """Run the configured preprocessor, or fall back to copying the source
        into the intermediate products directory. Returns success."""
        if self.requiresPreprocessing():
            f = self.preprocessFuncs['preprocessorFunction']
            return f(self.absolutePath(), self.target().intermediateProductsDirectory(self.buildConfig()))
        else:
            # Copy the source file
            preprocessedPath = self.preprocessedFilePath()
            shutil.copy(self.absolutePath(), preprocessedPath)
            return True
    # Indent step used when rendering the dependency tree in __str__.
    TreeStringIndentLevel = 4
    def dependencyString(self, indent):
        """Recursive, indented rendering of the dependency tree."""
        aString = ''
        for dependentFile in self._directDependencies:
            aString += '\n' + indent * ' ' + dependentFile.path()
            aString += dependentFile.dependencyString(indent + self.TreeStringIndentLevel)
        return aString
    def __str__(self):
        s = '%s %s\n' %(str(self.__class__), self.path())
        s += 'Last Built: %s\n' % (self.lastBuilt())
        s += 'Last Modified: %s\n' % (self.lastModified())
        s += 'Can Build: %s\n' % (self.canBuild())
        s += 'Needs Building: %s\n' % (self.needsBuilding())
        # Fix: was self.markedForBuilding(), a method that does not exist.
        s += 'Marked for Building: %s\n' % (self._markedForBuilding)
        s += 'Dependencies'
        s += self.dependencyString(self.TreeStringIndentLevel) + '\n'
        return s
class FortranSourceFile (SourceFile):
    """SourceFile subclass for Fortran files. Class-level regexes classify a
    file name as fixed form, free form, preprocessed, or include-only."""
    freeFormRegEx = '.*\.(F|f90|F90)$'
    fixedFormRegEx = '.*\.f$'
    freeFormPreprocessRegEx = None
    fixedFormPreprocessRegEx = None
    includeFileRegEx = None
    @classmethod
    def configure(cls, infoDict):
        """Install settings from the 'fortranfiles' build-info dict.

        Fix: preprocessFuncs is rebound to a class-local dict. The original
        mutated the single dict inherited from SourceFile, which is shared
        with CSourceFile — whichever class was configured last silently
        clobbered the other's preprocess functions.
        """
        cls.preprocessFuncs = {'preprocessorFunction': None,
                               'preprocessedFileNameFunction': None}
        if not infoDict: return
        # setdefault is used deliberately: defaults are written back into the
        # caller's build-info dict, matching the original behavior.
        cls.freeFormRegEx = infoDict.setdefault('freeformregex', cls.freeFormRegEx)
        cls.fixedFormRegEx = infoDict.setdefault('fixedformregex', cls.fixedFormRegEx)
        cls.freeFormPreprocessRegEx = infoDict.setdefault('freeformpreprocessregex', cls.freeFormPreprocessRegEx)
        cls.fixedFormPreprocessRegEx = infoDict.setdefault('fixedformpreprocessregex', cls.fixedFormPreprocessRegEx)
        cls.includeFileRegEx = infoDict.setdefault('includefileregex', cls.includeFileRegEx)
        cls.preprocessFuncs['preprocessorFunction'] = infoDict.setdefault('preprocessfunc', None)
        cls.preprocessFuncs['preprocessedFileNameFunction'] = infoDict.setdefault('preprocessednamefunc', None)
    @classmethod
    def fileNameMatchesType(cls, fileName):
        """True when fileName matches any configured Fortran source pattern."""
        return ( cls.freeFormRegEx and re.match(cls.freeFormRegEx, fileName) ) or \
            ( cls.fixedFormRegEx and re.match(cls.fixedFormRegEx, fileName) ) or \
            ( cls.freeFormPreprocessRegEx and re.match(cls.freeFormPreprocessRegEx, fileName) ) or \
            ( cls.fixedFormPreprocessRegEx and re.match(cls.fixedFormPreprocessRegEx, fileName) )
    @classmethod
    def allFileRegExs(cls):
        """All non-None file-name regexes, including the include-file pattern."""
        all = []
        if cls.freeFormRegEx: all.append(cls.freeFormRegEx)
        if cls.fixedFormRegEx: all.append(cls.fixedFormRegEx)
        if cls.freeFormPreprocessRegEx: all.append(cls.freeFormPreprocessRegEx)
        if cls.fixedFormPreprocessRegEx: all.append(cls.fixedFormPreprocessRegEx)
        if cls.includeFileRegEx: all.append(cls.includeFileRegEx)
        return all
    def requiresPreprocessing(self):
        """True when this file matches one of the preprocess patterns."""
        return \
            ( self.freeFormPreprocessRegEx and
              re.match(self.freeFormPreprocessRegEx, self.fileName()) ) or \
            ( self.fixedFormPreprocessRegEx and
              re.match(self.fixedFormPreprocessRegEx, self.fileName()) )
    def isFreeForm(self):
        return \
            ( self.freeFormPreprocessRegEx and
              re.match(self.freeFormPreprocessRegEx, self.fileName()) ) or \
            ( self.freeFormRegEx and
              re.match(self.freeFormRegEx, self.fileName()) )
    def isFixedForm(self):
        return \
            ( self.fixedFormPreprocessRegEx and
              re.match(self.fixedFormPreprocessRegEx, self.fileName()) ) or \
            ( self.fixedFormRegEx and
              re.match(self.fixedFormRegEx, self.fileName()) )
    def generatesObjectFile(self):
        # Include files are not compiled to objects.
        return not ( self.includeFileRegEx and
                     re.match(self.includeFileRegEx, self.fileName()) )
    def compileCommand(self):
        """F77 or F90 compile command depending on form; None for include files."""
        if self.isFixedForm():
            compileCommand = self.buildConfig().fortran77CompileCommand(self.target(), self)
        elif self.isFreeForm():
            compileCommand = self.buildConfig().fortran90CompileCommand(self.target(), self)
        else:
            compileCommand = None
        return compileCommand
class CSourceFile (SourceFile):
    """SourceFile subclass for C files. Class-level regexes classify a file
    name as source, header, or preprocessed source."""
    fileNameRegEx = '.*\.c$'
    includeFileRegEx = '.*\.h$'
    preprocessFileNameRegEx = None
    @classmethod
    def configure(cls, infoDict):
        """Install settings from the 'cfiles' build-info dict.

        Fix: preprocessFuncs is rebound to a class-local dict. The original
        mutated the single dict inherited from SourceFile, which is shared
        with FortranSourceFile — configuring one class silently clobbered the
        other's preprocess functions.
        """
        cls.preprocessFuncs = {'preprocessorFunction': None,
                               'preprocessedFileNameFunction': None}
        if not infoDict: return
        cls.fileNameRegEx = infoDict.setdefault('fileregex', cls.fileNameRegEx)
        cls.includeFileRegEx = infoDict.setdefault('includefileregex', cls.includeFileRegEx)
        cls.preprocessFileNameRegEx = infoDict.setdefault('preprocessfileregex', cls.preprocessFileNameRegEx)
        cls.preprocessFuncs['preprocessorFunction'] = infoDict.get('preprocessfunc')
        cls.preprocessFuncs['preprocessedFileNameFunction'] = infoDict.get('preprocessednamefunc')
    @classmethod
    def fileNameMatchesType(cls, fileName):
        """True when fileName matches any configured C file pattern."""
        return (cls.fileNameRegEx and re.match(cls.fileNameRegEx, fileName)) or \
            (cls.includeFileRegEx and re.match(cls.includeFileRegEx, fileName)) or \
            (cls.preprocessFileNameRegEx and re.match(cls.preprocessFileNameRegEx, fileName))
    @classmethod
    def allFileRegExs(cls):
        """All non-None file-name regexes, including the header pattern."""
        all = []
        if cls.fileNameRegEx: all.append(cls.fileNameRegEx)
        if cls.preprocessFileNameRegEx: all.append(cls.preprocessFileNameRegEx)
        if cls.includeFileRegEx: all.append(cls.includeFileRegEx)
        return all
    def requiresPreprocessing(self):
        "Whether an extra preprocessor has to be run (on top of the standard C preprocessor)"
        return self.preprocessFileNameRegEx and re.match(self.preprocessFileNameRegEx, self.fileName())
    def generatesObjectFile(self):
        # Header files are not compiled to objects.
        return not ( self.includeFileRegEx and re.match(self.includeFileRegEx, self.fileName()) )
    def compileCommand(self):
        """C compile command for source/preprocessed files; None for headers."""
        if (self.fileNameRegEx and re.match(self.fileNameRegEx, self.fileName()) ) or \
           (self.preprocessFileNameRegEx and re.match(self.preprocessFileNameRegEx, self.fileName())):
            compileCommand = self.buildConfig().cCompileCommand(self.target(), self)
        else:
            compileCommand = None
        return compileCommand
class SourceTree:
    def __init__(self, rootDirs, sourceTreesDependedOn, projectRoot, skipdirs, skipfiles,
        mainProgramFile = None, noDependencies = False):
        # rootDirs: directories (relative to projectRoot) to scan for sources.
        # skipdirs/skipfiles: names excluded from the scan.
        # mainProgramFile: base name of the main-program source, if any.
        # noDependencies: when True, Fortran dependency analysis is skipped.
        self.rootDirs= rootDirs
        self.projectRoot = projectRoot
        self.skipfiles = set(skipfiles)
        self.skipdirs = set(skipdirs)
        self.mainProgramFile = mainProgramFile
        # Matches Fortran 'use'/'module' statements and '*copy' includes.
        self.moduleUseRegEx = re.compile(r'^\s*(use|module|\*copy)\s+([\d\w_]+)', re.IGNORECASE | re.MULTILINE)
        self.sourceTreesDependedOn = sourceTreesDependedOn
        self.noDependencies = noDependencies
        # NOTE(review): this instance attribute shadows the sourceFiles()
        # method defined below, making that method unreachable on instances.
        self.sourceFiles = self.createSourceFiles()
    def sourceFiles(self):
        # NOTE(review): this accessor is shadowed by the instance attribute
        # 'self.sourceFiles' assigned in __init__, so on instances the name
        # resolves to the list itself and this method is never callable.
        return self.sourceFiles
def sourceFileWithName(self, name):
matchingFiles = [f for f in self.sourceFiles if self.mainProgramFile == os.path.basename(f.path())]
if len(matchingFiles) == 1:
return matchingFiles[0]
else:
return None
    def containedModulesDict(self):
        "Module names contained in each file in tree, with file path as key"
        # NOTE(review): containedModsDict is populated by
        # setupFortranDependencies; it is unset when noDependencies is True.
        return self.containedModsDict
    def createSourceFiles(self):
        """
        Create source file objects representing source files in the file
        system.
        """
        sourceFiles = []
        def addFiles(regExString, sourceFileClass):
            # Scan every root dir for files matching regExString and append a
            # sourceFileClass instance per match, pre-filled with mtime/checksum.
            if not regExString: return
            for rootDir in self.rootDirs:
                files = self.locateFiles(regExString, rootDir)
                for path, modDate, checksum in files:
                    newFile = sourceFileClass(path, self.projectRoot)
                    newFile.setLastModified(modDate)
                    newFile.setChecksum(checksum)
                    if os.path.basename(path) == self.mainProgramFile:
                        newFile.setIsMainProgram(True)
                    sourceFiles.append(newFile)
        logging.debug('Searching for fortran source files')
        for regEx in FortranSourceFile.allFileRegExs():
            addFiles(regEx, FortranSourceFile)
        # Fortran dependency analysis only covers the files gathered so far.
        if not self.noDependencies: self.setupFortranDependencies(sourceFiles)
        logging.debug('Searching for c source files')
        for regEx in CSourceFile.allFileRegExs():
            addFiles(regEx, CSourceFile)
        return sourceFiles
def createSourceFileForPath(self, path):
"Factory method to create a SourceFile object for the path given."
fileName = os.path.basename(path)
if FortranSourceFile.fileNameMatchesType(fileName):
return FortranSourceFile(path, self.projectRoot)
elif CSourceFile.fileNameMatchesType(fileName):
return CSourceFile(path, self.projectRoot)
else:
raise Exception, 'Unknown file type in sourceFileForPath'
    def locateFiles(self, fileNameRegEx, rootDir):
        """
        Locates files matching reg ex passed. Returns list of tuples,
        containing file path and modification date.
        """
        # NOTE(review): the tuples actually carry three items:
        # (project-relative path, mtime, md5 digest of the contents).
        # The deprecated 'md5' module is used here (Python 2 only).
        import md5
        logging.debug('locating files in directory %s' % (rootDir))
        fileTuples = []
        regEx = re.compile(fileNameRegEx)
        # Walk relative to the project root so stored paths are root-relative.
        os.chdir(self.projectRoot)
        for root, dirs, files in os.walk(rootDir):
            # Prune skipped directories in place so os.walk does not descend.
            for skipdir in self.skipdirs:
                if skipdir in dirs: dirs.remove(skipdir)
            for f in files:
                if os.path.basename(f) in self.skipfiles: continue
                if regEx.match(f):
                    filePath = os.path.join(root,f)
                    # Strip the project-root prefix (and a leading separator).
                    prefix = os.path.commonprefix([filePath, self.projectRoot])
                    filePath = filePath[len(prefix):]
                    if filePath[0] == os.sep: filePath = filePath[1:]
                    fl = open(filePath,'r')
                    checksum = md5.new(fl.read()).digest()
                    fl.close()
                    fileTuples.append( (filePath, os.path.getmtime(filePath), checksum) )
        return fileTuples
def updateFileStatus(self, fileMetaData, buildConfig, target):
    "Points each source file at the given target/config and applies its persisted metadata."
    logging.debug('Updating file status')
    for f in self.sourceFiles:
        f.setTarget(target)
        f.setBuildConfig(buildConfig)
        metadata = fileMetaData.get(f.path())
        f.updateWithMetadata(metadata)
def updateFileMetaData(self, fileMetaData):
    "Writes each file's current metadata into fileMetaData, dropping entries for removed files."
    logging.debug('Updating file metadata')
    for f in self.sourceFiles:
        fileMetaData[f.path()] = f.metadata()
    pathsToRemove = self.removedFiles(fileMetaData)
    for p in pathsToRemove: del fileMetaData[p]
def removedFiles(self, fileMetaData):
    "Returns set of files removed since last build. Paths are project root relative."
    timestampPaths = set(fileMetaData.keys())
    sourceFilePaths = set([f.path() for f in self.sourceFiles])
    pathsRemoved = timestampPaths.difference(sourceFilePaths)
    return pathsRemoved
def buildableSourceFiles(self):
    """
    Returns a list of source files that need building, and for
    which dependencies are satisfied.
    """
    logging.debug('Getting buildable source files')
    if self.noDependencies:
        # Without dependency tracking, any modified file not yet built qualifies.
        def shouldBuild(sourceFile):
            return sourceFile.isModified() and not sourceFile.isBuilt()
    else:
        def shouldBuild(sourceFile):
            return sourceFile.needsBuilding() and sourceFile.canBuild() \
                and not sourceFile.isBuilt()
    return [s for s in self.sourceFiles if shouldBuild(s)]
def scanFileForModules(self, filePath):
    """Scans a fortran source file for module relationships.

    Returns three lists of lower-cased names: modules used, modules
    contained, and files included via *COPY directives.
    """
    usedModules = set()
    containedModules = set()
    includedFiles = set()
    sourceFile = open(filePath, 'r')
    fileContent = sourceFile.read()
    sourceFile.close()
    for match in self.moduleUseRegEx.findall(fileContent):
        keyword = match[0].lower()
        name = match[1].lower()
        if keyword == 'use':
            usedModules.add(name)
        elif keyword == '*copy':
            includedFiles.add(name)
        else:
            containedModules.add(name)
    return list(usedModules), list(containedModules), list(includedFiles)
def setupFortranDependencies(self, fortranSourceFiles):
    """Wires up direct dependencies between fortran files (module use and
    *COPY includes), then checks the dependency graph for cycles."""
    logging.debug('Setting fortran dependencies')
    # Maps module name -> the source file that contains it.
    self.containedModsDict = {}
    usedModsDict = {}
    includedFilesDict = {}
    # First pass: record what each file uses, includes, and contains.
    for f in fortranSourceFiles:
        usedMods, containedMods, includedFiles = self.scanFileForModules(f.path())
        usedModsDict[f] = usedMods
        includedFilesDict[f] = includedFiles
        for m in containedMods:
            self.containedModsDict[m] = f
    # Second pass: resolve used modules to files, falling back to the
    # source trees this tree depends on when a module is not found locally.
    for f in fortranSourceFiles:
        for usedMod in usedModsDict[f]:
            fileWithUsedMod = self.containedModsDict.get(usedMod)
            if not fileWithUsedMod:
                # Search for dependency in other source trees
                for sourceTree in self.sourceTreesDependedOn:
                    fileWithUsedMod = sourceTree.containedModulesDict().get(usedMod)
                    if fileWithUsedMod: break
            if fileWithUsedMod and f != fileWithUsedMod: f.addDirectDependency(fileWithUsedMod)
        # A *COPY include must resolve to exactly one '.fh' file in this tree.
        for includeFile in includedFilesDict[f]:
            includeFileName = includeFile + '.fh'
            includedSourceFiles = [ifile for ifile in fortranSourceFiles if \
                ifile.fileName().lower() == includeFileName.lower()]
            if len(includedSourceFiles) == 1:
                f.addDirectDependency(includedSourceFiles[0])
            else:
                raise Exception, 'Could not find include file %s' % (includeFileName)
    # Check for cycles
    print 'Checking for cyclic dependencies'
    remainingFiles = set(fortranSourceFiles)
    while len(remainingFiles) > 0:
        checkedFiles = set()
        fileStack = []
        f = remainingFiles.pop()
        areCyclic, failingFiles = f.dependenciesAreCyclic(fileStack, checkedFiles)
        if areCyclic:
            raise Exception('The following files have a cyclic dependency: %s' % (failingFiles))
        else:
            # Everything reachable from f is now known acyclic; skip revisiting.
            remainingFiles.difference_update(checkedFiles)
def __iter__(self):
    "Iterating the source tree yields its SourceFile objects."
    return iter(self.sourceFiles)
class Target:
def __init__(self, targetInfoDict, buildRootDir, projectRoot, setupFunc = None):
self.targetInfoDict = targetInfoDict
self.isBuilt = False
self.lastConfig = None
self._sourceTree = None
self.buildRootDir = buildRootDir
self.targetDependencies = None
self.buildShouldStop = False
self.buildQueue = None
self.projectRoot = projectRoot
self.setupFuncTuple = (setupFunc,) # Using a tuple to avoid binding function to class
def mainProgramFile(self):
return self.targetInfoDict.get('mainprogramfile')
def sourceTree(self):
return self._sourceTree
def rootSourceDirectories(self):
return [os.path.join(self.projectRoot, d) for d in self.targetInfoDict['rootdirs']]
def buildSubDirectory(self):
return self.targetInfoDict['buildsubdir']
def name(self):
return self.targetInfoDict['name']
def executableName(self):
return self.targetInfoDict['exename']
def targetDependencies(self):
return self.targetDependencies
def moduleFilePath(self, buildConfig):
modulePath = [self.moduleFileDirectory(buildConfig)]
for t in self.targetDependencies:
modulePath.append(t.moduleFileDirectory(buildConfig))
return modulePath
def dependentLibraryNames(self):
names = [self.libraryName()]
for t in self.targetDependencies:
names.append(t.libraryName())
return names
def dependentLibraryPaths(self, buildConfig):
paths = [self.productLibraryPath(buildConfig)]
for t in self.targetDependencies:
paths.append(t.productLibraryPath(buildConfig))
return paths
def moduleFileDirectory(self, buildConfig):
return self.intermediateProductsDirectory(buildConfig)
def libraryName(self):
return self.targetInfoDict['libraryname']
def fullLibraryName(self):
return 'lib' + self.targetInfoDict['libraryname'] + '.a'
def compileGroups(self):
return self.targetInfoDict['compilegroups']
def buildRootDirectory(self):
"Absolute path to build root."
return self.buildRootDir
def productInstallDirectory(self, buildConfig):
return buildConfig.installDirectory()
def productLibraryDirectory(self, buildConfig):
return os.path.join(self.buildRootDirectory(), buildConfig.buildSubDirectory(), 'lib')
def productLibraryPath(self, buildConfig):
return os.path.join(self.productLibraryDirectory(buildConfig), self.fullLibraryName())
def productExecutableDirectory(self, buildConfig):
return os.path.join(self.buildRootDirectory(), buildConfig.buildSubDirectory(), 'bin')
def intermediateProductsDirectory(self, buildConfig):
return os.path.join(self.buildRootDir, self.buildSubDirectory() + '.build',
buildConfig.buildSubDirectory())
def updateTargetDependencies(self, allTargets):
self.targetDependencies = []
for targetName in self.targetInfoDict['dependson']:
target = [t for t in allTargets if t.name() == targetName][0]
self.targetDependencies.append(target)
def isFirstBuild(self, buildConfig):
return os.path.exists(self.intermediateProductsDirectory(buildConfig))
def build(self, buildConfig, allFileMetaData, clean, numThreads = 1, noDependencies = False):
if self.isBuilt and self.lastConfig == buildConfig: return True, 0
self.isBuilt = False
self.lastConfig = buildConfig
self.buildShouldStop = False
intermediatesDir = self.intermediateProductsDirectory(buildConfig)
if not os.path.exists(intermediatesDir):
os.makedirs(intermediatesDir)
dependenciesBuilt = True
numFilesBuilt = 0
for t in self.targetDependencies:
logging.debug('Building dependency target %s' % (t.name()))
dependenciesBuilt, n = t.build(buildConfig, allFileMetaData, clean, numThreads, noDependencies)
numFilesBuilt += n
if not dependenciesBuilt: break
fileMetaData = allFileMetaData.setdefault(buildConfig.name(),{}).setdefault(self.name(),{})
if clean: fileMetaData.clear()
if dependenciesBuilt and not self.buildShouldStop:
self.setBuildEnvironment(buildConfig)
self.isBuilt, n = self.compileSources(buildConfig, fileMetaData, numThreads, noDependencies)
numFilesBuilt += n
if self.isBuilt and 'exename' in self.targetInfoDict:
self.isBuilt = self.compileExecutable(buildConfig)
if not self.isBuilt:
print 'Failed to link executable for target %s' % (self.name())
return self.isBuilt, numFilesBuilt
def install(self, buildConfig):
import shutil
if 'exename' not in self.targetInfoDict: return
print 'Installing %s' % (self.name())
exeDir = os.path.join(self.projectRoot, self.productExecutableDirectory(buildConfig))
exePath = os.path.join(exeDir, self.executableName())
binDir = self.productInstallDirectory(buildConfig)
shutil.copy(exePath, binDir)
def stopBuild(self):
self.buildShouldStop = True
for t in self.targetDependencies:
t.stopBuild()
if self.buildQueue:
self.buildQueue.stopBuild()
def setBuildEnvironment(self, buildConfig):
os.environ['FORAY_TARGET_ROOT_DIRS'] = string.join([r'"%s"' % (d) for d in self.rootSourceDirectories()])
os.environ['FORAY_INTERMEDIATE_PRODUCTS_DIR'] = self.intermediateProductsDirectory(buildConfig)
os.environ['FORAY_LIBRARY_PRODUCTS_DIR'] = self.productLibraryDirectory(buildConfig)
os.environ['FORAY_EXECUTABLE_PRODUCTS_DIR'] = self.productExecutableDirectory(buildConfig)
os.environ['FORAY_INSTALL_DIR'] = self.productInstallDirectory(buildConfig)
def compileSources(self, buildConfig, fileMetaData, numThreads, noDependencies):
print 'Starting build for target "%s" with config "%s"' % (self.name(), buildConfig.name())
libDirPath = self.productLibraryDirectory(buildConfig)
if not os.path.exists(libDirPath):
os.makedirs(libDirPath)
if self.setupFuncTuple[0]:
self.setupFuncTuple[0](self.projectRoot,
self.rootSourceDirectories(),
self.intermediateProductsDirectory(buildConfig),
self.productLibraryDirectory(buildConfig),
self.productExecutableDirectory(buildConfig),
self.productInstallDirectory(buildConfig) )
if not self._sourceTree:
mainProgramFile = self.targetInfoDict.get('mainprogramfile')
self._sourceTree = SourceTree(self.rootSourceDirectories(),
[t.sourceTree() for t in self.targetDependencies],
self.projectRoot,
self.targetInfoDict['skipdirs'],
self.targetInfoDict['skipfiles'],
mainProgramFile,
noDependencies)
self.unarchiveBuildProducts(buildConfig, self._sourceTree.removedFiles(fileMetaData))
logging.debug('Updating file status')
self._sourceTree.updateFileStatus(fileMetaData, buildConfig, self)
libFilePath = os.path.join(self.productLibraryDirectory(buildConfig), self.fullLibraryName())
self.buildQueue = BuildQueue(self._sourceTree, buildConfig, self, libFilePath, numThreads)
success = False
numFilesBuilt = 0
if not self.buildShouldStop:
success, numFilesBuilt = self.buildQueue.buildSource()
if success and numFilesBuilt > 0:
# Run ranlib
indexLibCommand = buildConfig.indexLibraryCommand(self)
logging.debug('Indexing library: ' + indexLibCommand)
success, output, error = RunSubprocess(indexLibCommand)
if not success:
print 'ranlib failed'
print output
print error
self.buildQueue = None
self._sourceTree.updateFileMetaData(fileMetaData)
if success:
statusString = 'Compiled library'
elif self.buildShouldStop:
statusString = 'Compiling interrupted'
else:
statusString = 'Failed to build library'
print statusString + ' for target "%s" and config "%s"' % (self.name(), buildConfig.name())
return success, numFilesBuilt
def compileExecutable(self, buildConfig):
exeDirPath = self.productExecutableDirectory(buildConfig)
if not os.path.exists(exeDirPath): os.makedirs(exeDirPath)
os.chdir(exeDirPath)
exeCommand = buildConfig.linkExecutableCommand(self)
print 'Compiling executable for %s' % (self.name())
logging.debug('Compile command: %s' % (exeCommand))
success, output, error = RunSubprocess(exeCommand)
if not success:
if output: print output
if error: print error
return success
def archiveBuildProducts(self, buildConfig, sourceFiles):
print 'Archiving object files'
sourceFilesToArchive = [s for s in sourceFiles if not s.isMainProgram()]
if len(sourceFilesToArchive) == 0: return
command = buildConfig.archiveCommand(self, sourceFilesToArchive)
logging.debug('Archiving command: %s' % (command))
if command:
success, output, error = RunSubprocess(command)
if not success:
if output: print output
if error: print error
def unarchiveBuildProducts(self, buildConfig, sourceFilePaths):
"Removes object files corresponding to the project relative paths passed."
print 'Removing object files for which source files no longer exist'
sourceFiles = [self._sourceTree.createSourceFileForPath(p) for p in sourceFilePaths]
sourceFiles = [f for f in sourceFiles if f.generatesObjectFile()]
if len(sourceFiles) == 0: return
command = buildConfig.unarchiveCommand(self, sourceFiles)
logging.debug('Unarchiving command: %s' % (command))
if command:
success, output, error = RunSubprocess(command)
if not success:
if output: print output
if error: print error
class BuildConfig:
    """Wraps one build-configuration dict (compilers, flags, directories) and
    assembles the shell commands used to compile, archive, and link."""
    def __init__(self, configDict, projectRoot):
        self.configDict = configDict
        self.projectRoot = projectRoot
    def name(self):
        return self.configDict['name']
    def installDirectory(self):
        "Install directory with shell substitutions expanded."
        return ExpandShellArgs(self.configDict['installdir'])
    def buildSubDirectory(self):
        return self.configDict['buildsubdir']
    def compileGroupForFile(self, target, sourceFile):
        "Name of the compile group the file belongs to; 'default' when in none."
        fileName = os.path.split(sourceFile.path())[1]
        compileGroups = target.compileGroups()
        fileGroup = 'default'
        for groupName, fileNames in compileGroups.iteritems():
            if fileName in fileNames:
                fileGroup = groupName
                break
        return fileGroup
    def modulePathOptions(self, target):
        "Builds the compiler option string for the fortran module search path."
        modulePath = target.moduleFilePath(self)
        optionString = self.configDict['compileroptions']['modpathoption']
        moduleString = ''
        if len(modulePath) > 0:
            # One '<modpathoption> "<dir>"' pair per path entry.
            moduleString = reduce( lambda x, y: '%s %s "%s"' % (x, optionString, y), modulePath, '' )
        return moduleString
    def linkLibraryOptions(self, target):
        "Builds -L/-l linker options for the target's own and dependent libraries."
        libraryPath = '-L"%s" ' % (target.productLibraryDirectory(self))
        dependentLibraryNames = target.dependentLibraryNames()
        dependentLibraryPaths = target.dependentLibraryPaths(self)
        dependentLibraryNames = [l[0] for l in zip(dependentLibraryNames, dependentLibraryPaths) \
            if os.path.exists(l[1])] # Filter non-existent libraries out
        optionsString = ''
        if len(dependentLibraryNames) > 0:
            optionsString = reduce( lambda x, y: '%s -l%s' % (x, y), dependentLibraryNames, libraryPath )
        return optionsString
    def fortranCompileCommand(self, target, sourceFile, compilerKey, flagsKey):
        "Shared command builder for the f77/f90 variants below."
        compilerOptionsDict = self.configDict['compileroptions']
        compileGroup = self.compileGroupForFile(target, sourceFile)
        compileGroupFlags = compilerOptionsDict['compilegroupflags'][compileGroup]
        compiler = compilerOptionsDict[compilerKey]
        flags = compilerOptionsDict[flagsKey]
        modPathFlags = self.modulePathOptions(target)
        sourceFilePath = os.path.join(self.projectRoot, sourceFile.preprocessedFilePath())
        return '%s %s %s %s "%s"' % (compiler, flags, compileGroupFlags, modPathFlags, sourceFilePath)
    def fortran77CompileCommand(self, target, sourceFile):
        return self.fortranCompileCommand(target, sourceFile, 'f77compiler', 'f77flags')
    def fortran90CompileCommand(self, target, sourceFile):
        return self.fortranCompileCommand(target, sourceFile, 'f90compiler', 'f90flags')
    def cCompileCommand(self, target, sourceFile):
        "Builds the compile command for a C source file."
        compilerOptionsDict = self.configDict['compileroptions']
        compileGroup = self.compileGroupForFile(target, sourceFile)
        compileGroupFlags = compilerOptionsDict['compilegroupflags'][compileGroup]
        compiler = compilerOptionsDict['ccompiler']
        flags = compilerOptionsDict['cflags']
        sourceFilePath = os.path.join(self.projectRoot, sourceFile.preprocessedFilePath())
        return '%s %s %s "%s"' % (compiler, flags, compileGroupFlags, sourceFilePath)
    def archiveCommand(self, target, sourceFiles):
        """
        Should only be called once object files have been created, because
        it checks for their existence.
        Returns None when there is nothing to archive.
        """
        libPath = target.productLibraryPath(self)
        intermedPath = target.intermediateProductsDirectory(self)
        paths = [s.objectFileName() for s in sourceFiles if
            s.generatesObjectFile() and
            os.path.exists(os.path.join(intermedPath,s.objectFileName()))]
        paths = string.join(paths)
        if len(paths) == 0: return None
        changeDirCommand = 'cd "%s"' % (intermedPath)
        arCommand = self.configDict['compileroptions']['archivecommand']
        arCommand = '%s "%s" %s' % (arCommand, libPath, paths)
        # Objects are deleted after archiving; the library is the record.
        removeCommand = 'rm ' + paths
        return string.join([changeDirCommand, arCommand, removeCommand], ' ; ')
    def unarchiveCommand(self, target, sourceFiles):
        "Command removing the given files' objects from the library; None when none apply."
        libPath = target.productLibraryPath(self)
        objects = string.join([s.objectFileName() for s in sourceFiles if s.generatesObjectFile()])
        if len(objects) == 0: return None
        unarchCommand = self.configDict['compileroptions']['unarchivecommand']
        unarchCommand = '%s "%s" %s' % (unarchCommand, libPath, objects)
        return unarchCommand
    def indexLibraryCommand(self, target):
        "Ranlib command, guarded on the library's existence."
        libPath = target.productLibraryPath(self)
        ranlibCommand = self.configDict['compileroptions']['ranlibcommand']
        return 'if [ -e "%s" ]; then %s "%s" ; fi' % (libPath, ranlibCommand, libPath)
    def linkExecutableCommand(self, target):
        "Command linking the main program object against the product libraries."
        mainProgramFileName = target.mainProgramFile()
        mainSourceFile = target.sourceTree().sourceFileWithName(mainProgramFileName)
        mainObjectName = mainSourceFile.objectFileName()
        intermedPath = target.intermediateProductsDirectory(self)
        mainObjectPath = os.path.join(intermedPath, mainObjectName)
        exeName = target.executableName()
        libs = self.linkLibraryOptions(target)
        c = self.configDict['compileroptions']
        linkCommand = '%s %s -o %s "%s" %s %s %s' % \
            (c['link'], c['linkflags'], exeName, mainObjectPath, c['prioritylibs'], libs, c['otherlibs'])
        return linkCommand
class BuildQueue:
    """
    This class schedules file compilations. It takes account of dependencies, and
    is optimized for working on parallel systems.
    """
    def __init__(self, sourceTree, buildConfig, target, libFilePath, numThreads):
        import threading
        self.sourceTree = sourceTree
        self.buildConfig = buildConfig
        self.target = target
        self.libFilePath = libFilePath
        self.numParallelThreads = numThreads
        # Locks guard the shared work/result lists used by the worker threads.
        self.buildableSourceFilesLock = threading.Lock()
        self.builtSourceFilesLock = threading.Lock()
        self.buildShouldStop = False
    def buildSourceFilesInThread(self):
        "Worker loop: pops files off the shared list and builds them until empty or stopped."
        def getNextSourceFile():
            # Pop under the lock; returns None when no work remains.
            self.buildableSourceFilesLock.acquire()
            if len(self.buildableSourceFiles) > 0:
                f = self.buildableSourceFiles.pop()
            else:
                f = None
            self.buildableSourceFilesLock.release()
            return f
        try:
            f = getNextSourceFile()
            while f and not self.buildShouldStop:
                success = f.build()
                if success:
                    self.builtSourceFilesLock.acquire()
                    self.builtSourceFiles.append(f)
                    self.builtSourceFilesLock.release()
                    f = getNextSourceFile()
                else:
                    # One failed compile stops all workers.
                    self.buildShouldStop = True
        except Exception, e:
            print 'An error occurred: ', e
            self.buildShouldStop = True
    # Archive (and delete) object files after this many compiles.
    ArchiveThreshold = 30
    def buildSource(self):
        """Builds rounds of buildable files on numParallelThreads threads until
        nothing remains or the build stops. Returns (success, numFilesBuilt)."""
        import threading
        self.buildShouldStop = False
        self.buildableSourceFiles = self.sourceTree.buildableSourceFiles()
        numFilesBuilt = 0
        numFilesBuiltSinceLastArchive = 0
        self.builtSourceFiles = []
        while len(self.buildableSourceFiles) > 0 and not self.buildShouldStop:
            numBuiltBefore = len(self.builtSourceFiles)
            threads = []
            for threadIndex in range(self.numParallelThreads):
                threads.append( threading.Thread(target=self.buildSourceFilesInThread) )
                threads[-1].start()
            for thread in threads:
                thread.join()
            numBuiltThisRound = len(self.builtSourceFiles) - numBuiltBefore
            numFilesBuilt += numBuiltThisRound
            numFilesBuiltSinceLastArchive += numBuiltThisRound
            if numFilesBuiltSinceLastArchive >= BuildQueue.ArchiveThreshold:
                self.target.archiveBuildProducts(self.buildConfig, self.builtSourceFiles)
                numFilesBuiltSinceLastArchive = 0
                self.builtSourceFiles = []
            if not self.buildShouldStop:
                # A completed round may have unlocked files whose dependencies were pending.
                self.buildableSourceFiles = self.sourceTree.buildableSourceFiles()
        # Archive whatever was built since the last threshold archive.
        self.target.archiveBuildProducts(self.buildConfig, self.builtSourceFiles)
        return (not self.buildShouldStop), numFilesBuilt
    def stopBuild(self):
        "Signals the worker threads to stop as soon as possible."
        self.buildShouldStop = True
class Builder:
    """Top-level driver: resolves configs and targets from the build info,
    then builds each requested target for each requested configuration."""
    def __init__(self, buildInfo, targetNames, configNames, projectRoot, numThreads, clean, noDependencies, debug):
        # Store ivars
        self.buildInfo = buildInfo
        self.projectRoot = projectRoot
        self.numThreads = numThreads
        self.clean = clean
        self.noDependencies = noDependencies
        self.buildCancelled = False
        self.debug = debug
        # Setup build configurations
        self.buildConfigsToBuild = []
        if not configNames or len(configNames) == 0:
            configNames = [self.buildInfo['defaultconfig']] #default
        for configName in configNames:
            configDicts = [d for d in self.buildInfo['configs'] if d['name'] == configName]
            if len(configDicts) != 1:
                raise Exception, 'Invalid configuration %s' % (configName)
            configDict = configDicts[0]
            # Resolve the config's 'inherits' chain into one flat dict.
            flattenedConfigDict = self.flattenConfigInheritance(configDict)
            logging.debug('Flattened config dict for %s: %s' % (configDict['name'], str(flattenedConfigDict)))
            buildConfig = BuildConfig(flattenedConfigDict, self.projectRoot)
            self.buildConfigsToBuild.append(buildConfig)
        # Setup targets
        self.currentTarget = None
        self.allTargets = []
        self.targetsToBuild = []
        if not targetNames or len(targetNames) == 0:
            targetNames = [t['name'] for t in self.buildInfo['targets']] # use all
        for targetDict in self.buildInfo['targets']:
            target = Target(targetDict, ExpandShellArgs(self.buildInfo['builddir']), self.projectRoot,
                self.buildInfo.get('firstbuildfunc') )
            self.allTargets.append(target)
            if targetDict['name'] in targetNames:
                self.targetsToBuild.append(target)
        # All targets are created (not only those requested) so dependencies resolve.
        for target in self.allTargets:
            target.updateTargetDependencies(self.allTargets)
    def flattenConfigInheritance(self, configDict):
        "Returns a new dict with the config's inherited settings merged in, nearest-ancestor last."
        import copy
        def recursiveUpdate(dictToUpdate, dictToUpdateWith):
            "Recursively update a tree of dictionaries"
            for key,value in dictToUpdateWith.iteritems():
                if key in dictToUpdate and isinstance(value, dict):
                    recursiveUpdate(dictToUpdate[key], dictToUpdateWith[key])
                else:
                    dictToUpdate[key] = copy.deepcopy(value)
        def inheritFromDict(inheritingDict, resultDict):
            "Inherit the contents of one dictionary in another"
            if 'inherits' in inheritingDict:
                configName = inheritingDict['inherits']
                inheritedDict = [d for d in self.buildInfo['configs'] if d['name'] == configName][0]
                inheritFromDict(inheritedDict, resultDict)
            recursiveUpdate(resultDict, inheritingDict)
        flattenedDict = {}
        inheritFromDict(configDict, flattenedDict)
        return flattenedDict
    def build(self):
        """Builds every (config, target) combination requested. File metadata is
        persisted between runs in <builddir>/foray.fileMetaData. Returns success."""
        import pprint, pickle
        # read old fileMetaData
        fileMetaDataFilePath = os.path.join(ExpandShellArgs(self.buildInfo['builddir']), 'foray.fileMetaData')
        allFileMetaData = None
        if os.path.exists(fileMetaDataFilePath):
            f = open(fileMetaDataFilePath, 'rb')
            allFileMetaData = pickle.load(f)
            f.close()
        if not allFileMetaData:
            allFileMetaData = {}
        # Build each combination of target and config
        success = True
        allTargetsDict = dict([(target.name(), target) for target in self.allTargets])
        for config in self.buildConfigsToBuild:
            if 'prepareconfigfunc' in self.buildInfo:
                self.buildInfo['prepareconfigfunc'](config.name())
            for target in self.targetsToBuild:
                self.currentTarget = target
                success, numFilesBuilt = target.build(config, allFileMetaData, self.clean, self.numThreads,
                    self.noDependencies)
                if numFilesBuilt > 0:
                    # Persist after each target so an interrupted build resumes correctly.
                    print 'Storing file meta data'
                    if self.debug:
                        f = open(fileMetaDataFilePath + '.debug', 'w')
                        pprint.pprint(allFileMetaData, f)
                        f.close()
                    f = open(fileMetaDataFilePath, 'wb')
                    pickle.dump(allFileMetaData, f)
                    f.close()
                if not success: break
            if not success: break
        self.currentTarget = None
        return success
    def install(self):
        "Installs the target products in the respective bin directories."
        for t in self.targetsToBuild:
            for b in self.buildConfigsToBuild:
                t.install(b)
    def handleStopSignal(self, signalNum, frame):
        "Signal handler: flags cancellation and stops the in-flight target build."
        self.buildCancelled = True
        if self.currentTarget: self.currentTarget.stopBuild()
# -----------------
# Main program
# -----------------
# Read environment variables
numThreads = os.environ.setdefault('FORAY_NUM_THREADS', '1')
# Parse input arguments
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-d", "--debug", action="store_true", dest="debug", help="Print debug info.",
    default=False)
parser.add_option("-c", "--clean", action="store_true", dest="clean", help="Whether to rebuild completely.",
    default=False)
parser.add_option("-j", "--threads", type="int", dest="numthreads", help="The number of threads to use.",
    default=int(numThreads))
parser.add_option("-i", "--buildinfo", type="string", dest="buildinfofile", help="The build info file name.",
    default='buildinfo')
parser.add_option("-b", "--buildconfig", type="string", action="append", dest="configs",
    help="The configuration to build.")
parser.add_option("-n", "--nodepends", action="store_true", dest="nodepends", help="Do not account for file dependencies.",
    default=False)
(options, targets) = parser.parse_args()
# Debugging
if options.debug: logging.getLogger('').setLevel(logging.DEBUG)
# Build info
import signal
# The build info file is executed as python and must define a 'buildinfo' dict.
buildinfoGlobals = {}
execfile(options.buildinfofile, buildinfoGlobals)
buildInfo = buildinfoGlobals['buildinfo']
# File types
FortranSourceFile.configure(buildInfo.get('fortranfiles'))
CSourceFile.configure(buildInfo.get('cfiles'))
# Project root
if 'projectroot' in buildInfo:
    projectRoot = ExpandShellArgs(buildInfo['projectroot'])
else:
    projectRoot = os.getcwd()
os.environ['FORAY_PROJECT_ROOT'] = projectRoot
# Create builder and build
builder = Builder(buildInfo, targets, options.configs, projectRoot, options.numthreads,
    options.clean, options.nodepends, options.debug)
# Allow the user to interrupt the build cleanly.
signal.signal(signal.SIGINT, builder.handleStopSignal)
signal.signal(signal.SIGQUIT, builder.handleStopSignal)
signal.signal(signal.SIGABRT, builder.handleStopSignal)
if options.debug:
    # In debug mode, let exceptions propagate with a full traceback.
    if builder.build(): builder.install()
else:
    try:
        if builder.build():
            builder.install()
    except Exception, e:
        print 'Foray Error: ' + str(e.args[0])
# ------------------------------------------------------------------------------------
# Copyright (c) 2008, Drew McCormack
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
# Neither the name of the Vrije Universiteit (Amsterdam) nor the names of its
# contributors may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#!/usr/bin/env python
import os, os.path, sys, re, string, logging, subprocess
import time, shutil, threading
# ---------
# Globals
# ---------
PrintLock = threading.Lock()
# ----------
# Exceptions
# ----------
class InterruptException (Exception):
    "Raised when a build is interrupted."
    pass
# ---------
# Logging
# ---------
logging.getLogger('').setLevel(logging.WARNING)
# ---------
# Functions
# ---------
def ExpandShellArgs(argsString):
    "Passes the string through the shell, expanding any substitutions (e.g. $VARS, ~)."
    expanded = os.popen('echo %s' % (argsString)).read()
    return expanded[:-1]  # chomp the trailing newline added by echo
def MostRecentDate(date1, date2):
    "Returns the later of the two dates. Allows for None values."
    if not date1:
        return date2
    if not date2:
        return date1
    # Both present: ties favour date1, matching max() semantics.
    return max(date1, date2)
def RunSubprocess(command):
    """Runs the command in a shell subprocess.

    Returns (success, stdout, error). A negative return code (killed by
    signal) counts as an interruption; any non-zero code is a failure.
    """
    import subprocess
    success = True
    # Fixed: output/error were previously unbound at the return statement
    # when Popen raised OSError, causing a NameError instead of a clean failure.
    output, error = None, None
    try:
        process = subprocess.Popen(command, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        output, error = process.communicate()
    except OSError:
        success = False
    if success:
        if process.returncode < 0:
            print('Build Interrupted')  # process terminated by a signal
            success = False
        elif process.returncode > 0:
            success = False
    return (success, output, error)
# ---------
# Classes
# ---------
class SourceFile:
preprocessFuncs = {}
def __init__(self, path, projectRoot):
    # path is relative to projectRoot.
    self._path = path
    self._directDependencies = []   # SourceFile objects this file depends on
    self._isMainProgram = False
    self._projectRoot = projectRoot
    self._dependencyCyclesChecked = False  # set once dependenciesAreCyclic has cleared this file
    self._buildConfig = None
    self._target = None
    self.resetBuildMetadata()
    self._checksum = None           # checksum of the file on disk; set externally
def resetBuildMetadata(self):
    "Clears all build-state tracking, as for a file that has never been built."
    self._lastBuilt = None                    # build time recorded by a previous run
    self._mostRecentBuildOfDependency = None  # cache; see mostRecentBuildOfDependency()
    self._lastModified = None
    self._buildTime = None                    # build time including the current run
    self._buildFailed = False
    self._markedForBuilding = False
    self._needsBuilding = None                # cached needsBuilding() result; None = unknown
    self._lastCompileCommand = None
    self._buildCompileCommand = None
    self._lastChecksum = None
    self._buildChecksum = None
def updateWithMetadata(self, metadata):
    "Metadata is a tuple that is used for persisting the file to disk."
    if not metadata:
        # No stored metadata: start from a clean slate.
        self.resetBuildMetadata()
        return
    timestamp, compileCommand, checksum = metadata
    self.setLastBuilt(timestamp)
    self.setBuildTime(timestamp)
    self.setLastCompileCommand(compileCommand)
    self.setBuildCompileCommand(compileCommand)
    self.setLastChecksum(checksum)
    self.setBuildChecksum(checksum)
def metadata(self):
    "Tuple persisted to disk; mirror of updateWithMetadata."
    return (self.buildTime(), self.buildCompileCommand(), self.buildChecksum())
def requiresPreprocessing(self):
    "Subclasses override when a preprocessing step is needed."
    return False
def preprocessedFilePath(self):
    "Path of the file actually compiled; same as path() when no preprocessing."
    return self._path
def path(self):
    "Relative to the project root"
    return self._path
def fileName(self):
    "File name with directory components stripped."
    return os.path.basename(self._path)
def absolutePath(self):
    "Absolute path of the file on disk."
    return os.path.join(self._projectRoot, self.path())
def generatesObjectFile(self):
    "Subclasses override for file types that produce no object file."
    return True
def objectFileName(self):
    "Base name of the object file produced by compiling this file."
    pathWithoutExt = os.path.splitext(self.path())[0]
    return os.path.basename(pathWithoutExt) + '.o'
# Build-state accessors. Setters that can affect whether a rebuild is
# required reset _needsBuilding to invalidate the needsBuilding() cache.
def setBuildConfig(self, config):
    "Stored as a weakref proxy to avoid a reference cycle with the config."
    import weakref
    self._needsBuilding = None
    self._buildConfig = weakref.proxy(config)
def buildConfig(self):
    return self._buildConfig
def setTarget(self, target):
    "Stored as a weakref proxy to avoid a reference cycle with the target."
    import weakref
    self._needsBuilding = None
    self._target = weakref.proxy(target)
def target(self):
    return self._target
def setLastModified(self, lastModified):
    self._needsBuilding = None
    self._lastModified = lastModified
def lastModified(self):
    return self._lastModified
def setLastBuilt(self, lastBuilt):
    self._needsBuilding = None
    self._lastBuilt = lastBuilt
def lastBuilt(self):
    return self._lastBuilt
def setLastCompileCommand(self, flags):
    self._needsBuilding = None
    self._lastCompileCommand = flags
def buildCompileCommand(self):
    return self._buildCompileCommand
def setBuildCompileCommand(self, flags):
    self._buildCompileCommand = flags
def lastCompileCommand(self):
    return self._lastCompileCommand
def compileCommand(self):
    "Depends on build config, so created on the fly, and not stored."
    return None
def setLastChecksum(self, checksum):
    self._needsBuilding = None
    self._lastChecksum = checksum
def lastChecksum(self):
    return self._lastChecksum
def setChecksum(self, checksum):
    self._needsBuilding = None
    self._checksum = checksum
def checksum(self):
    return self._checksum
def setBuildChecksum(self, checksum):
    self._buildChecksum = checksum
def buildChecksum(self):
    return self._buildChecksum
def setBuildTime(self, buildTime):
    "Most recent build time, including the current build"
    self._buildTime = buildTime
def buildTime(self):
    return self._buildTime
def buildFailed(self):
    return self._buildFailed
def setIsMainProgram(self, yn):
    self._isMainProgram = yn
def isMainProgram(self):
    return self._isMainProgram
def checksumOfFile(self):
import md5
fl = open(self.absolutePath(),'r')
checksum = md5.new(fl.read()).digest()
fl.close()
return checksum
def build(self):
    """Preprocesses and compiles the file inside the target's intermediates
    directory. On success, records the build time, checksum and compile
    command. Returns True when the build succeeded."""
    self._buildFailed = False
    intermediateProductsDir = self._target.intermediateProductsDirectory(self._buildConfig)
    # Compile from inside the intermediates dir so object files land there.
    os.chdir( intermediateProductsDir )
    if self.preprocess():
        if self.buildPreprocessedFile():
            self.setBuildTime(time.time())
            self.setBuildChecksum(self.checksum())
            self.setBuildCompileCommand(self.compileCommand())
        else:
            self._buildFailed = True
    else:
        self._buildFailed = True
    return not self._buildFailed
def buildPreprocessedFile(self):
    "Compiles the (possibly preprocessed) file. True on success."
    return self.runCompileCommand(self.compileCommand())
def runCompileCommand(self, compileCommand):
    """Runs the compile command in a subprocess. A None command is treated
    as success (nothing to compile). Returns True on success."""
    if compileCommand == None: return True
    # PrintLock keeps output from parallel build threads from interleaving.
    PrintLock.acquire()
    print 'Compiling %s' % (self.path())
    logging.debug('Compile Command: %s' % (compileCommand))
    PrintLock.release()
    success, output, error = RunSubprocess(compileCommand + ' 2>&1')
    if not success:
        # Check if preprocessed file was empty. If so, ignore error
        f = open(self.preprocessedFilePath(), 'r')
        if f.read().strip() == '':
            success = True
        else:
            PrintLock.acquire()
            print output
            PrintLock.release()
        f.close()
    else:
        # If the compile succeeded, check the output for any warnings to print
        if re.search('warn', output, re.IGNORECASE):
            print output
    return success
def isBuilt(self):
"Whether file has been built in this build."
if None == self._buildTime:
return False
else:
return self._buildTime > self._lastBuilt
def isModified(self):
if not self._lastChecksum:
return True
else:
return (self._checksum != self._buildChecksum) or (self._lastModified > self._lastBuilt)
def compileCommandHasChanged(self):
return self._lastCompileCommand != self.compileCommand()
def addDirectDependency(self, sourceFile):
self._directDependencies.append(sourceFile)
    def dependenciesAreCyclic(self, fileStack, checkedFiles):
        """Depth-first search for a dependency cycle reachable from this file.

        fileStack is the chain of files on the current visit path;
        checkedFiles accumulates every file visited so the caller can skip
        them on later passes.  Returns (True, listOfPaths) when a cycle is
        found, otherwise (False, None).
        """
        checkedFiles.add(self)
        if self._dependencyCyclesChecked:
            # Already verified cycle-free on an earlier visit.  Push self
            # anyway: the caller pops unconditionally after each recursive
            # call, so the stack must gain one entry here.
            fileStack.append(self)
            return False, None
        self._dependencyCyclesChecked = True
        if self in fileStack:
            # Returned to a file already on the current visit chain: cycle.
            return True, [f.path() for f in fileStack]
        else:
            fileStack.append(self)
            for d in self._directDependencies:
                areCyclic, failingFiles = d.dependenciesAreCyclic(fileStack, checkedFiles)
                fileStack.pop()
                if areCyclic:
                    return True, failingFiles
            return False, None
def mostRecentBuildOfDependency(self):
if self._mostRecentBuildOfDependency:
return self._mostRecentBuildOfDependency
mostRecent = None
for dep in self._directDependencies:
mostRecent = MostRecentDate(mostRecent, dep.lastBuilt())
mostRecent = MostRecentDate(mostRecent, dep.mostRecentBuildOfDependency())
self._mostRecentBuildOfDependency = mostRecent # Cache value
return mostRecent
def needsBuilding(self):
"""
Checks whether a dependent was compiled more recently than
this file, or needs to be compiled.
"""
if None != self._needsBuilding:
return self._needsBuilding # Use cached result for performance
needsBuilding = False
if self.isModified() or self.compileCommandHasChanged():
needsBuilding = True
elif self.mostRecentBuildOfDependency() and self.lastBuilt() and \
(self.mostRecentBuildOfDependency() > self.lastBuilt()):
needsBuilding = True
else:
for dep in self._directDependencies:
if dep.needsBuilding():
needsBuilding = True
break
self._needsBuilding = needsBuilding # Cache result for performance
return needsBuilding
def canBuild(self):
"""
Whether or not all the dependencies are satisfied to allow the file
to be built.
"""
if self._buildFailed: return False
canBuild = True
for dep in self._directDependencies:
if dep.needsBuilding() and not dep.isBuilt():
canBuild = False
break
return canBuild
def preprocessedFilePath(self):
f = self.preprocessFuncs.get('preprocessedFileNameFunction')
if f:
filename = f(self.fileName())
else:
filename = self.fileName()
return os.path.join(self.target().intermediateProductsDirectory(self.buildConfig()), filename)
def preprocess(self):
if self.requiresPreprocessing():
f = self.preprocessFuncs['preprocessorFunction']
return f(self.absolutePath(), self.target().intermediateProductsDirectory(self.buildConfig()))
else:
# Copy the source file
preprocessedPath = self.preprocessedFilePath()
shutil.copy(self.absolutePath(), preprocessedPath)
return True
TreeStringIndentLevel = 4
def dependencyString(self, indent):
aString = ''
for dependentFile in self._directDependencies:
aString += '\n' + indent * ' ' + dependentFile.path()
aString += dependentFile.dependencyString(indent + self.TreeStringIndentLevel)
return aString
def __str__(self):
s = '%s %s\n' %(str(self.__class__), self.path())
s += 'Last Built: %s\n' % (self.lastBuilt())
s += 'Last Modified: %s\n' % (self.lastModified())
s += 'Can Build: %s\n' % (self.canBuild())
s += 'Needs Building: %s\n' % (self.needsBuilding())
s += 'Marked for Building: %s\n' % (self.markedForBuilding())
s += 'Dependencies'
s += self.dependencyString(self.TreeStringIndentLevel) + '\n'
return s
class FortranSourceFile (SourceFile):
    """A source file holding fixed- or free-form Fortran.

    Class-level regexes classify file names; configure() lets the project
    override them and install optional preprocessor hooks.
    """
    freeFormRegEx = '.*\.(F|f90|F90)$'
    fixedFormRegEx = '.*\.f$'
    freeFormPreprocessRegEx = None
    fixedFormPreprocessRegEx = None
    includeFileRegEx = None
    @classmethod
    def configure(cls, infoDict):
        "Override the class regexes and preprocessor hooks from a config dict."
        if not infoDict: return
        # setdefault both reads the override and records the default back
        # into the dict, as the original behaviour requires.
        cls.freeFormRegEx = infoDict.setdefault('freeformregex', cls.freeFormRegEx)
        cls.fixedFormRegEx = infoDict.setdefault('fixedformregex', cls.fixedFormRegEx)
        cls.freeFormPreprocessRegEx = infoDict.setdefault('freeformpreprocessregex', cls.freeFormPreprocessRegEx)
        cls.fixedFormPreprocessRegEx = infoDict.setdefault('fixedformpreprocessregex', cls.fixedFormPreprocessRegEx)
        cls.includeFileRegEx = infoDict.setdefault('includefileregex', cls.includeFileRegEx)
        cls.preprocessFuncs['preprocessorFunction'] = infoDict.setdefault('preprocessfunc', None)
        cls.preprocessFuncs['preprocessedFileNameFunction'] = infoDict.setdefault('preprocessednamefunc', None)
    @classmethod
    def fileNameMatchesType(cls, fileName):
        "Truthy when fileName matches any of the Fortran source patterns."
        for pattern in (cls.freeFormRegEx, cls.fixedFormRegEx,
                        cls.freeFormPreprocessRegEx, cls.fixedFormPreprocessRegEx):
            if pattern:
                match = re.match(pattern, fileName)
                if match:
                    return match
        return None
    @classmethod
    def allFileRegExs(cls):
        "Every configured file-name regex, skipping unset ones."
        candidates = [cls.freeFormRegEx, cls.fixedFormRegEx,
                      cls.freeFormPreprocessRegEx, cls.fixedFormPreprocessRegEx,
                      cls.includeFileRegEx]
        return [pattern for pattern in candidates if pattern]
    def _matchFileNameAgainst(self, patterns):
        # Helper: first match of this file's name against the given patterns.
        for pattern in patterns:
            if pattern:
                match = re.match(pattern, self.fileName())
                if match:
                    return match
        return None
    def requiresPreprocessing(self):
        "Whether a project preprocessor must be run on this file."
        return self._matchFileNameAgainst((self.freeFormPreprocessRegEx,
                                           self.fixedFormPreprocessRegEx))
    def isFreeForm(self):
        "Whether this file is free-form Fortran."
        return self._matchFileNameAgainst((self.freeFormPreprocessRegEx,
                                           self.freeFormRegEx))
    def isFixedForm(self):
        "Whether this file is fixed-form Fortran."
        return self._matchFileNameAgainst((self.fixedFormPreprocessRegEx,
                                           self.fixedFormRegEx))
    def generatesObjectFile(self):
        "Include files produce no object file; everything else does."
        return not ( self.includeFileRegEx and
                     re.match(self.includeFileRegEx, self.fileName()) )
    def compileCommand(self):
        "Compile command for this file's Fortran dialect, or None."
        if self.isFixedForm():
            return self.buildConfig().fortran77CompileCommand(self.target(), self)
        if self.isFreeForm():
            return self.buildConfig().fortran90CompileCommand(self.target(), self)
        return None
class CSourceFile (SourceFile):
    """A source file holding C code or C headers.

    Class-level regexes classify file names; configure() lets the project
    override them and install optional preprocessor hooks.
    """
    fileNameRegEx = '.*\.c$'
    includeFileRegEx = '.*\.h$'
    preprocessFileNameRegEx = None
    @classmethod
    def configure(cls, infoDict):
        "Override the class regexes and preprocessor hooks from a config dict."
        if not infoDict: return
        cls.fileNameRegEx = infoDict.setdefault('fileregex', cls.fileNameRegEx)
        cls.includeFileRegEx = infoDict.setdefault('includefileregex', cls.includeFileRegEx)
        cls.preprocessFileNameRegEx = infoDict.setdefault('preprocessfileregex', cls.preprocessFileNameRegEx)
        cls.preprocessFuncs['preprocessorFunction'] = infoDict.get('preprocessfunc')
        cls.preprocessFuncs['preprocessedFileNameFunction'] = infoDict.get('preprocessednamefunc')
    @classmethod
    def fileNameMatchesType(cls, fileName):
        "Truthy when fileName matches any of the C source/header patterns."
        for pattern in (cls.fileNameRegEx, cls.includeFileRegEx,
                        cls.preprocessFileNameRegEx):
            if pattern:
                match = re.match(pattern, fileName)
                if match:
                    return match
        return None
    @classmethod
    def allFileRegExs(cls):
        "Every configured file-name regex, skipping unset ones."
        candidates = [cls.fileNameRegEx, cls.preprocessFileNameRegEx, cls.includeFileRegEx]
        return [pattern for pattern in candidates if pattern]
    def requiresPreprocessing(self):
        "Whether an extra preprocessor has to be run (on top of the standard C preprocessor)"
        return self.preprocessFileNameRegEx and re.match(self.preprocessFileNameRegEx, self.fileName())
    def generatesObjectFile(self):
        "Header files produce no object file; everything else does."
        return not ( self.includeFileRegEx and re.match(self.includeFileRegEx, self.fileName()) )
    def compileCommand(self):
        "C compile command when this file is compilable, else None."
        isPlainSource = self.fileNameRegEx and re.match(self.fileNameRegEx, self.fileName())
        isPreprocessed = self.preprocessFileNameRegEx and \
                         re.match(self.preprocessFileNameRegEx, self.fileName())
        if isPlainSource or isPreprocessed:
            return self.buildConfig().cCompileCommand(self.target(), self)
        return None
class SourceTree:
def __init__(self, rootDirs, sourceTreesDependedOn, projectRoot, skipdirs, skipfiles,
mainProgramFile = None, noDependencies = False):
self.rootDirs= rootDirs
self.projectRoot = projectRoot
self.skipfiles = set(skipfiles)
self.skipdirs = set(skipdirs)
self.mainProgramFile = mainProgramFile
self.moduleUseRegEx = re.compile(r'^\s*(use|module|\*copy)\s+([\d\w_]+)', re.IGNORECASE | re.MULTILINE)
self.sourceTreesDependedOn = sourceTreesDependedOn
self.noDependencies = noDependencies
self.sourceFiles = self.createSourceFiles()
def sourceFiles(self):
return self.sourceFiles
def sourceFileWithName(self, name):
matchingFiles = [f for f in self.sourceFiles if self.mainProgramFile == os.path.basename(f.path())]
if len(matchingFiles) == 1:
return matchingFiles[0]
else:
return None
def containedModulesDict(self):
"Module names contained in each file in tree, with file path as key"
return self.containedModsDict
def createSourceFiles(self):
"""
Create source file objects representing source files in the file
system.
"""
sourceFiles = []
def addFiles(regExString, sourceFileClass):
if not regExString: return
for rootDir in self.rootDirs:
files = self.locateFiles(regExString, rootDir)
for path, modDate, checksum in files:
newFile = sourceFileClass(path, self.projectRoot)
newFile.setLastModified(modDate)
newFile.setChecksum(checksum)
if os.path.basename(path) == self.mainProgramFile:
newFile.setIsMainProgram(True)
sourceFiles.append(newFile)
logging.debug('Searching for fortran source files')
for regEx in FortranSourceFile.allFileRegExs():
addFiles(regEx, FortranSourceFile)
if not self.noDependencies: self.setupFortranDependencies(sourceFiles)
logging.debug('Searching for c source files')
for regEx in CSourceFile.allFileRegExs():
addFiles(regEx, CSourceFile)
return sourceFiles
def createSourceFileForPath(self, path):
"Factory method to create a SourceFile object for the path given."
fileName = os.path.basename(path)
if FortranSourceFile.fileNameMatchesType(fileName):
return FortranSourceFile(path, self.projectRoot)
elif CSourceFile.fileNameMatchesType(fileName):
return CSourceFile(path, self.projectRoot)
else:
raise Exception, 'Unknown file type in sourceFileForPath'
def locateFiles(self, fileNameRegEx, rootDir):
"""
Locates files matching reg ex passed. Returns list of tuples,
containing file path and modification date.
"""
import md5
logging.debug('locating files in directory %s' % (rootDir))
fileTuples = []
regEx = re.compile(fileNameRegEx)
os.chdir(self.projectRoot)
for root, dirs, files in os.walk(rootDir):
for skipdir in self.skipdirs:
if skipdir in dirs: dirs.remove(skipdir)
for f in files:
if os.path.basename(f) in self.skipfiles: continue
if regEx.match(f):
filePath = os.path.join(root,f)
prefix = os.path.commonprefix([filePath, self.projectRoot])
filePath = filePath[len(prefix):]
if filePath[0] == os.sep: filePath = filePath[1:]
fl = open(filePath,'r')
checksum = md5.new(fl.read()).digest()
fl.close()
fileTuples.append( (filePath, os.path.getmtime(filePath), checksum) )
return fileTuples
def updateFileStatus(self, fileMetaData, buildConfig, target):
logging.debug('Updating file status')
for f in self.sourceFiles:
f.setTarget(target)
f.setBuildConfig(buildConfig)
metadata = fileMetaData.get(f.path())
f.updateWithMetadata(metadata)
def updateFileMetaData(self, fileMetaData):
logging.debug('Updating file metadata')
for f in self.sourceFiles:
fileMetaData[f.path()] = f.metadata()
pathsToRemove = self.removedFiles(fileMetaData)
for p in pathsToRemove: del fileMetaData[p]
def removedFiles(self, fileMetaData):
"Returns set of files removed since last build. Paths are project root relative."
timestampPaths = set(fileMetaData.keys())
sourceFilePaths = set([f.path() for f in self.sourceFiles])
pathsRemoved = timestampPaths.difference(sourceFilePaths)
return pathsRemoved
def buildableSourceFiles(self):
"""
Returns a list of source files that need building, and for
which dependencies are satisfied.
"""
logging.debug('Getting buildable source files')
files = []
for s in self.sourceFiles:
if self.noDependencies:
if s.isModified() and not s.isBuilt():
files.append(s)
else:
if s.needsBuilding() and s.canBuild() and not s.isBuilt():
files.append(s)
return files
def scanFileForModules(self, filePath):
usedModules = set()
containedModules = set()
includedFiles = set()
f = open(filePath,'r')
fileContent = f.read()
f.close()
matches = self.moduleUseRegEx.findall(fileContent)
for m in matches:
if m[0].lower() == 'use':
usedModules.add(m[1].lower())
elif m[0].lower() == '*copy':
includedFiles.add(m[1].lower())
else:
containedModules.add(m[1].lower())
return list(usedModules), list(containedModules), list(includedFiles)
def setupFortranDependencies(self, fortranSourceFiles):
logging.debug('Setting fortran dependencies')
self.containedModsDict = {}
usedModsDict = {}
includedFilesDict = {}
for f in fortranSourceFiles:
usedMods, containedMods, includedFiles = self.scanFileForModules(f.path())
usedModsDict[f] = usedMods
includedFilesDict[f] = includedFiles
for m in containedMods:
self.containedModsDict[m] = f
for f in fortranSourceFiles:
for usedMod in usedModsDict[f]:
fileWithUsedMod = self.containedModsDict.get(usedMod)
if not fileWithUsedMod:
# Search for dependency in other source trees
for sourceTree in self.sourceTreesDependedOn:
fileWithUsedMod = sourceTree.containedModulesDict().get(usedMod)
if fileWithUsedMod: break
if fileWithUsedMod and f != fileWithUsedMod: f.addDirectDependency(fileWithUsedMod)
for includeFile in includedFilesDict[f]:
includeFileName = includeFile + '.fh'
includedSourceFiles = [ifile for ifile in fortranSourceFiles if \
ifile.fileName().lower() == includeFileName.lower()]
if len(includedSourceFiles) == 1:
f.addDirectDependency(includedSourceFiles[0])
else:
raise Exception, 'Could not find include file %s' % (includeFileName)
# Check for cycles
print 'Checking for cyclic dependencies'
remainingFiles = set(fortranSourceFiles)
while len(remainingFiles) > 0:
checkedFiles = set()
fileStack = []
f = remainingFiles.pop()
areCyclic, failingFiles = f.dependenciesAreCyclic(fileStack, checkedFiles)
if areCyclic:
raise Exception('The following files have a cyclic dependency: %s' % (failingFiles))
else:
remainingFiles.difference_update(checkedFiles)
def __iter__(self):
return iter(self.sourceFiles)
class Target:
    """A buildable target: a set of source directories compiled into a
    static library and, optionally, linked into an executable.

    Configuration comes from targetInfoDict (keys seen here: 'name',
    'rootdirs', 'buildsubdir', 'libraryname', 'compilegroups', 'dependson',
    'skipdirs', 'skipfiles' and optionally 'exename' / 'mainprogramfile').
    """
    def __init__(self, targetInfoDict, buildRootDir, projectRoot, setupFunc = None):
        self.targetInfoDict = targetInfoDict
        self.isBuilt = False
        self.lastConfig = None
        self._sourceTree = None
        self.buildRootDir = buildRootDir
        # NOTE(review): this attribute shadows the targetDependencies()
        # method below, making that method uncallable on instances.
        self.targetDependencies = None
        self.buildShouldStop = False
        self.buildQueue = None
        self.projectRoot = projectRoot
        self.setupFuncTuple = (setupFunc,) # Using a tuple to avoid binding function to class
    def mainProgramFile(self):
        "Basename of the main program source file, or None."
        return self.targetInfoDict.get('mainprogramfile')
    def sourceTree(self):
        "The SourceTree created by compileSources, or None before the first build."
        return self._sourceTree
    def rootSourceDirectories(self):
        "Absolute paths of the target's source root directories."
        return [os.path.join(self.projectRoot, d) for d in self.targetInfoDict['rootdirs']]
    def buildSubDirectory(self):
        "Per-target subdirectory name within the build directory."
        return self.targetInfoDict['buildsubdir']
    def name(self):
        "Target name."
        return self.targetInfoDict['name']
    def executableName(self):
        "Name of the executable product (only valid when 'exename' is configured)."
        return self.targetInfoDict['exename']
    def targetDependencies(self):
        # NOTE(review): dead code -- shadowed by the instance attribute of
        # the same name assigned in __init__ / updateTargetDependencies.
        return self.targetDependencies
    def moduleFilePath(self, buildConfig):
        "Module search path: this target's module dir plus its dependencies'."
        modulePath = [self.moduleFileDirectory(buildConfig)]
        for t in self.targetDependencies:
            modulePath.append(t.moduleFileDirectory(buildConfig))
        return modulePath
    def dependentLibraryNames(self):
        "This target's library name followed by its dependencies' names."
        names = [self.libraryName()]
        for t in self.targetDependencies:
            names.append(t.libraryName())
        return names
    def dependentLibraryPaths(self, buildConfig):
        "This target's library path followed by its dependencies' paths."
        paths = [self.productLibraryPath(buildConfig)]
        for t in self.targetDependencies:
            paths.append(t.productLibraryPath(buildConfig))
        return paths
    def moduleFileDirectory(self, buildConfig):
        "Directory holding compiled Fortran module files (the intermediates dir)."
        return self.intermediateProductsDirectory(buildConfig)
    def libraryName(self):
        "Bare library name (without 'lib' prefix or '.a' suffix)."
        return self.targetInfoDict['libraryname']
    def fullLibraryName(self):
        "Library file name, e.g. 'libfoo.a'."
        return 'lib' + self.targetInfoDict['libraryname'] + '.a'
    def compileGroups(self):
        "Mapping of compile-group name to the file names in that group."
        return self.targetInfoDict['compilegroups']
    def buildRootDirectory(self):
        "Absolute path to build root."
        return self.buildRootDir
    def productInstallDirectory(self, buildConfig):
        "Directory executables are installed into."
        return buildConfig.installDirectory()
    def productLibraryDirectory(self, buildConfig):
        "Directory the built library is placed in."
        return os.path.join(self.buildRootDirectory(), buildConfig.buildSubDirectory(), 'lib')
    def productLibraryPath(self, buildConfig):
        "Full path of the built library file."
        return os.path.join(self.productLibraryDirectory(buildConfig), self.fullLibraryName())
    def productExecutableDirectory(self, buildConfig):
        "Directory the built executable is placed in."
        return os.path.join(self.buildRootDirectory(), buildConfig.buildSubDirectory(), 'bin')
    def intermediateProductsDirectory(self, buildConfig):
        "Directory for object/module intermediates, per target and config."
        return os.path.join(self.buildRootDir, self.buildSubDirectory() + '.build',
            buildConfig.buildSubDirectory())
    def updateTargetDependencies(self, allTargets):
        "Resolve the 'dependson' target names against the given Target objects."
        self.targetDependencies = []
        for targetName in self.targetInfoDict['dependson']:
            target = [t for t in allTargets if t.name() == targetName][0]
            self.targetDependencies.append(target)
    def isFirstBuild(self, buildConfig):
        # NOTE(review): despite the name, this returns True when the
        # intermediates directory already EXISTS (i.e. NOT a first build).
        # Looks inverted -- confirm against callers before relying on it.
        return os.path.exists(self.intermediateProductsDirectory(buildConfig))
    def build(self, buildConfig, allFileMetaData, clean, numThreads = 1, noDependencies = False):
        """Build dependency targets, then this target's sources and (when
        configured) its executable.  Returns (success, numFilesBuilt).
        A repeat call with the same config short-circuits to success."""
        if self.isBuilt and self.lastConfig == buildConfig: return True, 0
        self.isBuilt = False
        self.lastConfig = buildConfig
        self.buildShouldStop = False
        intermediatesDir = self.intermediateProductsDirectory(buildConfig)
        if not os.path.exists(intermediatesDir):
            os.makedirs(intermediatesDir)
        dependenciesBuilt = True
        numFilesBuilt = 0
        # Build dependency targets first; stop at the first failure.
        for t in self.targetDependencies:
            logging.debug('Building dependency target %s' % (t.name()))
            dependenciesBuilt, n = t.build(buildConfig, allFileMetaData, clean, numThreads, noDependencies)
            numFilesBuilt += n
            if not dependenciesBuilt: break
        # Metadata is stored per config name, then per target name.
        fileMetaData = allFileMetaData.setdefault(buildConfig.name(),{}).setdefault(self.name(),{})
        if clean: fileMetaData.clear()
        if dependenciesBuilt and not self.buildShouldStop:
            self.setBuildEnvironment(buildConfig)
            self.isBuilt, n = self.compileSources(buildConfig, fileMetaData, numThreads, noDependencies)
            numFilesBuilt += n
            if self.isBuilt and 'exename' in self.targetInfoDict:
                self.isBuilt = self.compileExecutable(buildConfig)
                if not self.isBuilt:
                    print 'Failed to link executable for target %s' % (self.name())
        return self.isBuilt, numFilesBuilt
    def install(self, buildConfig):
        "Copy the built executable (if any) into the install directory."
        import shutil
        if 'exename' not in self.targetInfoDict: return
        print 'Installing %s' % (self.name())
        exeDir = os.path.join(self.projectRoot, self.productExecutableDirectory(buildConfig))
        exePath = os.path.join(exeDir, self.executableName())
        binDir = self.productInstallDirectory(buildConfig)
        shutil.copy(exePath, binDir)
    def stopBuild(self):
        "Request this target, its dependencies and any running queue to stop."
        self.buildShouldStop = True
        for t in self.targetDependencies:
            t.stopBuild()
        if self.buildQueue:
            self.buildQueue.stopBuild()
    def setBuildEnvironment(self, buildConfig):
        "Export FORAY_* environment variables for hooks and subprocesses."
        os.environ['FORAY_TARGET_ROOT_DIRS'] = string.join([r'"%s"' % (d) for d in self.rootSourceDirectories()])
        os.environ['FORAY_INTERMEDIATE_PRODUCTS_DIR'] = self.intermediateProductsDirectory(buildConfig)
        os.environ['FORAY_LIBRARY_PRODUCTS_DIR'] = self.productLibraryDirectory(buildConfig)
        os.environ['FORAY_EXECUTABLE_PRODUCTS_DIR'] = self.productExecutableDirectory(buildConfig)
        os.environ['FORAY_INSTALL_DIR'] = self.productInstallDirectory(buildConfig)
    def compileSources(self, buildConfig, fileMetaData, numThreads, noDependencies):
        """Compile this target's sources into its library via a BuildQueue.
        Returns (success, numFilesBuilt) and refreshes fileMetaData."""
        print 'Starting build for target "%s" with config "%s"' % (self.name(), buildConfig.name())
        libDirPath = self.productLibraryDirectory(buildConfig)
        if not os.path.exists(libDirPath):
            os.makedirs(libDirPath)
        # Optional project hook run before scanning sources.
        if self.setupFuncTuple[0]:
            self.setupFuncTuple[0](self.projectRoot,
                self.rootSourceDirectories(),
                self.intermediateProductsDirectory(buildConfig),
                self.productLibraryDirectory(buildConfig),
                self.productExecutableDirectory(buildConfig),
                self.productInstallDirectory(buildConfig) )
        if not self._sourceTree:
            mainProgramFile = self.targetInfoDict.get('mainprogramfile')
            self._sourceTree = SourceTree(self.rootSourceDirectories(),
                [t.sourceTree() for t in self.targetDependencies],
                self.projectRoot,
                self.targetInfoDict['skipdirs'],
                self.targetInfoDict['skipfiles'],
                mainProgramFile,
                noDependencies)
        # Drop objects for source files deleted since the last build.
        self.unarchiveBuildProducts(buildConfig, self._sourceTree.removedFiles(fileMetaData))
        logging.debug('Updating file status')
        self._sourceTree.updateFileStatus(fileMetaData, buildConfig, self)
        libFilePath = os.path.join(self.productLibraryDirectory(buildConfig), self.fullLibraryName())
        self.buildQueue = BuildQueue(self._sourceTree, buildConfig, self, libFilePath, numThreads)
        success = False
        numFilesBuilt = 0
        if not self.buildShouldStop:
            success, numFilesBuilt = self.buildQueue.buildSource()
            if success and numFilesBuilt > 0:
                # Run ranlib
                indexLibCommand = buildConfig.indexLibraryCommand(self)
                logging.debug('Indexing library: ' + indexLibCommand)
                success, output, error = RunSubprocess(indexLibCommand)
                if not success:
                    print 'ranlib failed'
                    print output
                    print error
        self.buildQueue = None
        self._sourceTree.updateFileMetaData(fileMetaData)
        if success:
            statusString = 'Compiled library'
        elif self.buildShouldStop:
            statusString = 'Compiling interrupted'
        else:
            statusString = 'Failed to build library'
        print statusString + ' for target "%s" and config "%s"' % (self.name(), buildConfig.name())
        return success, numFilesBuilt
    def compileExecutable(self, buildConfig):
        "Link the main program object into the executable; returns success."
        exeDirPath = self.productExecutableDirectory(buildConfig)
        if not os.path.exists(exeDirPath): os.makedirs(exeDirPath)
        os.chdir(exeDirPath)
        exeCommand = buildConfig.linkExecutableCommand(self)
        print 'Compiling executable for %s' % (self.name())
        logging.debug('Compile command: %s' % (exeCommand))
        success, output, error = RunSubprocess(exeCommand)
        if not success:
            if output: print output
            if error: print error
        return success
    def archiveBuildProducts(self, buildConfig, sourceFiles):
        "Archive the given files' objects into the library (main program excluded)."
        print 'Archiving object files'
        sourceFilesToArchive = [s for s in sourceFiles if not s.isMainProgram()]
        if len(sourceFilesToArchive) == 0: return
        command = buildConfig.archiveCommand(self, sourceFilesToArchive)
        logging.debug('Archiving command: %s' % (command))
        if command:
            success, output, error = RunSubprocess(command)
            if not success:
                if output: print output
                if error: print error
    def unarchiveBuildProducts(self, buildConfig, sourceFilePaths):
        "Removes object files corresponding to the project relative paths passed."
        print 'Removing object files for which source files no longer exist'
        sourceFiles = [self._sourceTree.createSourceFileForPath(p) for p in sourceFilePaths]
        sourceFiles = [f for f in sourceFiles if f.generatesObjectFile()]
        if len(sourceFiles) == 0: return
        command = buildConfig.unarchiveCommand(self, sourceFiles)
        logging.debug('Unarchiving command: %s' % (command))
        if command:
            success, output, error = RunSubprocess(command)
            if not success:
                if output: print output
                if error: print error
class BuildConfig:
    """A named build configuration.

    Wraps the configuration dictionary parsed from the project file and
    turns it into concrete shell command strings for compiling, archiving,
    indexing and linking.
    """
    def __init__(self, configDict, projectRoot):
        self.configDict = configDict
        self.projectRoot = projectRoot
    def name(self):
        "Configuration name."
        return self.configDict['name']
    def installDirectory(self):
        "Install directory with shell variables expanded."
        return ExpandShellArgs(self.configDict['installdir'])
    def buildSubDirectory(self):
        "Per-configuration subdirectory of the build directory."
        return self.configDict['buildsubdir']
    def compileGroupForFile(self, target, sourceFile):
        """Name of the compile group whose flags apply to sourceFile;
        'default' when the file appears in no explicit group."""
        fileName = os.path.split(sourceFile.path())[1]
        compileGroups = target.compileGroups()
        fileGroup = 'default'
        for groupName, fileNames in compileGroups.iteritems():
            if fileName in fileNames:
                fileGroup = groupName
                break
        return fileGroup
    def modulePathOptions(self, target):
        "Compiler options adding every module directory to the search path."
        modulePath = target.moduleFilePath(self)
        optionString = self.configDict['compileroptions']['modpathoption']
        moduleString = ''
        if len(modulePath) > 0:
            # Builds '<opt> "dir1" <opt> "dir2" ...'
            moduleString = reduce( lambda x, y: '%s %s "%s"' % (x, optionString, y), modulePath, '' )
        return moduleString
    def linkLibraryOptions(self, target):
        "Linker options (-L / -l...) for the target's own and dependent libraries."
        libraryPath = '-L"%s" ' % (target.productLibraryDirectory(self))
        dependentLibraryNames = target.dependentLibraryNames()
        dependentLibraryPaths = target.dependentLibraryPaths(self)
        dependentLibraryNames = [l[0] for l in zip(dependentLibraryNames, dependentLibraryPaths) \
            if os.path.exists(l[1])] # Filter non-existent libraries out
        optionsString = ''
        if len(dependentLibraryNames) > 0:
            optionsString = reduce( lambda x, y: '%s -l%s' % (x, y), dependentLibraryNames, libraryPath )
        return optionsString
    def fortranCompileCommand(self, target, sourceFile, compilerKey, flagsKey):
        """Full Fortran compile command for sourceFile, using the compiler
        and flags stored under compilerKey / flagsKey in 'compileroptions'."""
        compilerOptionsDict = self.configDict['compileroptions']
        compileGroup = self.compileGroupForFile(target, sourceFile)
        compileGroupFlags = compilerOptionsDict['compilegroupflags'][compileGroup]
        compiler = compilerOptionsDict[compilerKey]
        flags = compilerOptionsDict[flagsKey]
        modPathFlags = self.modulePathOptions(target)
        sourceFilePath = os.path.join(self.projectRoot, sourceFile.preprocessedFilePath())
        return '%s %s %s %s "%s"' % (compiler, flags, compileGroupFlags, modPathFlags, sourceFilePath)
    def fortran77CompileCommand(self, target, sourceFile):
        "Compile command for fixed-form (Fortran 77) source."
        return self.fortranCompileCommand(target, sourceFile, 'f77compiler', 'f77flags')
    def fortran90CompileCommand(self, target, sourceFile):
        "Compile command for free-form (Fortran 90) source."
        return self.fortranCompileCommand(target, sourceFile, 'f90compiler', 'f90flags')
    def cCompileCommand(self, target, sourceFile):
        "Full C compile command for sourceFile."
        compilerOptionsDict = self.configDict['compileroptions']
        compileGroup = self.compileGroupForFile(target, sourceFile)
        compileGroupFlags = compilerOptionsDict['compilegroupflags'][compileGroup]
        compiler = compilerOptionsDict['ccompiler']
        flags = compilerOptionsDict['cflags']
        sourceFilePath = os.path.join(self.projectRoot, sourceFile.preprocessedFilePath())
        return '%s %s %s "%s"' % (compiler, flags, compileGroupFlags, sourceFilePath)
    def archiveCommand(self, target, sourceFiles):
        """
        Should only be called once object files have been created, because
        it checks for their existence.

        Returns a shell command that cds into the intermediates directory,
        archives the existing object files into the library and removes
        them; None when there is nothing to archive.
        """
        libPath = target.productLibraryPath(self)
        intermedPath = target.intermediateProductsDirectory(self)
        paths = [s.objectFileName() for s in sourceFiles if
            s.generatesObjectFile() and
            os.path.exists(os.path.join(intermedPath,s.objectFileName()))]
        paths = string.join(paths)
        if len(paths) == 0: return None
        changeDirCommand = 'cd "%s"' % (intermedPath)
        arCommand = self.configDict['compileroptions']['archivecommand']
        arCommand = '%s "%s" %s' % (arCommand, libPath, paths)
        removeCommand = 'rm ' + paths
        return string.join([changeDirCommand, arCommand, removeCommand], ' ; ')
    def unarchiveCommand(self, target, sourceFiles):
        "Shell command removing the given files' objects from the library, or None."
        libPath = target.productLibraryPath(self)
        objects = string.join([s.objectFileName() for s in sourceFiles if s.generatesObjectFile()])
        if len(objects) == 0: return None
        unarchCommand = self.configDict['compileroptions']['unarchivecommand']
        unarchCommand = '%s "%s" %s' % (unarchCommand, libPath, objects)
        return unarchCommand
    def indexLibraryCommand(self, target):
        "Shell command that runs ranlib on the library only if it exists."
        libPath = target.productLibraryPath(self)
        ranlibCommand = self.configDict['compileroptions']['ranlibcommand']
        return 'if [ -e "%s" ]; then %s "%s" ; fi' % (libPath, ranlibCommand, libPath)
    def linkExecutableCommand(self, target):
        # NOTE(review): assumes sourceFileWithName finds the main program
        # file; a None result would raise AttributeError here.
        "Shell command linking the target's main object into its executable."
        mainProgramFileName = target.mainProgramFile()
        mainSourceFile = target.sourceTree().sourceFileWithName(mainProgramFileName)
        mainObjectName = mainSourceFile.objectFileName()
        intermedPath = target.intermediateProductsDirectory(self)
        mainObjectPath = os.path.join(intermedPath, mainObjectName)
        exeName = target.executableName()
        libs = self.linkLibraryOptions(target)
        c = self.configDict['compileroptions']
        linkCommand = '%s %s -o %s "%s" %s %s %s' % \
            (c['link'], c['linkflags'], exeName, mainObjectPath, c['prioritylibs'], libs, c['otherlibs'])
        return linkCommand
class BuildQueue:
    """
    This class schedules file compilations. It takes account of dependencies, and
    is optimized for working on parallel systems.

    buildSource() repeatedly asks the source tree which files are currently
    buildable (all prerequisites built) and compiles each batch with a pool
    of worker threads, periodically archiving finished objects.
    """
    def __init__(self, sourceTree, buildConfig, target, libFilePath, numThreads):
        import threading
        self.sourceTree = sourceTree
        self.buildConfig = buildConfig
        self.target = target
        self.libFilePath = libFilePath
        self.numParallelThreads = numThreads
        # Guard the shared buildable/built lists accessed by worker threads.
        self.buildableSourceFilesLock = threading.Lock()
        self.builtSourceFilesLock = threading.Lock()
        self.buildShouldStop = False
    def buildSourceFilesInThread(self):
        """Worker-thread body: pop and build files until the current batch
        drains, a build fails, or a stop is requested."""
        def getNextSourceFile():
            # Pop the next buildable file under the lock; None when empty.
            self.buildableSourceFilesLock.acquire()
            if len(self.buildableSourceFiles) > 0:
                f = self.buildableSourceFiles.pop()
            else:
                f = None
            self.buildableSourceFilesLock.release()
            return f
        try:
            f = getNextSourceFile()
            while f and not self.buildShouldStop:
                success = f.build()
                if success:
                    self.builtSourceFilesLock.acquire()
                    self.builtSourceFiles.append(f)
                    self.builtSourceFilesLock.release()
                    f = getNextSourceFile()
                else:
                    # One failed compile stops every worker.
                    self.buildShouldStop = True
        except Exception, e:
            print 'An error occurred: ', e
            self.buildShouldStop = True
    # Archive finished object files after this many builds, so a very long
    # build does not accumulate all objects before archiving.
    ArchiveThreshold = 30
    def buildSource(self):
        """Build all buildable files in rounds of worker threads until no
        more files become buildable or a stop/failure occurs.  Returns
        (success, numFilesBuilt)."""
        import threading
        self.buildShouldStop = False
        self.buildableSourceFiles = self.sourceTree.buildableSourceFiles()
        numFilesBuilt = 0
        numFilesBuiltSinceLastArchive = 0
        self.builtSourceFiles = []
        while len(self.buildableSourceFiles) > 0 and not self.buildShouldStop:
            numBuiltBefore = len(self.builtSourceFiles)
            threads = []
            for threadIndex in range(self.numParallelThreads):
                threads.append( threading.Thread(target=self.buildSourceFilesInThread) )
                threads[-1].start()
            for thread in threads:
                thread.join()
            numBuiltThisRound = len(self.builtSourceFiles) - numBuiltBefore
            numFilesBuilt += numBuiltThisRound
            numFilesBuiltSinceLastArchive += numBuiltThisRound
            if numFilesBuiltSinceLastArchive >= BuildQueue.ArchiveThreshold:
                # Archive what we have so far and start collecting afresh.
                self.target.archiveBuildProducts(self.buildConfig, self.builtSourceFiles)
                numFilesBuiltSinceLastArchive = 0
                self.builtSourceFiles = []
            if not self.buildShouldStop:
                # Newly built files may have unblocked more files.
                self.buildableSourceFiles = self.sourceTree.buildableSourceFiles()
        # Archive whatever remains from the final rounds.
        self.target.archiveBuildProducts(self.buildConfig, self.builtSourceFiles)
        return (not self.buildShouldStop), numFilesBuilt
    def stopBuild(self):
        "Ask the worker threads to stop after their current file."
        self.buildShouldStop = True
class Builder:
    """Top-level driver: resolves the requested targets and configurations
    from the project's buildInfo dictionary, then builds every
    (config, target) combination, persisting file metadata between runs."""
    def __init__(self, buildInfo, targetNames, configNames, projectRoot, numThreads, clean, noDependencies, debug):
        # Store ivars
        self.buildInfo = buildInfo
        self.projectRoot = projectRoot
        self.numThreads = numThreads
        self.clean = clean
        self.noDependencies = noDependencies
        self.buildCancelled = False
        self.debug = debug
        # Setup build configurations
        self.buildConfigsToBuild = []
        if not configNames or len(configNames) == 0:
            configNames = [self.buildInfo['defaultconfig']] #default
        for configName in configNames:
            configDicts = [d for d in self.buildInfo['configs'] if d['name'] == configName]
            if len(configDicts) != 1:
                raise Exception, 'Invalid configuration %s' % (configName)
            configDict = configDicts[0]
            # Resolve the 'inherits' chain into one flat dictionary.
            flattenedConfigDict = self.flattenConfigInheritance(configDict)
            logging.debug('Flattened config dict for %s: %s' % (configDict['name'], str(flattenedConfigDict)))
            buildConfig = BuildConfig(flattenedConfigDict, self.projectRoot)
            self.buildConfigsToBuild.append(buildConfig)
        # Setup targets
        self.currentTarget = None
        self.allTargets = []
        self.targetsToBuild = []
        if not targetNames or len(targetNames) == 0:
            targetNames = [t['name'] for t in self.buildInfo['targets']] # use all
        for targetDict in self.buildInfo['targets']:
            target = Target(targetDict, ExpandShellArgs(self.buildInfo['builddir']), self.projectRoot,
                self.buildInfo.get('firstbuildfunc') )
            self.allTargets.append(target)
            if targetDict['name'] in targetNames:
                self.targetsToBuild.append(target)
        # Dependencies are resolved over ALL targets, not just those built.
        for target in self.allTargets:
            target.updateTargetDependencies(self.allTargets)
    def flattenConfigInheritance(self, configDict):
        """Return configDict merged over its (transitive) 'inherits' chain,
        with deeper ancestors applied first so the child wins."""
        import copy
        def recursiveUpdate(dictToUpdate, dictToUpdateWith):
            "Recursively update a tree of dictionaries"
            for key,value in dictToUpdateWith.iteritems():
                if key in dictToUpdate and isinstance(value, dict):
                    recursiveUpdate(dictToUpdate[key], dictToUpdateWith[key])
                else:
                    dictToUpdate[key] = copy.deepcopy(value)
        def inheritFromDict(inheritingDict, resultDict):
            "Inherit the contents of one dictionary in another"
            if 'inherits' in inheritingDict:
                configName = inheritingDict['inherits']
                inheritedDict = [d for d in self.buildInfo['configs'] if d['name'] == configName][0]
                inheritFromDict(inheritedDict, resultDict)
            recursiveUpdate(resultDict, inheritingDict)
        flattenedDict = {}
        inheritFromDict(configDict, flattenedDict)
        return flattenedDict
    def build(self):
        """Build every selected target under every selected configuration.
        File metadata is pickled to the build directory after each target
        that built files.  Returns overall success."""
        import pprint, pickle
        # read old fileMetaData
        fileMetaDataFilePath = os.path.join(ExpandShellArgs(self.buildInfo['builddir']), 'foray.fileMetaData')
        allFileMetaData = None
        if os.path.exists(fileMetaDataFilePath):
            f = open(fileMetaDataFilePath, 'rb')
            allFileMetaData = pickle.load(f)
            f.close()
        if not allFileMetaData:
            allFileMetaData = {}
        # Build each combination of target and config
        success = True
        # NOTE(review): allTargetsDict is never used below.
        allTargetsDict = dict([(target.name(), target) for target in self.allTargets])
        for config in self.buildConfigsToBuild:
            if 'prepareconfigfunc' in self.buildInfo:
                self.buildInfo['prepareconfigfunc'](config.name())
            for target in self.targetsToBuild:
                self.currentTarget = target
                success, numFilesBuilt = target.build(config, allFileMetaData, self.clean, self.numThreads,
                    self.noDependencies)
                if numFilesBuilt > 0:
                    print 'Storing file meta data'
                    if self.debug:
                        # Human-readable dump alongside the pickle.
                        f = open(fileMetaDataFilePath + '.debug', 'w')
                        pprint.pprint(allFileMetaData, f)
                        f.close()
                    f = open(fileMetaDataFilePath, 'wb')
                    pickle.dump(allFileMetaData, f)
                    f.close()
                if not success: break
            if not success: break
        self.currentTarget = None
        return success
    def install(self):
        "Installs the target products in the respective bin directories."
        for t in self.targetsToBuild:
            for b in self.buildConfigsToBuild:
                t.install(b)
    def handleStopSignal(self, signalNum, frame):
        "Signal handler: flag cancellation and stop the current target's build."
        self.buildCancelled = True
        if self.currentTarget: self.currentTarget.stopBuild()
# -----------------
# Main program
# -----------------
# Read environment variables.  FORAY_NUM_THREADS supplies the default for the
# -j option; setdefault also exports it for any child processes.
numThreads = os.environ.setdefault('FORAY_NUM_THREADS', '1')
# Parse input arguments
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-d", "--debug", action="store_true", dest="debug", help="Print debug info.",
                  default=False)
parser.add_option("-c", "--clean", action="store_true", dest="clean", help="Whether to rebuild completely.",
                  default=False)
parser.add_option("-j", "--threads", type="int", dest="numthreads", help="The number of threads to use.",
                  default=int(numThreads))
parser.add_option("-i", "--buildinfo", type="string", dest="buildinfofile", help="The build info file name.",
                  default='buildinfo')
parser.add_option("-b", "--buildconfig", type="string", action="append", dest="configs",
                  help="The configuration to build.")
parser.add_option("-n", "--nodepends", action="store_true", dest="nodepends", help="Do not account for file dependencies.",
                  default=False)
# Remaining positional arguments are the target names to build.
(options, targets) = parser.parse_args()
# Debugging: raise the root logger to DEBUG level when -d is given.
if options.debug: logging.getLogger('').setLevel(logging.DEBUG)
# Build info: the build description file is executed as Python and must
# define a 'buildinfo' dictionary in its globals.
import signal
buildinfoGlobals = {}
execfile(options.buildinfofile, buildinfoGlobals)
buildInfo = buildinfoGlobals['buildinfo']
# File types: configure compilers/flags for each source-file class.
FortranSourceFile.configure(buildInfo.get('fortranfiles'))
CSourceFile.configure(buildInfo.get('cfiles'))
# Project root: taken from the build info if present, else the current
# directory; exported so shell expansions in settings can reference it.
if 'projectroot' in buildInfo:
    projectRoot = ExpandShellArgs(buildInfo['projectroot'])
else:
    projectRoot = os.getcwd()
os.environ['FORAY_PROJECT_ROOT'] = projectRoot
# Create builder and build
builder = Builder(buildInfo, targets, options.configs, projectRoot, options.numthreads,
                  options.clean, options.nodepends, options.debug)
# Route interrupt-style signals to the builder so it can stop cleanly
# (finishing the current file and persisting metadata).
signal.signal(signal.SIGINT, builder.handleStopSignal)
signal.signal(signal.SIGQUIT, builder.handleStopSignal)
signal.signal(signal.SIGABRT, builder.handleStopSignal)
if options.debug:
    # In debug mode, let exceptions propagate with a full traceback.
    if builder.build(): builder.install()
else:
    try:
        if builder.build():
            builder.install()
    except Exception, e:
        # Non-debug runs show a one-line error instead of a traceback.
        print 'Foray Error: ' + str(e.args[0])
# ------------------------------------------------------------------------------------
# Copyright (c) 2008, Drew McCormack
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
# Neither the name of the Vrije Universiteit (Amsterdam) nor the names of its
# contributors may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# coding: utf-8
# Django settings for forca project.

# NOTE(review): DEBUG=True is for local development only — do not deploy
# this file as-is to production.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

# People who receive error notification e-mails when DEBUG is False.
ADMINS = (
    ('Bruno Marques', 'bomarques@inf.ufrgs.br'),
    ('Tiago Rosa', 'trsilva@inf.ufrgs.br')
)
MANAGERS = ADMINS

# Database: local SQLite file for development.
DATABASE_ENGINE = 'sqlite3'  # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = 'teste.db'   # Or path to database file if using sqlite3.
DATABASE_USER = ''           # Not used with sqlite3.
DATABASE_PASSWORD = ''       # Not used with sqlite3.
DATABASE_HOST = ''           # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = ''           # Set to empty string for default. Not used with sqlite3.

# Localization: Brazilian Portuguese, São Paulo time zone.
TIME_ZONE = 'America/Sao_Paulo'
LANGUAGE_CODE = 'pt-br'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'

# Make this unique, and don't share it with anybody.
# NOTE(review): committing a SECRET_KEY to source control is unsafe for
# production; rotate it before any real deployment.
SECRET_KEY = '-+l-z6$i^)o#ev08s39ub=ycwg3d(kt2_(b+54z&-6(w+qygcj'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
#     'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ROOT_URLCONF = 'forca.urls'

# NOTE(review): absolute Windows path — works only on the original
# developer's machine; confirm before sharing this settings file.
TEMPLATE_DIRS = (
    'C:/Documents and Settings/Bruno/Meus documentos/Programação/forca/templates/'
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.admin',
    'django.contrib.admindocs',
    'forca',
    'registration'
)

# Authentication URLs used by django.contrib.auth and django-registration.
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'  # TODO: change to the profile page once it is ready?
ACCOUNT_ACTIVATION_DAYS = 7
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.