# NOTE: this file is a concatenation of several Python source files exported
# from a dataset table; the original table header row has been removed.
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'ListMembership.rank'
db.delete_column('lists_listmembership', 'rank')
def backwards(self, orm):
# Adding field 'ListMembership.rank'
db.add_column('lists_listmembership', 'rank', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lists.item': {
'Meta': {'object_name': 'Item'},
'content': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140'})
},
'lists.list': {
'Meta': {'object_name': 'List'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lists.Item']", 'through': "orm['lists.ListMembership']", 'symmetrical': 'False'}),
'ordering': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': "''", 'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'lists.listmembership': {
'Meta': {'object_name': 'ListMembership'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lists.Item']"}),
'list': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lists.List']"})
},
'lists.photo': {
'Meta': {'object_name': 'Photo'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num_views': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'original_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'})
},
'lists.photoitem': {
'Meta': {'object_name': 'PhotoItem', '_ormbases': ['lists.Photo']},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'photo_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lists.Photo']", 'unique': 'True', 'primary_key': 'True'})
}
}
complete_apps = ['lists']
# ---- next file ----
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the ``ordering`` column to List."""

    def forwards(self, orm):
        # Adding field 'List.ordering'
        db.add_column('lists_list', 'ordering', self.gf('django.db.models.fields.CommaSeparatedIntegerField')(default='', max_length=100), keep_default=False)

    def backwards(self, orm):
        # Deleting field 'List.ordering'
        db.delete_column('lists_list', 'ordering')

    # Frozen ORM snapshot taken at the time this migration was generated.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'lists.item': {
            'Meta': {'object_name': 'Item'},
            'content': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '140'})
        },
        'lists.list': {
            'Meta': {'object_name': 'List'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lists.Item']", 'through': "orm['lists.ListMembership']", 'symmetrical': 'False'}),
            'ordering': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': "''", 'max_length': '100'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'lists.listmembership': {
            'Meta': {'object_name': 'ListMembership'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lists.Item']"}),
            'list': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lists.List']"}),
            'rank': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'lists.photo': {
            'Meta': {'object_name': 'Photo'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'num_views': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'original_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'})
        },
        'lists.photoitem': {
            'Meta': {'object_name': 'PhotoItem', '_ormbases': ['lists.Photo']},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'photo_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lists.Photo']", 'unique': 'True', 'primary_key': 'True'})
        }
    }

    complete_apps = ['lists']
# ---- next file ----
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: create the Photo model/table."""

    def forwards(self, orm):
        # Adding model 'Photo'
        db.create_table('lists_photo', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('original_image', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),
            ('num_views', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
        ))
        db.send_create_signal('lists', ['Photo'])

    def backwards(self, orm):
        # Deleting model 'Photo'
        db.delete_table('lists_photo')

    # Frozen ORM snapshot taken at the time this migration was generated.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'lists.item': {
            'Meta': {'object_name': 'Item'},
            'content': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '140'})
        },
        'lists.list': {
            'Meta': {'object_name': 'List'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lists.Item']", 'through': "orm['lists.ListMembership']", 'symmetrical': 'False'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'lists.listmembership': {
            'Meta': {'object_name': 'ListMembership'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lists.Item']"}),
            'list': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lists.List']"}),
            'rank': ('django.db.models.fields.IntegerField', [], {})
        },
        'lists.photo': {
            'Meta': {'object_name': 'Photo'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'num_views': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'original_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['lists']
# ---- next file ----
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: create the NumberedItem model/table (child of Item)."""

    def forwards(self, orm):
        # Adding model 'NumberedItem'
        db.create_table('lists_numbereditem', (
            ('item_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['lists.Item'], unique=True, primary_key=True)),
        ))
        db.send_create_signal('lists', ['NumberedItem'])

    def backwards(self, orm):
        # Deleting model 'NumberedItem'
        db.delete_table('lists_numbereditem')

    # Frozen ORM snapshot taken at the time this migration was generated.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'lists.item': {
            'Meta': {'object_name': 'Item'},
            'content': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '140'})
        },
        'lists.list': {
            'Meta': {'object_name': 'List'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lists.Item']", 'through': "orm['lists.ListMembership']", 'symmetrical': 'False'}),
            'ordering': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': "''", 'max_length': '100'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'lists.listmembership': {
            'Meta': {'object_name': 'ListMembership'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lists.Item']"}),
            'list': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lists.List']"})
        },
        'lists.numbereditem': {
            'Meta': {'object_name': 'NumberedItem', '_ormbases': ['lists.Item']},
            'item_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lists.Item']", 'unique': 'True', 'primary_key': 'True'})
        },
        'lists.photo': {
            'Meta': {'object_name': 'Photo'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'num_views': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'original_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'})
        },
        'lists.photoitem': {
            'Meta': {'object_name': 'PhotoItem', '_ormbases': ['lists.Photo']},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'photo_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lists.Photo']", 'unique': 'True', 'primary_key': 'True'})
        }
    }

    complete_apps = ['lists']
# ---- next file ----
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: initial tables for Item, List, ListMembership."""

    def forwards(self, orm):
        # Adding model 'Item'
        db.create_table('lists_item', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=140)),
            ('content', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal('lists', ['Item'])

        # Adding model 'List'
        db.create_table('lists_list', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=140)),
        ))
        db.send_create_signal('lists', ['List'])

        # Adding model 'ListMembership' (the List<->Item through table)
        db.create_table('lists_listmembership', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('item', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lists.Item'])),
            ('list', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lists.List'])),
            ('rank', self.gf('django.db.models.fields.IntegerField')()),
        ))
        db.send_create_signal('lists', ['ListMembership'])

    def backwards(self, orm):
        # Deleting model 'Item'
        db.delete_table('lists_item')

        # Deleting model 'List'
        db.delete_table('lists_list')

        # Deleting model 'ListMembership'
        db.delete_table('lists_listmembership')

    # Frozen ORM snapshot taken at the time this migration was generated.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'lists.item': {
            'Meta': {'object_name': 'Item'},
            'content': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '140'})
        },
        'lists.list': {
            'Meta': {'object_name': 'List'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lists.Item']", 'through': "orm['lists.ListMembership']", 'symmetrical': 'False'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'lists.listmembership': {
            'Meta': {'object_name': 'ListMembership'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lists.Item']"}),
            'list': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lists.List']"}),
            'rank': ('django.db.models.fields.IntegerField', [], {})
        }
    }

    complete_apps = ['lists']
# ---- next file ----
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: create the PhotoItem model/table (child of Photo)."""

    def forwards(self, orm):
        # Adding model 'PhotoItem'
        db.create_table('lists_photoitem', (
            ('photo_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['lists.Photo'], unique=True, primary_key=True)),
            ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
            ('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
        ))
        db.send_create_signal('lists', ['PhotoItem'])

    def backwards(self, orm):
        # Deleting model 'PhotoItem'
        db.delete_table('lists_photoitem')

    # Frozen ORM snapshot taken at the time this migration was generated.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'lists.item': {
            'Meta': {'object_name': 'Item'},
            'content': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '140'})
        },
        'lists.list': {
            'Meta': {'object_name': 'List'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lists.Item']", 'through': "orm['lists.ListMembership']", 'symmetrical': 'False'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'lists.listmembership': {
            'Meta': {'object_name': 'ListMembership'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lists.Item']"}),
            'list': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lists.List']"}),
            'rank': ('django.db.models.fields.IntegerField', [], {})
        },
        'lists.photo': {
            'Meta': {'object_name': 'Photo'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'num_views': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'original_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'})
        },
        'lists.photoitem': {
            'Meta': {'object_name': 'PhotoItem', '_ormbases': ['lists.Photo']},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'photo_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lists.Photo']", 'unique': 'True', 'primary_key': 'True'})
        }
    }

    complete_apps = ['lists']
# ---- next file ----
from django.conf.urls.defaults import *
from django.contrib import admin
# Auto-register all app admin modules with the admin site.
admin.autodiscover()

# URL routes for the ``lists`` app. API routes use a '\d*' capture where the
# trailing id is optional (collection vs. single-resource endpoints).
urlpatterns = patterns('lists.views',
    (r'^$', 'home'),
    (r'^(\w+)$', 'profile'),
    (r'^api/upload/photo/list/(\d*)$', 'api_upload_list_photo'),
    (r'^api/list/(\d*)$', 'api_list'),
    (r'^api/list/(\d+)/items/(\d*)$', 'api_list_items'),
    (r'^api/item/(\d+)$', 'api_item'),
    (r'^api/user/(\w+)/lists$', 'api_user_lists'),
    (r'^api/list/(\d+)/photos/(\d*)$', 'api_list_photos'),
)
class SubdomainMiddleware:
    """Middleware that exposes the request's subdomain as ``request.subdomain``."""

    def process_request(self, request):
        """Parse the subdomain out of the Host header onto the request.

        ``request.subdomain`` is ``None`` when the host has no subdomain
        (or no Host header at all).
        """
        request.subdomain = None
        host = request.META.get('HTTP_HOST', '')
        # Strip "www." so it is never treated as a subdomain.
        # NOTE(review): str.replace removes "www." anywhere in the host, not
        # just a leading prefix — presumably hosts never contain it mid-string.
        labels = host.replace('www.', '').split('.')
        if len(labels) > 2:
            # Bug fix: join with '.' so multi-level subdomains keep their
            # separators (previously 'a.b.example.com' yielded 'ab').
            request.subdomain = '.'.join(labels[:-2])
# ---- next file ----
from django.contrib import admin
from django.contrib.contenttypes import generic
from lists.models import List, Item, ListMembership, Photo, PhotoItem, OList
from tagging.models import GenericTag
# Register the models that need no custom admin options.
for simple_model in (Item, ListMembership, Photo, PhotoItem, OList):
    admin.site.register(simple_model)
class PhotoItemInline(generic.GenericTabularInline):
    # Inline editor for PhotoItem rows attached through the generic relation.
    model = PhotoItem
class TagInline(generic.GenericTabularInline):
    # Inline editor for GenericTag rows attached through the generic relation.
    model = GenericTag
class ListAdmin(admin.ModelAdmin):
    # Edit a List together with its generic tags and attached photos.
    inlines = [
        TagInline,
        PhotoItemInline,
    ]
admin.site.register(List,ListAdmin)
from django.http import HttpResponse
from django.template import RequestContext
from django.shortcuts import render_to_response, get_object_or_404
from lists.models import List, Item, ListMembership, PhotoItem
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from django.utils.simplejson import dumps, loads, JSONEncoder
from django.contrib.auth import authenticate, login
from django.contrib.auth.forms import UserCreationForm
from django.http import HttpResponseRedirect
import os
import sys
def home(request):
    """Render the landing page, choosing the template by subdomain
    (set by SubdomainMiddleware)."""
    context = RequestContext(request)
    template = 'lists/food.html' if request.subdomain == "food" else 'lists/main.html'
    return render_to_response(template, context)
def profile(request, username):
    """Show a user's profile page.

    The profile owner sees the private view; everyone else -- anonymous
    visitors included -- sees the public view.
    """
    target = get_object_or_404(User, username=username)
    context = RequestContext(request, {"my_user": target, "username": username})
    viewer_is_owner = (request.user.is_authenticated()
                       and request.user.username == username)
    if viewer_is_owner:
        return render_to_response('lists/profile.html', context)
    return render_to_response('lists/public.html', context)
def register(request):
    """Create an account, log the new user in, and redirect to their
    profile; re-render the form (with errors) on invalid input."""
    if request.method != 'POST':
        form = UserCreationForm()
    else:
        form = UserCreationForm(request.POST)
        if form.is_valid():
            new_user = form.save()
            # Re-authenticate so login() gets a backend-annotated user.
            new_user = authenticate(username=new_user.username,
                                    password=form.cleaned_data["password1"])
            login(request, new_user)
            return HttpResponseRedirect("/" + new_user.username)
    context = RequestContext(request, {"form": form})
    return render_to_response("registration/register.html", context)
@csrf_exempt
def api_upload_list_photo(request, list_id):
list = get_object_or_404(List, id=list_id)
if request.method == 'POST':
post = request.POST
name, ext = os.path.splitext(post['name'])
dest_path = "{upload_url}{ds}{file}{ext}". \
format(upload_url=settings.IMAGE_UPLOAD_URL, ds=os.sep, file=name, ext=ext)
print settings.IMAGE_UPLOAD_URL
uploaded_file = request.FILES['file']
chunk = request.REQUEST.get('chunk', '0')
chunks = request.REQUEST.get('chunks', '0')
#try:
# with open("/home/kliao/code/django/spindle" + dest_path, ('wb' if chunk == '0' else 'ab')) as f:
# for content in uploaded_file.chunks():
# f.write(content)
#except Exception,e:
# print "hahahaa", e
#file uploaded, save to db
if int(chunk) + 1 >= int(chunks):
try:
p = PhotoItem(content_object=list)
p.original_image.save(dest_path, uploaded_file, True)
except Exception,e:
print "sljfsakf", e
return HttpResponse("ok", mimetype="text/plain")
def api_list_photos(request, list_id, photo_id):
    """Photo collection endpoint for a list.

    GET    -> JSON array of {id, thumb_url, original_img_url} for every
              photo attached to the list.
    DELETE -> remove the single photo identified by photo_id.
    All other methods answer plain "Ok".
    """
    if request.method == 'GET':
        # 'photo_list' avoids shadowing the builtin list().
        photo_list = get_object_or_404(List, id=list_id)
        array_of_urls = [{"id": x.id,
                          "thumb_url": x.thumbnail_image.url,
                          "original_img_url": x.original_image.url}
                         for x in photo_list.photos.all()]
        json = dumps(array_of_urls, cls=JSONEncoder)
        return HttpResponse(json, mimetype='application/json')
    elif request.method == 'DELETE':
        photo = get_object_or_404(PhotoItem, id=photo_id)
        photo.delete()
        return HttpResponse("Ok")
    return HttpResponse("Ok")
def api_user_lists(request, username):
    """Return id/title/ordering of every list owned by *username* as JSON."""
    owner = User.objects.get(username=username)
    payload = [{"id": l.pk, "title": l.title, "ordering": l.ordering}
               for l in List.objects.filter(user=owner)]
    return HttpResponse(dumps(payload, cls=JSONEncoder),
                        mimetype='application/json')
def api_list_items(request, list_id, item_id):
    """CRUD endpoint for the items belonging to a list.

    GET    -> JSON array of the list's items.
    POST   -> create an item from the JSON body and attach it to the list.
    PUT    -> update the item named by item_id from the JSON body.
    DELETE -> remove the item named by item_id, then re-save the list.
    Unknown methods fall through and return None (kept from the original).
    """
    if request.method == 'GET':
        lst = List.objects.get(id=list_id)
        # 'done' is not persisted on Item, so it is always reported False.
        items = [{"id": x.pk, "name": x.name, "content": x.content, "done": False}
                 for x in lst.items.all()]
        return HttpResponse(dumps(items, cls=JSONEncoder),
                            mimetype='application/json')
    elif request.method == 'POST':
        data = loads(request.raw_post_data)
        item = Item.objects.create(name=data['name'], content=data['content'])
        lst = List.objects.get(id=list_id)
        ListMembership.objects.create(item=item, list=lst)
        payload = {"id": item.pk, "name": item.name, "content": item.content,
                   "done": data['done']}
        return HttpResponse(dumps(payload, cls=JSONEncoder),
                            mimetype='application/json')
    elif request.method == 'PUT':
        data = loads(request.raw_post_data)
        item = Item.objects.get(id=item_id)
        item.name = data['name']
        item.content = data['content']
        item.save()
        payload = {"id": item.pk, "name": item.name, "content": item.content,
                   "done": data['done']}
        return HttpResponse(dumps(payload, cls=JSONEncoder),
                            mimetype='application/json')
    elif request.method == 'DELETE':
        Item.objects.get(id=item_id).delete()
        # Re-save the parent list so any dependent state is refreshed.
        List.objects.get(id=list_id).save()
        return HttpResponse("OK")
def api_list(request, id):
    """CRUD endpoint for a single List.

    GET    -> JSON {id, title, ordering}.
    POST   -> create a list owned by the current user from the JSON body.
    PUT    -> update title/ordering from the JSON body.
    DELETE -> delete the list.
    PUT, DELETE and unknown methods answer plain "OK".
    """
    if request.method == 'GET':
        lst = List.objects.get(id=id)
        payload = {"id": lst.pk, "title": lst.title, "ordering": lst.ordering}
        return HttpResponse(dumps(payload, cls=JSONEncoder),
                            mimetype='application/json')
    elif request.method == 'POST':
        # NOTE(review): a Django serializer may be a better fit here.
        data = loads(request.raw_post_data)
        lst = List.objects.create(title=data['title'], user=request.user)
        payload = {"id": lst.pk, "title": lst.title}
        return HttpResponse(dumps(payload, cls=JSONEncoder),
                            mimetype='application/json')
    elif request.method == 'PUT':
        data = loads(request.raw_post_data)
        lst = List.objects.get(id=id)
        lst.title = data['title']
        lst.ordering = data['ordering']
        lst.save()
    elif request.method == 'DELETE':
        List.objects.get(id=id).delete()
    return HttpResponse("OK")
def api_item(request, id):
    """CRUD endpoint for a single Item.

    GET returns the item as JSON; POST creates one from the JSON body;
    PUT and DELETE are unfinished stubs. Non-GET methods answer "OK".
    """
    if request.method == 'GET':
        item = Item.objects.get(id=id)
        payload = {"id": item.pk, "name": item.name, "content": item.content,
                   "done": False}
        return HttpResponse(dumps(payload, cls=JSONEncoder),
                            mimetype='application/json')
    elif request.method == 'POST':
        # NOTE(review): a Django serializer may be a better fit here.
        data = loads(request.raw_post_data)
        # loads() yields a dict, so subscript access is required; the old
        # attribute access (data.name / data.content) raised AttributeError.
        Item.objects.create(name=data['name'], content=data['content'])
    elif request.method == 'PUT':
        data = loads(request.raw_post_data)
        # Fetched but not modified -- PUT support is unfinished.
        Item.objects.get(id=data['id'])
    elif request.method == 'DELETE':
        pass
    return HttpResponse("OK")
#!/usr/bin/env python
# Standard Django 1.x manage.py bootstrap. Note that execute_manager is
# deprecated in later Django releases.
from django.core.management import execute_manager
import imp
try:
    imp.find_module('settings') # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
    sys.exit(1)
import settings
if __name__ == "__main__":
    execute_manager(settings)
| Python |
#!/usr/bin/env python
# Standard Django 1.x manage.py bootstrap. Note that execute_manager is
# deprecated in later Django releases.
from django.core.management import execute_manager
import imp
try:
    imp.find_module('settings') # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
    sys.exit(1)
import settings
if __name__ == "__main__":
    execute_manager(settings)
| Python |
from django.conf.urls.defaults import patterns, include, url
import os
from django.conf import settings

# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()

# Project-level routes: admin, auth views, then everything in the lists app.
urlpatterns = patterns('',
    # Examples:
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^login/$', 'django.contrib.auth.views.login'),
    url(r'^logout/$', 'django.contrib.auth.views.logout'),
    url(r'^register/$', 'lists.views.register'),
    url(r'^', include('lists.urls')),
)

# Serve uploaded media via Django itself, but only during development.
if settings.DEBUG:
    urlpatterns += patterns('',
        url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
            'document_root': settings.MEDIA_ROOT,
        }),
    )
| Python |
# Django settings for spindle project.
import os
# NOTE(review): DEBUG is hard-coded True -- must be False in production.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
    'default': {
        'ENGINE': 'postgresql_psycopg2', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'spindle_db', #'/home/kliao/code/django/spindle/sqlite.db', # Or path to database file if using sqlite3.
        'USER': 'postgres', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Added for convenience
SITE_ROOT = os.path.abspath(os.path.dirname(__file__))
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(SITE_ROOT, 'upload')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(SITE_ROOT, 'static'),
    MEDIA_ROOT,
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
#    'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): a live secret key is committed here -- rotate it and load
# it from the environment instead of version control.
SECRET_KEY = '*4z$b7lbu@huj*-4qvxiu5*%am2_qcy_mcxuiwdje7@ut=xr+!'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
#     'django.template.loaders.eggs.Loader',
)
# SubdomainMiddleware (lists app) sets request.subdomain on every request.
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'lists.middleware.SubdomainMiddleware'
    #'lists.views.SerializationMiddleware',
)
ROOT_URLCONF = 'spindle.urls'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(SITE_ROOT, 'templates')
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    'django.contrib.gis',
    'imagekit',
    'south',
    'lists',
    'common',
    'tagging'
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
# NOTE(review): despite the 'URL' name, this is a filesystem path under
# MEDIA_ROOT (it is used with os.sep in lists.views).
IMAGE_UPLOAD_URL = os.path.join(MEDIA_ROOT, 'images')
from django.db import models
from common.models import NameSlugModel
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
class TagManager(models.Manager):
    # Enables (de)serialisation of Tag by natural key (its name) instead of pk.
    def get_by_natural_key(self, name):
        return self.get(name=name)
class Tag(NameSlugModel):
    """A tag related to other tags through Tag_Relation edges, forming
    'is a' (taxonomy) and 'has a' (composition) hierarchies."""
    related_tags = models.ManyToManyField('self', blank=True, symmetrical=False, through='Tag_Relation')
    objects = TagManager()
    def natural_key(self):
        # NOTE(review): the replace() looks like an entity-escaping
        # workaround for apostrophes -- confirm consumers expect it.
        return {'name' : self.name.replace("'", " & # 8 s "), 'id' : self.pk}
    def rec_lookup(self, f, st):
        """Recursively accumulate into *st* every tag reachable through the
        accessor *f* (e.g. sub_types), following the same-named accessor on
        each discovered tag."""
        for obj in f():
            st.add(obj)
            obj.rec_lookup(getattr(obj, f.__name__), st)
    def sub_types(self):
        # Tags X such that 'X is a self'.
        return set([rel.source for rel in self.source_relation_set.all() if rel.is_a])
    def super_types(self):
        # Tags X such that 'self is a X'.
        return set([rel.target for rel in self.target_relation_set.all() if rel.is_a])
    def sub_objects(self):
        # Tags X such that 'self has a X'.
        return set([rel.target for rel in self.target_relation_set.all() if rel.has_a])
    def super_objects(self):
        # Tags X such that 'X has a self'.
        return set([rel.source for rel in self.source_relation_set.all() if rel.has_a])
    def has_related_tags(self):
        """True if this tag has any direct relation in either hierarchy.

        Fixed: the accessors must be *called* -- the previous code or'ed the
        bound methods themselves, which made the result always truthy.
        """
        return bool(self.sub_types() or self.super_types()
                    or self.sub_objects() or self.super_objects())
    def rec_sub_types(self):
        st = set([])
        self.rec_lookup(self.sub_types, st)
        return st
    def rec_super_types(self):
        st = set([])
        self.rec_lookup(self.super_types, st)
        return st
    def rec_sub_objects(self):
        st = set([])
        self.rec_lookup(self.sub_objects, st)
        return st
    def rec_super_objects(self):
        st = set([])
        self.rec_lookup(self.super_objects, st)
        return st
    def all_related_tags(self):
        """Every tag transitively reachable through any relation kind."""
        return self.rec_sub_types() | self.rec_super_types() | self.rec_sub_objects() | self.rec_super_objects()
    def add_sub_type(self, sub_type):
        # NOTE(review): the add_* helpers return *unsaved* Tag_Relation
        # instances; the caller must save() them.
        return Tag_Relation(source=sub_type, target=self, is_a=True)
    def add_super_type(self, super_type):
        return Tag_Relation(source=self, target=super_type, is_a=True)
    def add_sub_object(self, sub_object):
        return Tag_Relation(source=self, target=sub_object, has_a=True)
    def add_super_object(self, super_object):
        return Tag_Relation(source=super_object, target=self, has_a=True)
    def __unicode__(self):
        return self.name
class Tag_Relation(models.Model):
    """Directed edge between two tags: an 'is a' (taxonomy) or a 'has a'
    (composition) relationship from source to target."""
    source = models.ForeignKey(Tag, related_name='target_relation_set')
    target = models.ForeignKey(Tag, related_name='source_relation_set')
    is_a = models.BooleanField(default=False)   # True if source is a target
    has_a = models.BooleanField(default=False)  # True if source has a target

    class Meta:
        unique_together = ("source", "target")

    def __unicode__(self):
        if self.is_a:
            return self.source.name + " is a type of " + self.target.name
        if self.has_a:
            return self.source.name + " consists of " + self.target.name
        return "error"
class GenericTag(Tag):
    # Tag attached to an arbitrary model instance via the contenttypes
    # framework (generic foreign key).
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    content_object = generic.GenericForeignKey()
| Python |
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    """Placeholder test case generated by startapp."""
    def test_basic_addition(self):
        """Sanity check that the test runner works: 1 + 1 == 2."""
        self.assertEqual(2, 1 + 1)
| Python |
# Create your views here.
| Python |
# Gunicorn deployment configuration.
bind = "127.0.0.1:8888"  # Listen on localhost only (behind a reverse proxy).
logfile = "/home/ubuntu/code/spindle/gunicorn.log"
workers = 3
| Python |
from django.db import models
from django.template.defaultfilters import slugify
from django.db import IntegrityError, transaction
class NameSlugModel(models.Model):
    # Abstract base that maintains a unique slug derived from `name`.
    name = models.CharField(max_length=255)
    slug = models.SlugField(unique=True, editable=False)
    class Meta:
        abstract = True
    def save(self, *args, **kwargs):
        """
        Based on the Tag save() method in django-taggit, this method simply
        stores a slugified version of the name, ensuring that the unique
        constraint is observed
        """
        # Retry with 'slug_1', 'slug_2', ... until the INSERT succeeds;
        # savepoints keep the enclosing transaction usable after each
        # failed attempt.
        self.slug = slug = slugify(self.name)
        i = 0
        while True:
            try:
                savepoint = transaction.savepoint()
                res = super(NameSlugModel, self).save(*args, **kwargs)
                transaction.savepoint_commit(savepoint)
                return res
            except IntegrityError:
                # NOTE(review): any IntegrityError (not only a slug clash)
                # triggers a retry here -- confirm that is intended.
                transaction.savepoint_rollback(savepoint)
                i += 1
                self.slug = '%s_%d' % (slug, i)
| Python |
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    """Placeholder test case generated by startapp."""
    def test_basic_addition(self):
        """Sanity check that the test runner works: 1 + 1 == 2."""
        self.assertEqual(2, 1 + 1)
| Python |
# Create your views here.
| Python |
#!/usr/bin/env python
from django.core.management import execute_manager
import imp
try:
imp.find_module('settings') # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
sys.exit(1)
import settings
if __name__ == "__main__":
execute_manager(settings)
| Python |
from django.core import serializers
def serialize_dishes(dishes):
    # Serialise dishes to JSON using the wadofstuff serializer's
    # extras/relations extensions (registered through the project's
    # SERIALIZATION_MODULES setting) -- the stock Django serializer does
    # not accept these keyword arguments.
    #, extras=('get_thumbnail_url', 'medium_img_url', 'is_liked_by_curr_user', 'is_disliked_by_curr_user', 'is_okd_by_curr_user', 'get_place_name'), relations=('dishimage_set',), use_natural_keys=True)
    return serializers.serialize('json', dishes, indent=4, extras=('get_locations', 'get_display_image_src', 'get_thumbnail_image_src', ), relations=('place', 'pic', ))
#def serialize_ranks(ranks):
# return serializers.serialize('json', ranks, use_natural_keys=True)
# def serialize_reviews(reviews, user):
# return serializers.serialize('json', reviews, extras=('get_user_profile_icon_url', 'get_username'), use_natural_keys=True) # extras must be list!!! i.e. add a comma after first string
#def serialize_dish_photos_urls(photos):
# return serializers.serialize('json', photos, extras=('get_thumbnail_url',), use_natural_keys=True) | Python |
from django.core.serializers.json import DateTimeAwareJSONEncoder
from django.db import models
from django.utils.functional import Promise
from django.utils.encoding import force_unicode
# from django.utils import simplejson as json
import cjson
from decimal import Decimal
from django.core import serializers
from django.http import HttpResponse, HttpResponseForbidden, Http404
from django.core.mail import mail_admins
from django.db.models.query import QuerySet
import sys
def decode(data):
    # Decode a JSON string with cjson. Falsy input (None, '') is returned
    # unchanged rather than raising.
    if not data:
        return data
#    return json.loads(data)
    try:
        # Prefer strict UTF-8; fall back to cjson's default on failure.
        return cjson.decode(data, encoding='utf-8')
    except cjson.DecodeError:
        return cjson.decode(data)
def encode(data, *args, **kwargs):
    # Serialise *data* to JSON: Django QuerySets go through the Django
    # serializer, everything else through cjson. The exact type() check is
    # deliberate per the inline comment -- do not switch to isinstance().
    if type(data) == QuerySet: # Careful, ValuesQuerySet is a dict
        # Django models
        return serializers.serialize("json", data, *args, **kwargs)
    else:
        # Unknown object types are stringified by the extension hook.
        return cjson.encode(data, encoding='utf-8', key2str=True,
                            extension=lambda x: "\"%s\"" % str(x))
#        return json_encode(data, *args, **kwargs)
def json_encode(data, *args, **kwargs):
    """
    The main issues with django's default json serializer is that properties that
    had been added to an object dynamically are being ignored (and it also has
    problems with some models).
    """
    def _any(data):
        # Dispatch on the runtime type and convert to JSON-friendly values.
        ret = None
        # Opps, we used to check if it is of type list, but that fails
        # i.e. in the case of django.newforms.utils.ErrorList, which extends
        # the type "list". Oh man, that was a dumb mistake!
        if isinstance(data, list):
            ret = _list(data)
        # Same as for lists above.
        elif isinstance(data, dict):
            ret = _dict(data)
        elif isinstance(data, Decimal):
            # json.dumps() cant handle Decimal
            ret = str(data)
        elif isinstance(data, models.query.QuerySet):
            # Actually its the same as a list ...
            ret = _list(data)
        elif isinstance(data, models.Model):
            ret = _model(data)
        # here we need to encode the string as unicode (otherwise we get utf-16 in the json-response)
        elif isinstance(data, basestring):
            ret = unicode(data)
        # see http://code.djangoproject.com/ticket/5868
        elif isinstance(data, Promise):
            ret = force_unicode(data)
        else:
            ret = data
        return ret
    def _model(data):
        # Serialise a model instance: concrete fields first, then any
        # attributes that were added to the instance dynamically.
        ret = {}
        # If we only have a model, we only want to encode the fields.
        for f in data._meta.fields:
            ret[f.attname] = _any(getattr(data, f.attname))
        # And additionally encode arbitrary properties that had been added.
        fields = dir(data.__class__) + ret.keys()
        add_ons = [k for k in dir(data) if k not in fields]
        for k in add_ons:
            ret[k] = _any(getattr(data, k))
        return ret
    def _list(data):
        ret = []
        for v in data:
            ret.append(_any(v))
        return ret
    def _dict(data):
        ret = {}
        for k,v in data.items():
            ret[str(k)] = _any(v)
        return ret
    ret = _any(data)
    # (A stray debug `print str(ret)` used to spam stdout here; removed.)
#    return json.dumps(ret)
    return cjson.encode(ret)#, encoding='utf-8', extension=lambda x: "\"%s\"" % str(x))
def json_view(func):
    """Decorator turning a view that returns a dict into a JSON response.

    Success dicts get a default 'result': 'ok' entry. Uncaught exceptions
    are mailed to the admins and reported as a JSON error payload with
    HTTP 500. Http404 propagates; HttpResponseForbidden passes through
    unchanged.
    """
    def wrap(request, *a, **kw):
        response = None
        code = 200
        try:
            response = func(request, *a, **kw)
            if isinstance(response, dict):
                # Copy before mutating so the view's own dict is untouched.
                response = dict(response)
                if 'result' not in response:
                    response['result'] = 'ok'
        except KeyboardInterrupt:
            # Allow keyboard interrupts through for debugging.
            raise
        except Http404:
            raise Http404
        except Exception, e:
            # Mail the admins with the error
            exc_info = sys.exc_info()
            subject = 'JSON view error: %s' % request.path
            try:
                request_repr = repr(request)
            except:
                request_repr = 'Request repr() unavailable'
            import traceback
            message = 'Traceback:\n%s\n\nRequest:\n%s' % (
                '\n'.join(traceback.format_exception(*exc_info)),
                request_repr,
                )
            # print message
            mail_admins(subject, message, fail_silently=True)
            response = {'result': 'error',
                        'text': unicode(e)}
            code = 500
        if isinstance(response, HttpResponseForbidden):
            return response
        json = json_encode(response)
        return HttpResponse(json, mimetype='application/json', status=code)
    # NOTE(review): func is the function being decorated, so this
    # isinstance check looks vestigial -- confirm before removing.
    if isinstance(func, HttpResponse):
        return func
    else:
        return wrap
def main():
test = {1: True, 2: u"string", 3: 30}
json_test = json_encode(test)
print test, json_test
if __name__ == '__main__':
main() | Python |
#!/usr/bin/env python
# Standard Django 1.x manage.py bootstrap. Note that execute_manager is
# deprecated in later Django releases.
from django.core.management import execute_manager
import imp
try:
    imp.find_module('settings') # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
    sys.exit(1)
import settings
if __name__ == "__main__":
    execute_manager(settings)
| Python |
from django.conf.urls.defaults import patterns, include, url
from django.conf import settings

# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()

# Project-level routes: admin, auth views, then everything in apps.food.
urlpatterns = patterns('',
    # Examples:
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^accounts/login$', 'django.contrib.auth.views.login'),
    url(r'^accounts/logout$', 'django.contrib.auth.views.logout'),
    url(r'^accounts/register$', 'apps.food.views.register'),
    url(r'^', include('apps.food.urls')),
)

# Serve uploaded media via Django itself, but only during development.
if settings.DEBUG:
    urlpatterns += patterns('',
        url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
            'document_root': settings.MEDIA_ROOT,
        }),
    )
| Python |
# Django settings for totm project.
import os
# NOTE(review): DEBUG is hard-coded True -- must be False in production.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
SITE_ROOT = os.path.abspath(os.path.dirname(__file__))
MANAGERS = ADMINS
DATABASES = {
    'default': {
        'ENGINE': 'postgresql_psycopg2', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'totm_db', # Or path to database file if using sqlite3.
        'USER': 'postgres', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(SITE_ROOT, 'upload')
DEFAULT_PROFILE_PIC_FILENAME = os.path.join(SITE_ROOT, 'static/img/default.jpg')
DEBUG_MEDIA_URL = 'http://0.0.0.0:8000/media/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
# NOTE(review): MEDIA_URL is empty here while DEBUG_MEDIA_URL above is used
# when DEBUG -- confirm production media serving before deploying.
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(SITE_ROOT, 'static'),
    os.path.join(SITE_ROOT, 'cache'),
    os.path.join(SITE_ROOT, 'upload')
)
ADMIN_MEDIA_ROOT = os.path.join(SITE_ROOT, 'upload')
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
#    'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): a live secret key is committed here -- rotate it and load
# it from the environment instead of version control.
SECRET_KEY = '_ze%)ger#0wb0)wu!bwyc$x4=fzoth6$k3u3mbm$c^-r^!e)s_'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
#     'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'totm.urls'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(SITE_ROOT, 'templates')
)
# Includes a project-specific processor that injects URL constants into
# every template context.
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'django.contrib.messages.context_processors.messages',
    'totm.apps.food.views.urls_consts_processor'
)
AUTH_PROFILE_MODULE = 'food.UserProfile'
LOGIN_URL = '/accounts/login'
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    'django.contrib.gis',
    'south',
    'apps.food',
)
SERIALIZATION_MODULES = {
    'json': 'vendor.wads_serializer.json' # wadofstuff serializer
}
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
| Python |
from django import forms
from models import Dish
class DishForm(forms.ModelForm):
    # ModelForm exposing every editable field of Dish (no explicit
    # `fields` restriction).
    class Meta:
        model = Dish
class DishReviewForm(forms.Form):
    # User review of a dish: numeric rating, free-text body, optional photo.
    rating = forms.DecimalField()
    text = forms.CharField(widget=forms.Textarea(attrs={'size' : '50'}))
    img = forms.ImageField(required=False)
#class UploadImageForm(forms.Form):
# img = forms.ImageField() | Python |
from imagekit.specs import ImageSpec
from imagekit import processors
# first we define our thumbnail resize processor
class ResizeThumb(processors.Resize):
    # 150x150 cropped thumbnail.
    width = 150
    height = 150
    crop = True
# now we define a display size resize processor
class ResizeDisplay(processors.Resize):
    # Scale to 600px wide; no height/crop set here.
    width = 600
# now let's create an adjustment processor to enhance the image at small sizes
class EnchanceThumb(processors.Adjustment):
    # Slightly boost contrast/sharpness so small thumbnails stay crisp.
    # (The 'Enchance' typo is kept: Thumbnail below refers to this name.)
    contrast = 1.2
    sharpness = 1.1
# now we can define our thumbnail spec
class Thumbnail(ImageSpec):
    # Exposed on the model as `.thumbnail`; generated eagerly (pre_cache).
    access_as = 'thumbnail'
    pre_cache = True
    processors = [ResizeThumb, EnchanceThumb]
# and our display spec
class Display(ImageSpec):
    # Display-size spec; accessing it increments the image's counter field.
    increment_count = True
    processors = [ResizeDisplay]
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.core.files import File
from django.db.models.signals import post_save, post_delete
from django.db import models, IntegrityError, transaction
from django.template.defaultfilters import slugify
from imagekit.models import ImageModel
from datetime import datetime
class NameSlugModel(models.Model):
    # Abstract base that maintains a unique slug derived from `name`.
    # (Duplicated from common.models -- consider importing it instead.)
    name = models.CharField(max_length=255)
    slug = models.SlugField(unique=True, editable=False)
    class Meta:
        abstract = True
    def save(self, *args, **kwargs):
        """
        Based on the Tag save() method in django-taggit, this method simply
        stores a slugified version of the name, ensuring that the unique
        constraint is observed
        """
        # Retry with 'slug_1', 'slug_2', ... until the INSERT succeeds;
        # savepoints keep the enclosing transaction usable after each
        # failed attempt.
        self.slug = slug = slugify(self.name)
        i = 0
        while True:
            try:
                savepoint = transaction.savepoint()
                res = super(NameSlugModel, self).save(*args, **kwargs)
                transaction.savepoint_commit(savepoint)
                return res
            except IntegrityError:
                transaction.savepoint_rollback(savepoint)
                i += 1
                self.slug = '%s_%d' % (slug, i)
class Photo(ImageModel):
    """Image attached to an arbitrary model instance via a generic FK."""
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    content_object = generic.GenericForeignKey()
    original_image = models.ImageField(upload_to='photos')
    num_views = models.PositiveIntegerField(editable=False, default=0)
    user = models.ForeignKey(User, null=True)
    def admin_image(self):
        """Return an <img> tag for the admin list display."""
        if settings.DEBUG:
            return '<img src="%s%s"/>' % (settings.DEBUG_MEDIA_URL, self.thumbnail.url)
        else:
            # Fixed: the closing quote of the src attribute was missing,
            # which produced broken HTML ('<img src="... />').
            return '<img src="%s%s"/>' % (settings.MEDIA_URL, self.thumbnail.url)
    admin_image.allow_tags = True
    class IKOptions:
        spec_module = 'apps.food.specs'
        image_field = 'original_image'
        save_count_as = 'num_views'
def create_profile_photo(user):
    """Create a default ProfilePhoto for ``user``, attach it to the profile.

    Returns the saved ProfilePhoto so callers (e.g. UserProfile.save) can
    use the value instead of getting None.
    """
    # Open in binary mode: image bytes are not text, and 'r' can corrupt
    # them on platforms that translate newlines.
    with open(settings.DEFAULT_PROFILE_PIC_FILENAME, 'rb') as f:
        p = ProfilePhoto(content_object=user, original_image=File(f), user=user)
        p.save()
    profile = user.get_profile()
    profile.pic = p
    profile.save()
    return p
def create_dish_photo(dish):
    """Attach the default placeholder photo to ``dish`` and return it."""
    # binary mode: image bytes, not text
    with open(settings.DEFAULT_PROFILE_PIC_FILENAME, 'rb') as f:
        p = Photo(content_object=dish, original_image=File(f))
        p.save()
    dish.pic = p
    dish.save()
    # return the photo so Dish.save()'s `self.pic = create_dish_photo(self)`
    # assignment actually receives a value instead of None
    return p
def create_profile_photo_receiver(sender, **kw):
    """post_delete receiver: restore a default profile photo after the
    user's ProfilePhoto is deleted (debug print removed)."""
    user = kw["instance"].content_object
    if not user.get_profile().pic:
        create_profile_photo(user)
def create_profile_receiver(sender, **kw):
    """post_save receiver: ensure every newly created User gets a UserProfile."""
    user = kw["instance"]
    if kw["created"] and not UserProfile.objects.filter(user=user):
        up = UserProfile(user=user)
        up.save()
    # p = create_profile_photo(user)
    # up = UserProfile(user=user, pic=p)
    # up.save()
# registered immediately after definition so User creation always fires it
post_save.connect(create_profile_receiver, sender=User)
class ProfilePhoto(Photo):
    """Photo subclass rendered with the profile-specific imagekit specs."""
    class IKOptions:
        spec_module = 'apps.food.specs_profile'
        image_field = 'original_image'
        save_count_as = 'num_views'
# deleting a profile photo regenerates a default one if none remains
post_delete.connect(create_profile_photo_receiver, sender=ProfilePhoto)
class UserProfile(models.Model):
    """Per-user profile that owns the profile picture."""
    user = models.OneToOneField(User)
    pic = models.ForeignKey(ProfilePhoto, on_delete=models.SET_NULL, null=True, blank=True)
    def save(self, *args, **kwargs):
        # persist first so the profile row exists before a photo is attached
        # (leftover 'HAHAHAHAH' debug print removed)
        super(UserProfile, self).save(*args, **kwargs)
        if not self.pic:
            # NOTE(review): create_profile_photo() fetches and saves the
            # profile via user.get_profile(); this local assignment is not
            # re-saved here -- confirm the intended flow.
            self.pic = create_profile_photo(self.user)
    def __unicode__(self):
        return self.user.username
class Place(NameSlugModel):
    ''' A venue that serves food, e.g., restaurant, chain, cart, etc. '''
    # name of the concrete subclass ('chain' or 'restaurant'); set by each
    # subclass's save() and used below to dispatch to the child instance
    derivedTypeName = models.CharField(max_length=30, blank=True)
    def get_display_name(self):
        # delegate to the concrete subclass
        return self.__dispatcher().get_display_name()
    def get_locations(self):
        return self.__dispatcher().get_locations()
    def __dispatcher(self):
        # with multi-table inheritance the child row is reachable as an
        # attribute named after the subclass (e.g. self.restaurant)
        return getattr(self, str(self.derivedTypeName))
    def __unicode__(self):
        return self.__dispatcher().__unicode__()
class Chain(Place):
    ''' A collection of restaurants under the same name'''
    def get_display_name(self):
        return self.name + ' (multiple locations)'
    def get_locations(self):
        # one lat/lon dict per member restaurant
        return [{"lat": restaurant.lat, "lon": restaurant.lon} for restaurant in self.restaurant_set.all()]
    def save(self, *args, **kwargs):
        # tag the row so Place.__dispatcher can find the child instance
        self.derivedTypeName = 'chain'
        super(Chain, self).save(*args, **kwargs)
    def __unicode__(self):
        return self.name + ' (chain)'
class Restaurant(Place):
    """A single physical location, optionally belonging to a Chain."""
    address = models.CharField(max_length=140, blank=True)
    # Fixed: auto_now_add/auto_now already populate these on save; the old
    # default=datetime.now() was evaluated ONCE at import time (a stale,
    # fixed timestamp) and conflicts with the auto_* options.
    date_added = models.DateTimeField(auto_now_add=True)
    last_modified = models.DateTimeField(auto_now=True)
    chain = models.ForeignKey(Chain, null=True, blank=True)
    lat = models.FloatField()
    lon = models.FloatField()
    def get_display_name(self):
        return self.name + ' (restaurant)'
    def get_locations(self):
        return {"lat": self.lat, "lon": self.lon}
    def save(self, *args, **kwargs):
        # tag the row so Place.__dispatcher can find the child instance
        self.derivedTypeName = 'restaurant'
        super(Restaurant, self).save(*args, **kwargs)
    def __unicode__(self):
        return self.name + ' (' + str(self.address) + ')'
class Dish(NameSlugModel):
    """A dish served at a place, with photos, categories and user votes."""
    desc = models.TextField(null=True, blank=True)
    photos = generic.GenericRelation(Photo, null=True, blank=True, related_name="wtf")
    pic = models.ForeignKey(Photo, on_delete=models.SET_NULL, null=True, blank=True)
    categories = models.ManyToManyField('Category', through='CategoryMembership', null=True, blank=True)
    users_voted_good = models.ManyToManyField(User, related_name='dish_set_good', null=True, blank=True)
    users_voted_bad = models.ManyToManyField(User, related_name='dish_set_bad', null=True, blank=True)
    place = models.ForeignKey(Place, null=True, blank=True)
    # used in serializer
    def get_locations(self):
        return self.place.get_locations()
    # used in serializer
    def get_display_image_src(self):
        return self.pic.display.url
    # used in serializer
    def get_thumbnail_image_src(self):
        return self.pic.thumbnail.url
    def save(self, *args, **kwargs):
        # save first so the dish has a pk before a default photo is attached
        # (leftover debug print removed)
        super(Dish, self).save(*args, **kwargs)
        if not self.pic:
            # NOTE(review): relies on create_dish_photo() returning the photo
            # and saving the dish with it -- confirm against that helper.
            self.pic = create_dish_photo(self)
    def __unicode__(self):
        return self.name + ' at ' + str(self.place)
class Category(NameSlugModel):
    """A grouping of dishes (membership held in CategoryMembership)."""
    dishes = models.ManyToManyField(Dish, through='CategoryMembership')
    # serialized ordering of dish ids; presumably maintained elsewhere --
    # TODO confirm what writes this
    ranking = models.CommaSeparatedIntegerField(max_length=100, default="", blank=True)
    def __unicode__(self):
        return self.name
class CategoryMembership(models.Model):
    """Through table linking a Dish to a Category."""
    dish = models.ForeignKey(Dish)
    category = models.ForeignKey(Category)
    def __unicode__(self):
        return str(self.dish) + " in " + str(self.category)
class Review(models.Model):
    """A user's rated, dated review of a dish."""
    dish = models.ForeignKey(Dish)
    profile = models.ForeignKey(UserProfile)
    text = models.TextField()
    date_added = models.DateField(auto_now_add=True)
    last_modified = models.DateField(auto_now=True)
    slug = models.SlugField(unique=True, editable=False)
    rating = models.DecimalField(decimal_places=2, max_digits=5)
    def save(self, *args, **kwargs):
        """
        Based on the Tag save() method in django-taggit, this method simply
        stores a slugified version of the name, ensuring that the unique
        constraint is observed
        """
        # slug combines reviewer, dish and place to be (mostly) unique
        self.slug = slug = slugify(' '.join((self.profile.user.username, self.dish.name, self.dish.place.name)))
        i = 0
        while True:
            try:
                # savepoint lets us retry inside any enclosing transaction
                savepoint = transaction.savepoint()
                res = super(Review, self).save(*args, **kwargs)
                transaction.savepoint_commit(savepoint)
                return res
            except IntegrityError:
                # slug collision: append an incrementing suffix and retry
                transaction.savepoint_rollback(savepoint)
                i += 1
                self.slug = '%s_%d' % (slug, i)
from django.conf.urls.defaults import patterns
from django.contrib import admin
admin.autodiscover()
# URL path prefixes shared between the urlpatterns below and the views'
# redirect builders (views import this as `urlconsts`)
consts = {
    'CATEGORY' : 'c',
    'DISH' : 'd',
    'PLACE' : 'p',
    'PROFILE' : 'u',
    'EDIT_REVIEW' : 'edit_review',
    'ADD_REVIEW' : 'add_review',
    'DELETE_PIC' : 'delete/photo',
    'RATE' : 'rate'
}
# Route table for the food app; path prefixes come from the consts dict above.
urlpatterns = patterns('apps.food.views',
    (r'^$', 'home'),
    (r'^' + consts['CATEGORY'] + '/([\w\-]+)$', 'category'),
    (r'^' + consts['DISH'] + '/([\w\-]+)$', 'dish'),
    (r'^' + consts['PLACE'] + '/([\w\-]+)$', 'place'),
    (r'^' + consts['PROFILE'] + '/([\w\-\_\@\.\+]+)$', 'profile'),
    #(r'^([\w\-]+)/([\w\-]+)$', 'display_review'),
    (r'^' + consts['EDIT_REVIEW'] + '/(\d+)$', 'edit_review'),
    (r'^' + consts['ADD_REVIEW'] + '/(\d+)$', 'add_review'),
    (r'^add/dish$', 'add_dish'),
    (r'^' + consts['DELETE_PIC'] + '/(\d+)$', 'delete_photo'),
    (r'^' + consts['RATE'] + '/(?P<id>\d+)/(?P<rating>good|bad)$', 'rate'))
from django.contrib import admin
from django.contrib.contenttypes import generic
from apps.food.models import Category, CategoryMembership, Dish, UserProfile, Review, Place, Chain, Restaurant, Photo, ProfilePhoto
class PhotoAdmin(admin.ModelAdmin):
    """Admin listing photos with their target object and rendered thumbnail."""
    list_display = ('content_object', 'content_type', 'admin_image')
    exclude = ('object_id',)
class ProfilePhotoInline(generic.GenericTabularInline):
    """Inline profile photos on the UserProfile admin page."""
    model = ProfilePhoto
    extra = 0
class ProfileAdmin(admin.ModelAdmin):
    """UserProfile admin with its photos editable inline."""
    inlines = [
        ProfilePhotoInline,
    ]
class PhotoInline(generic.GenericTabularInline):
    """Inline generic photos on the Dish admin page."""
    model = Photo
    extra = 1
class DishAdmin(admin.ModelAdmin):
    """Dish admin with its photos editable inline."""
    inlines = [
        PhotoInline,
    ]
class RestaurantAdmin(admin.ModelAdmin):
    """Hide derivedTypeName: it is set automatically by Restaurant.save()."""
    exclude = ('derivedTypeName',)
class RestaurantInline(admin.TabularInline):
    """Edit a chain's member restaurants inline on the Chain page."""
    model = Restaurant
    fk_name = 'chain'
    extra = 1
class ChainAdmin(admin.ModelAdmin):
    """Chain admin; derivedTypeName is set automatically by Chain.save()."""
    inlines = [ RestaurantInline, ]
    exclude = ('derivedTypeName',)
# register the food app's models with their ModelAdmin customizations
admin.site.register(Category)
admin.site.register(CategoryMembership)
admin.site.register(Dish, DishAdmin)
admin.site.register(UserProfile, ProfileAdmin)
admin.site.register(Review)
admin.site.register(Place)
admin.site.register(Restaurant, RestaurantAdmin)
admin.site.register(Chain, ChainAdmin)
admin.site.register(Photo, PhotoAdmin)
admin.site.register(ProfilePhoto)
from imagekit.specs import ImageSpec
from imagekit import processors
# first we define our thumbnail resize processor
class ResizeThumb(processors.Resize):
    """Crop-resize profile images to an exact 35x35 thumbnail."""
    width = 35
    height = 35
    crop = True
# now we define a display size resize processor
class ResizeProfile(processors.Resize):
    """Scale to 150px wide for the profile-page size (height keeps ratio)."""
    width = 150
# now we define a display size resize processor
class ResizeDisplay(processors.Resize):
    """Scale to 600px wide for the main display size."""
    width = 600
# now let's create an adjustment processor to enhance the image at small sizes
class EnchanceThumb(processors.Adjustment):
    """Boost contrast/sharpness so tiny thumbnails stay readable."""
    contrast = 1.2
    sharpness = 1.1
# now we can define our thumbnail spec
class Thumbnail(ImageSpec):
    """Pre-cached 35x35 enhanced thumbnail, exposed as `.thumbnail`."""
    access_as = 'thumbnail'
    pre_cache = True
    processors = [ResizeThumb, EnchanceThumb]
class Profile(ImageSpec):
    """150px-wide rendition used on the profile page."""
    processors = [ResizeProfile]
# and our display spec
class Display(ImageSpec):
    """600px display rendition; each access bumps the view counter."""
    increment_count = True
    processors = [ResizeDisplay]
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import UserCreationForm
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext
from django.shortcuts import render_to_response, get_object_or_404
from models import Category, Dish, Review, Photo, ProfilePhoto, Place, UserProfile
from forms import DishForm, DishReviewForm
from urls import consts as urlconsts
#from utils.json_functions import json_encode
from utils.serialization_helpers import serialize_dishes
def urls_consts_processor(request):
    """Template context processor exposing the URL-prefix constants."""
    context = {}
    context['urls'] = urlconsts
    return context
def home(request):
    """Landing page: categories with dish counts, recent reviews, dish photos."""
    # COUNT(*) in the database instead of len(queryset.all()), which fetched
    # every dish row just to count it
    category_count_tuple = [(cat, cat.dishes.count()) for cat in Category.objects.all()]
    reviews = Review.objects.order_by('-id')[:10]
    # all photos attached to Dish objects (generic FK filter)
    photos = Photo.objects.filter(content_type__app_label='food', content_type__model='dish')
    variables = RequestContext(request, { "categories" : category_count_tuple, 'reviews' : reviews, 'photos' : photos })
    return render_to_response('food/main.html', variables)
def dish_location_map(dish):
    ''' Returns the string to display for location, and the location url '''
    # (dish, display name of its place, place slug used to build the URL)
    return (dish, dish.place.get_display_name(), dish.place.slug)
def category(request, slug):
    """Show one category: its dishes plus a JSON payload for client scripts."""
    cat = get_object_or_404(Category, slug=slug)
    dishes = cat.dishes.all()
    dish_location_tuple = map(dish_location_map, dishes)
    # renamed from `json` (shadowed the stdlib module name); debug print removed
    dishes_json = serialize_dishes(dishes)
    variables = RequestContext(request, {
        "category" : cat,
        "dish_location_tuple" : dish_location_tuple,
        "dishes_json" : dishes_json
    })
    return render_to_response('food/category.html', variables)
def dish(request, slug):
    """Dish detail page; logged-in users also see their own past reviews."""
    dish = get_object_or_404(Dish, slug=slug)
    reviews_by_user = None
    # figure out if user has reviewed this dish before
    if request.user.is_authenticated():
        # get all reviews of this dish by user, newest first
        reviews_by_user = Review.objects.filter(profile__user__username__iexact=request.user.username,
                                                dish__slug__iexact=slug).order_by('-date_added')
    # if request.method == 'POST':
    # check if user is logged in
    # add upload image functionality from dish page
    #     uploadImageForm = UploadImageForm(request.POST, request.FILES)
    #     if uploadImageForm.is_valid():
    #         img = uploadImageForm.cleaned_data['img']
    #         p = Photo(content_object=dish, original_image=img, profile=request.user.get_profile())
    #         p.save()
    #         return HttpResponseRedirect("/d/" + slug)
    # else:
    #     uploadImageForm = UploadImageForm()
    variables = RequestContext(request, { "dish" : dish, "reviews_by_user" : reviews_by_user })
    return render_to_response('food/dish.html', variables)
def place(request, slug):
    """Render a place page listing every dish served there."""
    venue = get_object_or_404(Place, slug=slug)
    ctx = RequestContext(request, {
        "place": venue,
        "dishes": venue.dish_set.all(),
    })
    return render_to_response('food/place.html', ctx)
@login_required
def add_review(request, id):
    """Add a review (and optional photo) for the dish with primary key ``id``."""
    dish = get_object_or_404(Dish, id=id)
    if request.method == 'POST':
        form = DishReviewForm(request.POST, request.FILES)
        if form.is_valid():
            # create new review and save
            review = Review()
            review.rating = form.cleaned_data['rating']
            review.text = form.cleaned_data['text']
            review.dish = dish
            review.profile = request.user.get_profile()
            review.save()
            # save optional photo; .get replaces dict.has_key (deprecated,
            # removed in Python 3)
            try:
                img = form.cleaned_data.get('img')
                if img:
                    p = Photo(content_object=review.dish, original_image=img, user=request.user)
                    p.save()
            except Exception as e:
                raise Exception("Exception saving photo in add_review: " + str(e))
            return HttpResponseRedirect('/' + urlconsts['PROFILE'] + '/' + request.user.username)
    else:
        form = DishReviewForm()
    variables = {
        'form' : form,
        'dish' : dish
    }
    rc = RequestContext(request, variables)
    return render_to_response('food/add_review.html', rc)
@login_required
def edit_review(request, id):
    """Edit an existing review; a new photo may be attached at the same time."""
    review = get_object_or_404(Review, id=id)
    if request.method == 'POST':
        form = DishReviewForm(request.POST, request.FILES)
        if form.is_valid():
            # update the existing review and save
            review.rating = form.cleaned_data['rating']
            review.text = form.cleaned_data['text']
            review.save()
            # save optional photo; .get replaces dict.has_key (deprecated,
            # removed in Python 3)
            try:
                img = form.cleaned_data.get('img')
                if img:
                    p = Photo(content_object=review.dish, original_image=img, user=request.user)
                    p.save()
            except Exception as e:
                raise Exception("Exception saving photo in edit_review: " + str(e))
            return HttpResponseRedirect('/' + urlconsts['PROFILE'] + '/' + request.user.username)
    else:
        form = DishReviewForm()
    variables = {
        'form' : form,
        'review' : review
    }
    rc = RequestContext(request, variables)
    return render_to_response('food/edit_review.html', rc)
def display_review(request, username, dishname):
    """Stub: the corresponding urlpattern is commented out. TODO implement."""
    pass
def profile(request, username):
    """Public profile page: liked/disliked dishes and uploaded dish photos."""
    user = get_object_or_404(User, username=username)
    dishes_liked = Dish.objects.filter(users_voted_good__username__iexact=username)
    dishes_disliked = Dish.objects.filter(users_voted_bad__username__iexact=username)
    # every dish photo this user has uploaded (generic FK filter)
    photos = Photo.objects.filter(content_type__app_label='food', content_type__model='dish', user__username=username)
    # the viewer owns the page when logged in under the same (case-insensitive) name
    is_owner = bool(request.user.is_authenticated() and request.user.username.lower() == username.lower())
    ctx = RequestContext(request, {
        'profile' : user.get_profile(),
        'is_owner' : is_owner,
        'dishes_liked' : dishes_liked,
        'dishes_disliked' : dishes_disliked,
        'dish_photos' : photos
    })
    return render_to_response('food/profile.html', ctx)
@login_required
def delete_photo(request, id):
    """Delete a photo, but only when the requester uploaded it.

    Previously any authenticated user could delete any photo by guessing
    its id; now non-owners are redirected without deleting.
    """
    photo = get_object_or_404(Photo, id=id)
    if photo.user == request.user:
        photo.delete()
    return HttpResponseRedirect('/' + urlconsts['PROFILE'] + '/' + request.user.username)
def add_dish(request):
    """Render the add-dish form page."""
    form = DishForm()
    return render_to_response('food/add_dish.html', RequestContext(request, {"form": form}))
@login_required
def rate(request, rating, id):
    """Record a good/bad vote on a dish (GET) or save a quick review (POST)."""
    dish = get_object_or_404(Dish, id=id)
    r = rating.lower()
    existingReview = Review.objects.filter(profile=request.user.get_profile(), dish=dish)
    if existingReview:
        existingReview = existingReview[0]
    if request.method == 'POST':
        form = DishReviewForm(request.POST, request.FILES)
        if form.is_valid():
            reviewText = form.cleaned_data['text']
            if existingReview:
                existingReview.text = reviewText
                existingReview.save()
            else:
                review = Review()
                review.dish = dish
                review.profile = request.user.get_profile()
                review.text = reviewText
                # Fixed: the rating was never copied onto a new Review, which
                # violates the NOT NULL rating column on save.
                review.rating = form.cleaned_data['rating']
                review.save()
            # save optional photo; .get replaces dict.has_key; the exception
            # message now matches the sibling views (debug print removed)
            try:
                img = form.cleaned_data.get('img')
                if img:
                    p = Photo(content_object=dish, original_image=img, user=request.user)
                    p.save()
            except Exception as e:
                raise Exception("Exception saving photo in rate: " + str(e))
            # Fixed: the redirect went to '/<username>', which no urlpattern
            # serves; use the profile URL like the other views in this module.
            return HttpResponseRedirect('/' + urlconsts['PROFILE'] + '/' + request.user.username)
    # if GET, then test if user has reviewed the specific dish,
    # if he has, then populate fields with the existing review info
    # if not, then create new form
    else:
        user_voted_good = dish.users_voted_good.filter(username=request.user.username)
        user_voted_bad = dish.users_voted_bad.filter(username=request.user.username)
        if r == 'good':
            if user_voted_good:
                pass
            elif user_voted_bad:
                # flip the vote
                dish.users_voted_bad.remove(request.user)
                dish.users_voted_good.add(request.user)
            else:
                dish.users_voted_good.add(request.user)
        elif r == 'bad':
            if user_voted_bad:
                pass
            elif user_voted_good:
                # flip the vote
                dish.users_voted_good.remove(request.user)
                dish.users_voted_bad.add(request.user)
            else:
                dish.users_voted_bad.add(request.user)
        else:
            # unreachable: the urlconf restricts rating to good|bad
            raise Exception("doh")
        dish.save()
        form = DishReviewForm()
    # display all images that user has uploaded for this dish
    # and with ability to delete
    variables = {
        "dish" : dish,
        "rating" : r,
        "form" : form
    }
    if existingReview:
        variables['reviewText'] = existingReview.text
    rc = RequestContext(request, variables)
    return render_to_response('food/rate.html', rc)
def register(request):
    """Create a new account, log the user in, and go to their profile page."""
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            new_user = form.save()
            # re-authenticate so login() receives a user with a backend set
            new_user = authenticate(username=new_user.username, password=form.cleaned_data["password1"])
            login(request, new_user)
            return HttpResponseRedirect('/' + urlconsts['PROFILE'] + '/' + request.user.username)
    else:
        form = UserCreationForm()
    variables = RequestContext(request, { "form": form })
    return render_to_response("registration/register.html", variables)
from django.db import models
# Create your models here.
| Python |
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        # assertEqual replaces the long-deprecated failUnlessEqual alias
        self.assertEqual(1 + 1, 2)
# module-level doctests collected by the Django test runner
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
| Python |
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# misc app routes; note the pattern is unanchored at the end ('^about/'
# matches any path starting with about/) -- presumably intentional, verify
urlpatterns = patterns('misc.views',
    (r'^about/', 'about'),
)
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.contrib.auth import logout, login, authenticate
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import password_change, password_change_done
from django.db import models
from profile.models import ProfileImage, UserProfile
from profile.forms import UploadProfilePhotoForm, EditProfileInfoForm
from datetime import datetime
from common import globalvars
import urllib, re, settings
def about(request):
    """Render the static about page."""
    ctx = RequestContext(request, {})
    return render_to_response('misc/about.html', ctx)
#!/usr/bin/env python
from django.core.management import execute_manager
import sys
# standard Django 1.x manage.py bootstrap: locate settings.py next to this
# file and hand it to execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
if __name__ == "__main__":
    execute_manager(settings)
| Python |
from django.db import models
# Create your models here.
| Python |
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        # assertEqual replaces the long-deprecated failUnlessEqual alias
        self.assertEqual(1 + 1, 2)
# module-level doctests collected by the Django test runner
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
| Python |
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# account-management routes (view own account, change password)
urlpatterns = patterns('accounts.views',
    # Example:
    # (r'^dishpop/', include('dishpop.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    (r'^user/(?P<user_name>\w+)/account$', 'account'),
    (r'^user/(?P<user_name>\w+)/account/password_change$', 'change_password'),
)
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.contrib.auth import logout, login, authenticate
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import PasswordChangeForm
from datetime import datetime
def GetUser(user_name):
    """Return the User named ``user_name`` or raise Http404."""
    try:
        return User.objects.get(username=user_name)
    except User.DoesNotExist:
        raise Http404('Requested user not found.')
@login_required
def account(request, user_name):
    """Show the account page for ``user_name``.

    NOTE(review): any authenticated user can view any account page here --
    confirm whether this should be restricted to the owner.
    """
    user = GetUser(user_name)
    variables = RequestContext(request, {
        'user_public' : user,
    })
    return render_to_response('registration/account.html', variables)
@login_required
def change_password(request, user_name):
    """Change the logged-in user's password.

    Fixed: this view lacked @login_required (unlike its sibling ``account``),
    so anonymous requests reached PasswordChangeForm with an AnonymousUser.
    """
    if request.method == 'POST':
        form = PasswordChangeForm(user=request.user, data=request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect('/user/'+ user_name + '/')
    else:
        form = PasswordChangeForm(user=request.user)
    variables = RequestContext(request, { 'form': form })
    return render_to_response('registration/password_change.html', variables)
from django import forms
from django.forms import ModelForm
from reviews.models import DishImage
import re
from django.contrib.auth.models import User
# 1-5 rating scale used by ReviewForm (values stored as strings)
RATING_CHOICES = (
    ('1', '1 Horrible'),
    ('2', '2 Poor'),
    ('3', '3 Just ok'),
    ('4', '4 Good'),
    ('5', '5 Excellent'),
)
class SearchForm(forms.Form):
    """Site-wide search box; `initial` doubles as placeholder text."""
    query = forms.CharField(initial='What kind of food?', max_length=50)
    location = forms.CharField(initial='Where to look?', max_length=50)
class UploadDishPhotoForm(ModelForm):
    """Photo upload form; dish and user are filled in by the view."""
    class Meta:
        model = DishImage
        exclude = ('dish', 'user',)
class EditTagsForm(forms.Form):
    """Edit the free-form tag string attached to a dish."""
    dish_id = forms.IntegerField(widget=forms.HiddenInput, required=True)
    tags = forms.CharField(max_length=100, required=False)
# this is currently used for adding new dish AND adding a review...
class ReviewForm(forms.Form):
    """Combined form used both for adding a new dish and reviewing one."""
    name = forms.CharField()
    place = forms.CharField()
    tags = forms.CharField(required=False)
    text = forms.CharField(widget=forms.Textarea, required=False)
    rating = forms.ChoiceField(choices=RATING_CHOICES)
    # present only when editing an existing review
    review_id = forms.IntegerField(widget=forms.HiddenInput, required=False)
class RegistrationForm(forms.Form):
    """Sign-up form with double password entry and username validation."""
    username = forms.CharField(label=u'Username', max_length=30)
    email = forms.EmailField(label=u'Email')
    password1 = forms.CharField(
        label=u'Password',
        widget=forms.PasswordInput()
    )
    password2 = forms.CharField(
        label=u'Password (Again)',
        widget=forms.PasswordInput()
    )
    def clean_password2(self):
        """Ensure both password fields match."""
        if 'password1' in self.cleaned_data:
            password1 = self.cleaned_data['password1']
            password2 = self.cleaned_data['password2']
            if password1 == password2:
                return password2
        raise forms.ValidationError('Passwords do not match.')
    def clean_username(self):
        """Reject malformed or already-taken usernames."""
        username = self.cleaned_data['username']
        if not re.search(r'^\w+$', username):
            # Fixed typo: forms.ValdiationError raised AttributeError at
            # runtime instead of reporting a validation error.
            raise forms.ValidationError('Username can only contain '
                'alphanumeric characters and the underscore.')
        try:
            User.objects.get(username=username)
        except User.DoesNotExist:
            return username
        raise forms.ValidationError('Username is already taken.')
| Python |
from imagekit.specs import ImageSpec
from imagekit import processors
# first we define our thumbnail resize processor
class ResizeThumb(processors.Resize):
    """Crop-resize to an exact 100x75 thumbnail."""
    width = 100
    height = 75
    crop = True
class ResizeMedium(processors.Resize):
    """Crop-resize to an exact 275x275 medium square."""
    width = 275
    height = 275
    crop = True
# now we define a display size resize processor
class ResizeDisplay(processors.Resize):
    """Scale to 600px wide for the main display size."""
    width = 600
class EnhanceMedium(processors.Adjustment):
    """Boost contrast/sharpness for the medium rendition."""
    contrast = 1.2
    sharpness = 1.1
# now lets create an adjustment processor to enhance the image at small sizes
class EnchanceThumb(processors.Adjustment):
    """Boost contrast/sharpness so small thumbnails stay readable."""
    contrast = 1.2
    sharpness = 1.1
class MediumDisplay(ImageSpec):
    """Pre-cached 275x275 enhanced medium rendition."""
    pre_cache = True
    processors = [ResizeMedium, EnhanceMedium]
# now we can define our thumbnail spec
class Thumbnail(ImageSpec):
    """Pre-cached 100x75 enhanced thumbnail."""
    #access_as = 'thumbnail_image'
    pre_cache = True
    processors = [ResizeThumb, EnchanceThumb]
# and our display spec
class Display(ImageSpec):
    """600px display rendition; each access bumps the view counter."""
    increment_count = True
    processors = [ResizeDisplay]
from datetime import datetime
from django.db import models, IntegrityError, transaction
from django.template.defaultfilters import slugify
from tagging.models import Tag
from common.models import NameSlugModel, DateAwareModel, MyImageModel, UserOwnedModel, LocationAwareModel
class Place(NameSlugModel, DateAwareModel):
    """A named, slugged, timestamped venue that serves dishes."""
    def __unicode__(self):
        return self.name
class Dish(NameSlugModel, DateAwareModel):
    """A dish, optionally tied to a place and tagged."""
    place = models.ForeignKey(Place, null=True, blank=True)
    # NOTE(review): null=True has no effect on ManyToManyField -- verify
    tags = models.ManyToManyField(Tag, null=True, blank=True) # dish can be a specific food or a combination of foods
    def __unicode__(self):
        return self.name
class PlaceImage(MyImageModel, UserOwnedModel):
    """User-uploaded photo of a place."""
    place = models.ForeignKey(Place)
class DishImage(MyImageModel, UserOwnedModel):
    """User-uploaded photo of a dish."""
    dish = models.ForeignKey(Dish)
class Review(DateAwareModel, UserOwnedModel):
    """A user's review of a dish; rating is optional (nullable)."""
    text = models.TextField(blank=True)
    dish = models.ForeignKey(Dish)
    rating = models.IntegerField(blank=True, null=True)
    tags = models.ManyToManyField(Tag, null=True, blank=True)
    def __unicode__(self):
        return str(self.created_date) + ' ' + str(self.dish) + ' by ' + str(self.user)
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        # assertEqual replaces the long-deprecated failUnlessEqual alias
        self.assertEqual(1 + 1, 2)
# module-level doctests collected by the Django test runner
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
| Python |
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# routes for the reviews app (browse, submit, search, upload, auth)
urlpatterns = patterns('reviews.views',
    # Example:
    # (r'^dishpop/', include('dishpop.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    (r'^$', 'index'),
    (r'^tag/(?P<tag_id>\d+)/$', 'tag'),
    #(r'^user/(?P<user_name>\w+)/$', 'user_page'),
    (r'^dish/(?P<dish_id>\d+)/$', 'dish'),
    (r'^dish/(?P<dish_id>\d+)/addphoto$', 'upload_photo'),
    (r'^place/(?P<place_slug>[a-z0-9-]+)/$', 'place_page'),
    (r'^submit$', 'submit_dish'),
    (r'^submit/$', 'submit_dish'),
    (r'^submit/tags/(?P<dish_id>\d+)/$', 'edit_tags'),
    (r'^submit/(?P<dish_id>\d+)$', 'submit_review'),
    (r'^delete/$', 'delete_review_or_image'),
    (r'^search/$', 'search_page'),
    (r'^logout/$', 'logout_page'),
)
from django.contrib import admin
from reviews.models import Dish, Place, Review, DishImage
from tagging.models import Tag, Tag_Relation
class ImageAdmin(admin.ModelAdmin):
    """List images with name and rendered thumbnail."""
    list_display = ('name', 'admin_thumbnail_view',)
class PlaceAdmin(admin.ModelAdmin):
    """Hide the auto-maintained timestamp fields."""
    exclude = ('modified_date', 'created_date')
class DishAdmin(admin.ModelAdmin):
    """Hide the auto-maintained timestamp fields."""
    exclude = ('modified_date', 'created_date')
# Fixed: PlaceAdmin and DishAdmin were defined above but never passed to
# register(), so their customizations had no effect.
admin.site.register(Tag)
admin.site.register(Tag_Relation)
admin.site.register(Dish, DishAdmin)
admin.site.register(Place, PlaceAdmin)
admin.site.register(Review)
admin.site.register(DishImage, ImageAdmin)
| Python |
from reviews.models import Dish, Place, Review, DishImage
from tagging.models import Tag
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.contrib.auth import logout, login, authenticate
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from reviews.forms import ReviewForm, RegistrationForm, UploadDishPhotoForm, SearchForm, EditTagsForm
from django.contrib.auth.forms import PasswordChangeForm
from datetime import datetime
from tagging.utils import parse_tags, edit_string_for_tags
def add_search_form_processor(request):
    """Context processor injecting an unbound SearchForm into every template."""
    return {'search_form': SearchForm()}
def register_page(request):
    """Self-service signup; on success the new user is logged in and sent home."""
    if request.method == 'POST':
        form = RegistrationForm(request.POST)
        if form.is_valid():
            cd = form.cleaned_data
            # usernames are normalized to lowercase at creation time
            user = User.objects.create_user(
                username = cd['username'].lower(),
                password = cd['password1'],
                email = cd['email']
            )
            # automagically log user in
            user = authenticate(username = cd['username'].lower(), password = cd['password1'])
            login(request, user)
            return HttpResponseRedirect('/')
    else:
        form = RegistrationForm()
    variables = RequestContext(request, { 'form': form })
    return render_to_response('registration/register.html', variables)
def logout_page(request):
    """Log the user out and send them back to the front page."""
    logout(request)
    return HttpResponseRedirect('/')
def user_page(request, user_name):
    """Public page for a user; the owner additionally sees edit controls."""
    try:
        user = User.objects.get(username=user_name)
    except User.DoesNotExist:
        raise Http404('Requested user not found.')
    # (leftover debug print removed)
    variables = RequestContext(request, {
        'user_public' : user,
        'show_edit' : user_name == request.user.username,
    })
    return render_to_response('reviews/user_page.html', variables)
def index(request):
    '''List all dishes and their corresponding tags.'''
    form = SearchForm()
    num_latest = 100
    dishes = Dish.objects.all().order_by('-created_date')[:num_latest]
    show_results = False
    if 'query' in request.GET:
        show_results = True
        query = request.GET['query'].strip()
        if query:
            form = SearchForm({'query' : query})
            dishes = Dish.objects.filter(
                name__icontains=query
            )[:10]
    variables = RequestContext(request, {
        'form': form,
        'dishes': dishes,
        'show_results': show_results,
        'show_tags': True,
        'show_user': True,
        'show_edit' : request.user.is_authenticated(),
        'num_latest': num_latest,
    })
    # 'in' test replaces dict.has_key(), deprecated and removed in Python 3
    if 'ajax' in request.GET:
        return render_to_response('reviews/dish_list.html', variables)
    else:
        return render_to_response('reviews/main_page.html', variables)
def search_page(request):
    """Search dishes by name substring; supports both full-page and AJAX."""
    form = SearchForm()
    dishes = []
    show_results = False
    if 'query' in request.GET:
        show_results = True
        query = request.GET['query'].strip()
        if query:
            form = SearchForm({'query' : query})
            dishes = Dish.objects.filter(
                name__icontains=query
            )[:10]
    variables = RequestContext(request, {
        'form': form,
        'dishes': dishes,
        'show_results': show_results,
        'show_tags': True,
        'show_user': True
    })
    # 'in' test replaces dict.has_key(), deprecated and removed in Python 3
    if 'ajax' in request.GET:
        return render_to_response('reviews/dish_list.html', variables)
    else:
        return render_to_response('reviews/search_page.html', variables)
def tag(request, tag_id):
    '''List all the dishes associated with the specified tag.'''
    tag_obj = get_object_or_404(Tag, id=tag_id)
    ctx = RequestContext(request, {
        'tag': tag_obj,
        'related_tags': tag_obj.all_related_tags(),
    })
    return render_to_response('reviews/tag.html', ctx)
def place_page(request, place_slug):
    """Display a single place, looked up by its slug."""
    place = get_object_or_404(Place, slug=place_slug)
    context = RequestContext(request, {
        'place' : place,
        # Any authenticated user may edit a place.
        'show_edit' : request.user.is_authenticated()
    })
    return render_to_response('reviews/place.html', context)
def dish(request, dish_id):
    '''List all the reviews for the given dish'''
    the_dish = get_object_or_404(Dish, id=dish_id)
    all_reviews = the_dish.review_set.all()
    # Average rating is None when there are no reviews yet.
    avg_rating = None
    if all_reviews:
        total = sum(rev.rating for rev in all_reviews)
        avg_rating = float(total) / len(all_reviews)
    context = RequestContext(request, {
        'dish' : the_dish,
        'avg_rating' : avg_rating
    })
    return render_to_response('reviews/dish.html', context)
@login_required
def upload_photo(request, dish_id):
    """Attach an uploaded photo to a dish.

    Bug fix: the old ``if dish_id:`` guard left ``dish`` unbound whenever
    ``dish_id`` was falsy, crashing later with NameError.  The dish is now
    fetched unconditionally, so a missing/bad id yields a clean 404.
    """
    dish = get_object_or_404(Dish, id=dish_id)
    if request.method == 'POST':
        form = UploadDishPhotoForm(request.POST, request.FILES)
        if form.is_valid():
            new_dish_image = form.save(commit=False)
            new_dish_image.dish = dish
            new_dish_image.user = request.user
            new_dish_image.save()
            return HttpResponseRedirect('/dish/' + str(dish.id))
        # invalid form falls through and is re-rendered bound
    else:
        form = UploadDishPhotoForm()
    return render_to_response('reviews/upload.html', {'form': form}, context_instance=RequestContext(request))
@login_required
def submit_dish(request):
    """Create (or look up) a place and dish from a posted ReviewForm, then
    delegate to submit_review() for the review itself.

    NOTE(review): ``dish_id`` starts as None, which suggests the
    ``return submit_review(...)`` runs for every POST — so an *invalid*
    POST calls submit_review(request, None) and 404s.  Confirm whether
    that is intended before restructuring.
    """
    dish_id = None
    if request.method == 'POST':
        form = ReviewForm(request.POST)
        if form.is_valid():
            cd = form.cleaned_data
            placename = cd['place']
            # get_or_create avoids duplicate places/dishes with the same name.
            place, dummy = Place.objects.get_or_create(name = placename)
            dish, dummy = Dish.objects.get_or_create(name = cd['name'],
                                                     place = place)
            dish_id = dish.id
        return submit_review(request, dish_id)
    else:
        initial_data = {'text': 'Write stuff here.'}
        form = ReviewForm(initial = initial_data)
    return render_to_response('reviews/submit_form_page.html', {'form': form},
                              context_instance=RequestContext(request))
@login_required
def edit_tags(request, dish_id):
    """Replace a dish's tag set from a submitted tag string.

    Handles both a normal page flow and an ajax flow (``?ajax`` in the
    query string): on ajax success the refreshed tag-list fragment is
    returned; on ajax validation failure the literal string ``failure``
    is returned; otherwise the user is redirected / shown the form page.
    """
    dish = get_object_or_404(Dish, id=dish_id)
    ajax = 'ajax' in request.GET
    initial_data = {}
    if request.method == 'POST':
        form = EditTagsForm(request.POST)
        if form.is_valid():
            cd = form.cleaned_data
            # parse tags and add to dish
            # what if users are stupid and tag dishes badly?
            tag_string = cd['tags']
            tag_list = parse_tags(tag_string)
            #tag_obj_list = []
            # Clear first: the submitted string is the complete new tag set.
            dish.tags.clear()
            for tag in tag_list:
                tag_obj, dummy = Tag.objects.get_or_create(name = tag)
                #tag_obj_list.append(tag_obj)
                # Guards against duplicates within the submitted list itself
                # (the set was just cleared above).
                if not tag_obj in dish.tags.all():
                    dish.tags.add(tag_obj)
            if ajax:
                variables = RequestContext(request, {
                    'dish': dish,
                    'show_edit': request.user.is_authenticated(),
                })
                return render_to_response(
                    'reviews/tag_list.html', variables
                )
            return HttpResponseRedirect('/dish/' + str(dish.id))
        else:
            if ajax:
                return HttpResponse(u'failure')
            # NOTE(review): non-ajax invalid POST falls through and rebuilds
            # the form from the dish's *current* tags, discarding the user's
            # input — confirm this is intended.
    tags_string = edit_string_for_tags(dish.tags.all())
    initial_data['tags'] = tags_string
    initial_data['dish_id'] = dish_id
    form = EditTagsForm(
        initial = initial_data
    )
    if ajax:
        return render_to_response('reviews/edit_tags_form.html', {'form': form},
                                  context_instance=RequestContext(request))
    return render_to_response('reviews/submit_form_page.html', {'form': form},
                              context_instance=RequestContext(request))
@login_required
def delete_review_or_image(request):
    """Delete the review or dish photo named in the query string, then
    send the user back to their own page."""
    review_id = request.GET.get('review_id')
    if review_id is not None:
        get_object_or_404(Review, id=review_id).delete()
    else:
        photo_id = request.GET.get('photo_id')
        if photo_id is not None:
            get_object_or_404(DishImage, id=photo_id).delete()
    return HttpResponseRedirect('/user/' + str(request.user.username))
@login_required
def submit_review(request, dish_id):
    """Create or edit a review for a dish.

    POST with a ``review_id`` in the form data updates that review in
    place; POST without one creates a new review.  GET with
    ``review_id`` in the query string pre-fills the form for editing;
    plain GET shows a blank form for a new review.
    """
    dish = get_object_or_404(Dish, id=dish_id)
    initial_data = {}
    if request.method == 'POST':
        form = ReviewForm(request.POST)
        if form.is_valid():
            cd = form.cleaned_data
            if 'review_id' in cd and cd['review_id']:
                # Editing: overwrite text/rating of the existing review.
                review_id = cd['review_id']
                review = Review.objects.get(id = review_id)
                review.text = cd['text']
                review.rating = cd['rating']
            else:
                review = Review.objects.create(text = cd['text'],
                                               dish = dish,
                                               user = request.user,
                                               rating = cd['rating'])
            # parse tags and add to dish
            # what if users are stupid and tag dishes badly?
            tag_string = cd['tags']
            tag_list = parse_tags(tag_string)
            for tag in tag_list:
                tag_obj, dummy = Tag.objects.get_or_create(name = tag)
                # Tags are attached to both the review and (if new) the dish.
                tag_obj.review_set.add(review)
                if not tag_obj in dish.tags.all():
                    dish.tags.add(tag_obj)
            dish.review_set.add(review)
            return HttpResponseRedirect('/dish/' + str(dish.id))
        # NOTE(review): an invalid POST falls through to the bottom and
        # rebuilds the form from empty initial data, discarding input and
        # validation errors — confirm this is intended.
    elif 'review_id' in request.GET: # case for editing an existing review
        review_id = request.GET['review_id']
        try:
            review = Review.objects.get(id = review_id)
            tags_string = edit_string_for_tags(review.tags.all())
            initial_data['tags'] = tags_string
            initial_data['text'] = review.text
            initial_data['rating'] = review.rating
            initial_data['review_id'] = review.id
        except (Review.DoesNotExist):
            # Unknown review id: silently fall back to a blank form.
            pass
    else: # case for new review
        initial_data['text'] = 'Write stuff here.'
        initial_data['name'] = dish.name
        initial_data['place'] = dish.place
    form = ReviewForm(
        initial = initial_data
    )
    return render_to_response('reviews/submit_form_page.html', {'form': form},
                              context_instance=RequestContext(request))
# def add_dish(request):
# #dish = get_object_or_404(Dish, id=dish_id)
# if request.method == 'POST':
# form = NewDishForm(request.POST)
# if form.is_valid():
# cd = form.cleaned_data
# dish = Dish.objects.create(name = cd['name'],
# place = cd['place'])
# review.save()
# return HttpResponseRedirect('/')
# else:
# form = ReviewForm(
# initial = {'text': 'Write stuff here.'}
# )
# return render_to_response('reviews/review.html', {'form': form, 'dish': dish},
# context_instance=RequestContext(request))
# def add_review(request, dish_id):
# dish = get_object_or_404(Dish, id=dish_id)
# try:
# review = Review.objects.create(text=str(request.POST['review_text']), dish=dish)
# except (KeyError):
# # Redisplay the review form.
# return render_to_response('reviews/review.html', {
# 'dish': dish,
#             'error_message': "You didn't select a choice.",
# }, context_instance=RequestContext(request))
# else:
# review.save()
# # Always return an HttpResponseRedirect after successfully dealing
# # with POST data. This prevents data from being posted twice if a
# # user hits the Back button.
# return HttpResponseRedirect(reverse('reviews.views.dish', args=(dish.id,))) | Python |
#!/usr/bin/env python
"""Django management entry point (pre-1.4 ``execute_manager`` style)."""
from django.core.management import execute_manager
import sys

try:
    import settings # Assumed to be in the same directory.
except ImportError:
    # ``sys`` is already imported above; the redundant in-branch
    # ``import sys`` has been removed.
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)

if __name__ == "__main__":
    execute_manager(settings)
| Python |
from django.conf.urls.defaults import *
import os
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# Development-only media directory served by django.views.static.serve below.
site_media = os.path.join(os.path.dirname(__file__), 'site_media')

# NOTE(review): four app URLConfs are all mounted at r'^', so resolution
# order matters — 'reviews' patterns shadow identical patterns in
# 'profile', 'accounts' and 'misc'.
urlpatterns = patterns('',
    # Example:
    # (r'^kokoomi/', include('kokoomi.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    (r'^', include('reviews.urls')),
    (r'^', include('profile.urls')),
    (r'^', include('accounts.urls')),
    (r'^', include('misc.urls')),
    (r'^admin/', include(admin.site.urls)),
    (r'^login/$', 'django.contrib.auth.views.login'),
    (r'^register/$', 'reviews.views.register_page'), # consider moving this out
    # Static serving through Django is for development only.
    (r'^site_media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': site_media}),
)
from django import forms
from django.forms import ModelForm
from django.contrib.auth.models import User
from profile.models import ProfileImage, UserProfile
class UploadProfilePhotoForm(ModelForm):
    """Upload form for a profile image; owner and bookkeeping fields are
    filled in by the view, not the user."""
    class Meta:
        model = ProfileImage
        exclude = ('user', 'isPrimary', 'name')
class EditProfileInfoForm(ModelForm):
    """Form for editing a user's own profile (location/website).

    Bug fix: ``exclude`` was the bare string ``('user')`` — parenthesized,
    not a tuple — so Django iterated it character by character and the
    ``user`` field was never actually excluded.  It is now a real tuple.
    """
    class Meta:
        model = UserProfile
        exclude = ('user',)
from imagekit.specs import ImageSpec
from imagekit import processors
# first we define our thumbnail resize processor
class ResizeThumb(processors.Resize):
    # 100x75 cropped thumbnail used for small listings.
    width = 100
    height = 75
    crop = True
# now we define a display size resize processor
class ResizeDisplay(processors.Resize):
    # Display size: fixed width, height scales proportionally (no crop).
    width = 250
# now lets create an adjustment processor to enhance the image at small sizes
class EnchanceThumb(processors.Adjustment):
    # Slight contrast/sharpness boost so small thumbnails stay legible.
    # (Name is a typo for "Enhance" but is referenced elsewhere — keep it.)
    contrast = 1.2
    sharpness = 1.1
# now we can define our thumbnail spec
class Thumbnail(ImageSpec):
    #access_as = 'thumbnail_image'
    # Generated eagerly at save time rather than on first access.
    pre_cache = True
    processors = [ResizeThumb, EnchanceThumb]
# and our display spec
class Display(ImageSpec):
    # Each access bumps the model's view counter (see IKOptions.save_count_as).
    increment_count = True
    processors = [ResizeDisplay]
| Python |
from django.db import models
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from common.models import MyImageModel, UserOwnedModel
class ProfileImage(MyImageModel, UserOwnedModel):
    """A user's avatar image; one per upload, the newest acts as current."""
    # Whether this image is the user's primary/current avatar.
    isPrimary = models.BooleanField()

    class IKOptions:
        # This inner class is where we define the ImageKit options for the model
        spec_module = 'kokoomi.profile.specs'
        cache_dir = 'images'
        image_field = 'image'
        save_count_as = 'num_views'
class UserProfile(models.Model):
    """Extra per-user data; auto-created for each new User via post_save."""
    user = models.OneToOneField(User)
    location = models.CharField(max_length=255,blank=True)
    website = models.URLField(blank=True)
def create_profile(sender, **kw):
    """post_save handler: give every newly created User an empty profile."""
    if kw["created"]:
        UserProfile(user=kw["instance"]).save()
post_save.connect(create_profile, sender=User, dispatch_uid="users-profilecreation-signal") | Python |
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        # assertEqual replaces the deprecated failUnlessEqual alias.
        self.assertEqual(1 + 1, 2)
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
| Python |
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# All views resolve against the 'profile.views' prefix.
urlpatterns = patterns('profile.views',
    # Example:
    # (r'^dishpop/', include('dishpop.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    (r'^user/(?P<user_name>\w+)/$', 'profile'),
    (r'^user/(?P<user_name>\w+)/profilepic$', 'upload_photo'),
    (r'^user/(?P<user_name>\w+)/edit$', 'edit_profile'),
    # /c marks an image as current, /d deletes it.
    (r'^user/(?P<user_name>\w+)/profilepic/(?P<profileimg_id>\d+)/c$', 'change_photo'),
    (r'^user/(?P<user_name>\w+)/profilepic/(?P<profileimg_id>\d+)/d$', 'delete_photo'),
)
from django.http import Http404, HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.contrib.auth import logout, login, authenticate
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import password_change, password_change_done
from django.db import models
from profile.models import ProfileImage, UserProfile
from profile.forms import UploadProfilePhotoForm, EditProfileInfoForm
from datetime import datetime
from common import globalvars
import urllib, re, settings
def GetUser(user_name):
    """Fetch the User with the given username, raising Http404 if absent."""
    try:
        user = User.objects.get(username=user_name)
    except User.DoesNotExist:
        raise Http404('Requested user not found.')
    return user
def GetDefaultImageUrl(user):
    """Return the display URL of the user's most recently modified profile
    image, or the generic avatar when none has been uploaded."""
    if user.profileimage_set.count() > 0:
        newest = user.profileimage_set.order_by('-modified_date')[0]
        return newest.mediumdisplay.url
    return globalvars.GetGenericAvatarImgURL()
@login_required
def profile(request, user_name):
    """Render a user's profile page: info, avatar and their reviews, each
    paired with a photo the user took of that dish (or a generic image).

    Bug fix: the image lookup used a *bare* ``except:``, which also trapped
    KeyboardInterrupt/SystemExit; it now catches Exception, keeping the
    best-effort fallback without hiding interpreter-level signals.
    """
    user = GetUser(user_name)
    dish_imgs = user.dishimage_set.all()
    review_dict = []  # (review, image URL) pairs; key name kept for template
    for review in user.review_set.all():
        try:
            img_url = dish_imgs.filter(dish=review.dish)[0].thumbnail.url
        except Exception:
            # No matching photo (IndexError) or broken image — fall back.
            img_url = globalvars.GetGenericFoodImgURL()
        review_dict.append((review, img_url))
    variables = RequestContext(request, {
        'user_public' : user,
        'profile' : user.get_profile(),
        'media_url' : GetDefaultImageUrl(user),
        'review_dict' : review_dict,
    })
    return render_to_response('profile/profile.html', variables)
@login_required
def edit_profile(request, user_name):
    """Let the logged-in user update the location/website of a profile;
    blank submitted fields leave the stored values untouched."""
    if request.method == 'POST':
        form = EditProfileInfoForm(request.POST)
        if form.is_valid():
            cd = form.cleaned_data
            userprofile = UserProfile.objects.get(user=request.user)
            if cd['location'] != "":
                userprofile.location = cd['location']
            if cd['website'] != "":
                userprofile.website = cd['website']
            userprofile.save()
            return HttpResponseRedirect('/user/' + user_name)
        # invalid form falls through and re-renders bound
    else:
        form = EditProfileInfoForm()
    user = GetUser(user_name)
    variables = RequestContext(request, {
        'user_public' : user,
        'media_url' : GetDefaultImageUrl(user),
    })
    return render_to_response('profile/profile_edit.html', {'form': form}, context_instance=variables)
@login_required
def upload_photo(request, user_name):
    """Handle upload of a new profile picture for ``user_name``."""
    if request.method == 'POST':
        form = UploadProfilePhotoForm(request.POST, request.FILES)
        if form.is_valid():
            image = form.save(commit=False)
            # Fill the fields the form excludes before persisting.
            image.name = user_name
            image.isPrimary = True
            image.user = request.user
            image.save()
            return HttpResponseRedirect('/user/' + user_name)
        # invalid form falls through and re-renders bound
    else:
        form = UploadProfilePhotoForm()
    context = RequestContext(request, {
        'media_url' : GetDefaultImageUrl(GetUser(user_name)),
    })
    return render_to_response('profile/profilepic.html', {'form': form}, context_instance=context)
@login_required
def change_photo(request, user_name, profileimg_id):
    """Make a profile image current by bumping its modified date (the
    newest image is treated as the avatar elsewhere).

    Cleanup: uses the shared GetUser() helper instead of duplicating its
    try/except, and drops a dead ``variables = RequestContext(...)`` that
    was built but never used before the redirect.
    """
    user = GetUser(user_name)
    img = user.profileimage_set.get(pk=profileimg_id)
    img.modified_date = datetime.now()
    img.save()
    return HttpResponseRedirect('/user/' + user_name)
@login_required
def delete_photo(request, user_name, profileimg_id):
    """Delete one of the user's profile images.

    Cleanup: uses the shared GetUser() helper instead of duplicating its
    try/except, and drops a dead ``variables = RequestContext(...)`` that
    was built but never used before the redirect.
    """
    user = GetUser(user_name)
    user.profileimage_set.get(pk=profileimg_id).delete()
    return HttpResponseRedirect('/user/' + user_name + '/profilepic')
# Django settings for kokoomi project.
import os
# Absolute path of the project directory; all file paths derive from it.
SITE_ROOT = os.path.abspath(os.path.dirname(__file__))

# NOTE(review): DEBUG must be turned off for any production deployment.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)

MANAGERS = ADMINS

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': os.path.join(SITE_ROOT, 'kokoomi_db'), # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(SITE_ROOT, 'site_media')

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/site_media/'

LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/' # consider making this the user's profile or account page
# Used by User.get_profile() in the profile views.
AUTH_PROFILE_MODULE = 'profile.UserProfile'

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'

# Make this unique, and don't share it with anybody.
# NOTE(review): this key is committed to version control; rotate it and
# load it from the environment for any real deployment.
SECRET_KEY = '@&$gmbry6m+%^^f5(8hs2k@64i3lsxahec^%_6@z5&7z29%ywj'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
#     'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'kokoomi.urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(SITE_ROOT, 'templates'),
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    'reviews',
    'tagging',
    'imagekit',
    'common',
    'profile',
    'accounts',
    'misc',
)

TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.contrib.messages.context_processors.messages',
    # Injects the search form into every template context.
    'reviews.views.add_search_form_processor',
)
from django.db import models
class Tag(models.Model):
    """A label for dishes, linked to other tags via Tag_Relation with
    'is a' (type) and 'has a' (part) semantics."""
    name = models.CharField(max_length=200, unique=True)
    related_tags = models.ManyToManyField('self', blank=True, symmetrical=False, through='Tag_Relation')

    def rec_lookup(self, f, st):
        # Transitively walk the relation-returning method ``f``,
        # accumulating every reachable tag into the set ``st``.
        for obj in f():
            st.add(obj)
            obj.rec_lookup(getattr(obj, f.__name__), st)

    def sub_types(self):
        # Tags X such that "X is a <self>".
        return set([rel.source for rel in self.source_relation_set.all() if rel.is_a])

    def super_types(self):
        # Tags X such that "<self> is a X".
        return set([rel.target for rel in self.target_relation_set.all() if rel.is_a])

    def sub_objects(self):
        # Tags X such that "<self> has a X".
        return set([rel.target for rel in self.target_relation_set.all() if rel.has_a])

    def super_objects(self):
        # Tags X such that "X has a <self>".
        return set([rel.source for rel in self.source_relation_set.all() if rel.has_a])

    def has_related_tags(self):
        # BUG FIX: the original tested the *bound methods* themselves
        # (always truthy) instead of calling them, so this returned a
        # truthy value for every tag.  Now the methods are invoked.
        return self.sub_types() or self.super_types() or self.sub_objects() or self.super_objects()

    def rec_sub_types(self):
        st = set([])
        self.rec_lookup(self.sub_types, st)
        return st

    def rec_super_types(self):
        st = set([])
        self.rec_lookup(self.super_types, st)
        return st

    def rec_sub_objects(self):
        st = set([])
        self.rec_lookup(self.sub_objects, st)
        return st

    def rec_super_objects(self):
        st = set([])
        self.rec_lookup(self.super_objects, st)
        return st

    def all_related_tags(self):
        """Union of every tag transitively reachable through any relation."""
        return self.rec_sub_types() | self.rec_super_types() | self.rec_sub_objects() | self.rec_super_objects()

    # NOTE(review): the add_* helpers return an *unsaved* Tag_Relation;
    # callers must save() it themselves — confirm this is intended.
    def add_sub_type(self, sub_type):
        return Tag_Relation(source=sub_type, target=self, is_a=True)

    def add_super_type(self, super_type):
        return Tag_Relation(source=self, target=super_type, is_a=True)

    def add_sub_object(self, sub_object):
        return Tag_Relation(source=self, target=sub_object, has_a=True)

    def add_super_object(self, super_object):
        return Tag_Relation(source=super_object, target=self, has_a=True)

    def __unicode__(self):
        return self.name
class Tag_Relation(models.Model):
    """Directed edge between two tags; exactly one of is_a/has_a is
    expected to be set (not enforced at the DB level)."""
    # related_name values are deliberately swapped: a tag's
    # source_relation_set holds relations where it is the *target*.
    source = models.ForeignKey(Tag, related_name='target_relation_set')
    target = models.ForeignKey(Tag, related_name='source_relation_set')
    is_a = models.BooleanField(default=False); # True if source is a target
    has_a = models.BooleanField(default=False); # True if source has a target

    class Meta:
        unique_together = ("source", "target")

    def __unicode__(self):
        if self.is_a:
            return self.source.name + " is a type of " + self.target.name
        elif self.has_a:
            return self.source.name + " consists of " + self.target.name
        else:
            return "error"
| Python |
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        # assertEqual replaces the deprecated failUnlessEqual alias.
        self.assertEqual(1 + 1, 2)
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
| Python |
from django.utils.encoding import force_unicode
def parse_tags(tagstring):
    """
    Parses tag input, with multiple word input being activated and
    delineated by commas and double quotes. Quotes take precedence, so
    they may contain commas.

    Returns a sorted list of unique tag names.

    Ported from Jonathan Buchanan's `django-tagging
    <http://django-tagging.googlecode.com/>`_
    """
    if not tagstring:
        return []
    tagstring = force_unicode(tagstring)
    # Special case - if there are no commas or double quotes in the
    # input, we don't *do* a recall... I mean, we know we only need to
    # split on spaces.
    if u',' not in tagstring and u'"' not in tagstring:
        words = list(set(split_strip(tagstring, u' ')))
        words.sort()
        return words
    words = []
    buffer = []
    # Defer splitting of non-quoted sections until we know if there are
    # any unquoted commas.
    to_be_split = []
    saw_loose_comma = False
    open_quote = False
    # Python 2 explicit iterator protocol (i.next()); StopIteration ends
    # the scan below.
    i = iter(tagstring)
    try:
        while True:
            c = i.next()
            if c == u'"':
                # Flush any unquoted text accumulated so far; its
                # delimiter is decided once the whole string is scanned.
                if buffer:
                    to_be_split.append(u''.join(buffer))
                    buffer = []
                # Find the matching quote
                open_quote = True
                c = i.next()
                while c != u'"':
                    buffer.append(c)
                    c = i.next()
                # A quoted section becomes a word verbatim (commas kept).
                if buffer:
                    word = u''.join(buffer).strip()
                    if word:
                        words.append(word)
                    buffer = []
                open_quote = False
            else:
                if not saw_loose_comma and c == u',':
                    saw_loose_comma = True
                buffer.append(c)
    except StopIteration:
        # If we were parsing an open quote which was never closed treat
        # the buffer as unquoted.
        if buffer:
            if open_quote and u',' in buffer:
                saw_loose_comma = True
            to_be_split.append(u''.join(buffer))
    if to_be_split:
        # One loose comma anywhere makes commas the delimiter for every
        # unquoted chunk; otherwise words are space-separated.
        if saw_loose_comma:
            delimiter = u','
        else:
            delimiter = u' '
        for chunk in to_be_split:
            words.extend(split_strip(chunk, delimiter))
    words = list(set(words))
    words.sort()
    return words
def split_strip(string, delimiter=u','):
    """
    Splits ``string`` on ``delimiter``, stripping each resulting string
    and returning a list of non-empty strings.

    Ported from Jonathan Buchanan's `django-tagging
    <http://django-tagging.googlecode.com/>`_
    """
    if not string:
        return []
    return [piece.strip() for piece in string.split(delimiter) if piece.strip()]
def edit_string_for_tags(tags):
    """
    Given list of ``Tag`` instances, creates a string representation of
    the list suitable for editing by the user, such that submitting the
    given string representation back without changing it will give the
    same list of tags.

    Tag names which contain commas will be double quoted.

    If any tag name which isn't being quoted contains whitespace, the
    resulting string of tag names will be comma-delimited, otherwise
    it will be space-delimited.

    Ported from Jonathan Buchanan's `django-tagging
    <http://django-tagging.googlecode.com/>`_
    """
    rendered = []
    for tag in tags:
        needs_quoting = u',' in tag.name or u' ' in tag.name
        rendered.append(u'"%s"' % tag.name if needs_quoting else tag.name)
    return u', '.join(sorted(rendered))
# Create your views here.
| Python |
import settings
def GetGenericAvatarImgURL():
    """Media URL of the placeholder avatar image."""
    return '%simages/generic.jpg' % settings.MEDIA_URL
def GetGenericFoodImgURL():
    """Media URL of the placeholder dish photo."""
    return '%simages/common/missingfoodpicture.jpg' % settings.MEDIA_URL
from datetime import datetime
from django.db import models, IntegrityError, transaction
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
from imagekit.models import ImageModel
class LocationAwareModel(models.Model):
    """Abstract mixin adding an optional latitude/longitude pair."""
    lat = models.FloatField(null=True, blank=True)
    lon = models.FloatField(null=True, blank=True)

    class Meta:
        abstract = True
class NameSlugModel(models.Model):
    """Abstract mixin that maintains a unique slug derived from ``name``."""
    name = models.CharField(max_length=255)
    slug = models.SlugField(unique=True, editable=False)

    class Meta:
        abstract = True

    def save(self, *args, **kwargs):
        """
        Based on the Tag save() method in django-taggit, this method simply
        stores a slugified version of the name, ensuring that the unique
        constraint is observed
        """
        # Retries with '<slug>_1', '<slug>_2', ... until the unique
        # constraint is satisfied; savepoints keep the surrounding
        # transaction usable after each IntegrityError.
        self.slug = slug = slugify(self.name)
        i = 0
        while True:
            try:
                savepoint = transaction.savepoint()
                res = super(NameSlugModel, self).save(*args, **kwargs)
                transaction.savepoint_commit(savepoint)
                return res
            except IntegrityError:
                transaction.savepoint_rollback(savepoint)
                i += 1
                self.slug = '%s_%d' % (slug, i)
class DateAwareModel(models.Model):
    """Abstract mixin with automatic created/modified timestamps."""
    modified_date = models.DateTimeField(auto_now=True)
    created_date = models.DateTimeField(auto_now_add=True)

    class Meta:
        abstract = True
class MyImageModel(ImageModel, NameSlugModel, DateAwareModel):
    """Abstract base for all site images: imagekit-processed file plus a
    name/slug and timestamps; tracks how often it has been viewed."""
    image = models.ImageField(upload_to='images')
    num_views = models.PositiveIntegerField(editable=False, default=0)

    class IKOptions:
        # ImageKit configuration: specs live in kokoomi.reviews.specs and
        # processed copies go under 'cache'.
        spec_module = 'kokoomi.reviews.specs'
        cache_dir = 'cache'
        image_field = 'image'
        save_count_as = 'num_views'
        admin_thumbnail_spec = 'thumbnail'

    class Meta:
        abstract = True

    def __unicode__(self):
        return self.slug
class UserOwnedModel(models.Model):
    """Abstract mixin linking a record to the User who owns it."""
    user = models.ForeignKey(User)

    class Meta:
        abstract = True
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        # assertEqual replaces the deprecated failUnlessEqual alias.
        self.assertEqual(1 + 1, 2)
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
| Python |
# Create your views here.
| Python |
# Imports
from re import sub, findall
import string
# Globals
# Module-wide debug flag; presumably toggles verbose output — TODO confirm.
F_DEBUG = False
# Hex digit -> 4-bit binary string lookup (both lowercase and uppercase a-f).
HEXBIN_TRANS = {'0': '0000', '1': '0001', '2': '0010', '3': '0011', '4': '0100', '5': '0101', '6': '0110', '7': '0111', '8': '1000', '9': '1001', 'a': '1010', 'b': '1011', 'c': '1100', 'd': '1101', 'e': '1110', 'f': '1111', 'A': '1010', 'B': '1011', 'C': '1100', 'D': '1101', 'E': '1110', 'F': '1111'}
class Forensie:
# Custom Exceptions
class FormatError(Exception):
pass
# Class Methods
def __init__(self, instr):
self.origStr = self.inputStr = instr
self.RemoveDelimiters()
self.endian = 'little'
self.PType = {'BE': 'Solaris 8 boot partition', 'BF': 'New Solaris x86 partition', 'BB': 'Boot Wizard hidden', 'BC': 'Acronis backup partition', '16': 'Hidden DOS 16-bit FAT >=32M', '4A': "Mark Aitchison's ALFS/THIN lightweight filesystem for DOS or AdaOS Aquila (Withdrawn)", '5C': 'Priam EDisk', '24': 'NEC DOS 3.x', '26': 'Reserved', '27': 'PQservice or Windows RE hidden partition or MirOS partition or RouterBOOT kernel partition', '20': 'Unused', '21': 'Reserved or Unused', '22': 'Unused', '23': 'Reserved', '1B': 'Hidden WIN95 OSR2 FAT32', '11': 'Hidden DOS 12-bit FAT or Leading Edge DOS 3.x logically sectored FAT', '10': 'OPUS (?)', '12': 'Configuration/diagnostics partition', '2A': 'AtheOS File System (AFS)', '2B': 'SyllableSecure (SylStor)', '14': 'Hidden DOS 16-bit FAT <32M or AST DOS with logically sectored FAT', 'F6': 'Storage Dimensions SpeedStor', '61': 'SpeedStor', '55': 'EZ-Drive', '54': 'Disk Manager 6.0 Dynamic Drive Overlay (DDO)', '57': 'DrivePro or VNDI Partition', '56': 'Golden Bow VFeature Partitioned Volume. 
or DM converted to EZ-BIOS or AT&T MS-DOS 3.x logically sectored FAT.', '51': 'OnTrack Disk Manager RW (DM6 Aux1) or Novell', '50': 'OnTrack Disk Manager (older versions) RO or Lynx RTOS or Native Oberon (alt)', '53': 'Disk Manager 6.0 Aux3', '52': 'CP/M or Microport SysV/AT', 'B4': 'HP Volume Expansion (SpeedStor variant)', 'B6': 'HP Volume Expansion (SpeedStor variant) or Corrupted Windows NT mirror set (master), FAT16 file system', 'B7': 'Corrupted Windows NT mirror set (master), NTFS file system or BSDI BSD/386 filesystem', 'B0': 'BootStar Dummy', 'B1': 'HP Volume Expansion (SpeedStor variant) or QNX Neutrino Power-Safe filesystem', 'B2': 'QNX Neutrino Power-Safe filesystem', 'B3': 'HP Volume Expansion (SpeedStor variant) or QNX Neutrino Power-Safe filesystem', '1E': 'Hidden WIN95 16-bit FAT, LBA-mapped', '1C': 'Hidden WIN95 OSR2 FAT32, LBA-mapped', 'B8': 'BSDI BSD/386 swap partition', '65': 'Novell Netware 386, 3.xx or 4.xx', '66': 'Novell Netware SMS Partition', '88': 'Linux plaintext partition table', '82': 'Prime or Solaris x86 or Linux swap', '3B': 'THEOS ver 4 extended partition', '3A': 'THEOS ver 4 4gb partition', '81': 'MINIX since 1.4b, early Linux or Mitac disk manager', '86': 'Old Linux RAID partition superblock or FAT16 volume set', '87': 'NTFS volume set', '84': 'OS/2 hidden C: drive or Hibernation partition', '85': 'Linux extended partition', '02': 'XENIX root', '03': 'XENIX /usr', '00': 'Empty', '01': 'DOS 12-bit FAT', '06': 'DOS 3.31+ 16-bit FAT (over 32M)', '07': 'Windows NT NTFS or OS/2 IFS (e.g., HPFS) or exFAT or Advanced Unix or QNX2.x pre-1988', '04': 'DOS 3.0+ 16-bit FAT (up to 32M)', '05': 'DOS 3.3+ Extended Partition', '08': 'OS/2 (v1.0-1.3 only) or AIX boot partition or SplitDrive or Commodore DOS or DELL partition spanning multiple drives or QNX 1.x and 2.x ("qny")', '09': 'AIX data partition or Coherent filesystem or QNX 1.x and 2.x ("qnz")', 'E8': 'LUKS', 'E5': 'Tandy MSDOS with logically sectored FAT', 'E4': 'SpeedStor 16-bit FAT 
extended partition < 1024 cyl.', 'F5': 'Prologue multi-volume partition', 'E6': 'Storage Dimensions SpeedStor', 'E1': 'DOS access or SpeedStor 12-bit FAT extended partition', 'E0': 'Reserved by STMicroelectronics for a filesystem called ST AVFS.', 'E3': 'DOS R/O or SpeedStor', 'EE': 'Indication that this legacy MBR is followed by an EFI header', 'ED': 'Unused', 'EF': 'Partition that contains an EFI file system', 'EC': 'SkyOS SkyFS', 'EB': 'BeOS BFS', '0B': 'WIN95 OSR2 FAT32', '0C': 'WIN95 OSR2 FAT32, LBA-mapped', '0A': 'OS/2 Boot Manager or Coherent swap partition or OPUS', '0F': 'WIN95: Extended partition, LBA-mapped', '0E': 'WIN95: DOS 16-bit FAT, LBA-mapped', '39': 'Plan 9 partition or THEOS ver 4 spanned partition', '38': 'THEOS ver 3.2 2gb partition', '8B': 'Legacy Fault Tolerant FAT32 volume', '8C': 'Legacy Fault Tolerant FAT32 volume using BIOS extd INT 13h', '31': 'Reserved', '8A': 'Linux Kernel Partition (used by AiR-BOOT)', '36': 'Reserved', '8D': 'Free FDISK 0.96+ hidden Primary DOS FAT12 partitition', '8E': 'Linux Logical Volume Manager partition', 'FA': 'Bochs', 'FB': 'VMware File System partition', 'FC': 'VMware Swap partition', '64': 'PC-ARMOUR protected partition or Novell Netware 286, 2.xx', 'FE': 'SpeedStor > 1024 cyl. or LANstep or IBM PS/2 IML (Initial Microcode Load) partition, located at the end of the disk. 
or Windows NT Disk Administrator hidden partition or Linux Logical Volume Manager partition (old)', 'FF': 'Xenix Bad Block Table', '67': 'Novell', '68': 'Novell', '69': 'Novell Netware 5+, Novell Netware NSS Partition', 'F9': 'pCache', 'FD': 'Linux raid partition with autodetect using persistent superblock', '9A': 'Free FDISK 0.96+ hidden Primary DOS FAT16 partitition (LBA)', '9B': 'Free FDISK 0.96+ hidden DOS extended partitition (LBA)', '9E': 'ForthOS partition', '9F': 'BSD/OS', 'C9': 'Reserved for DR-DOS 8.0+', 'C8': 'Reserved for DR-DOS 8.0+', 'C3': 'Hidden Linux swap', 'C2': 'Unused or Hidden Linux', 'C1': 'DRDOS/secured (FAT-12)', 'C0': 'CTOS or REAL/32 secure small partition or NTFT Partition or DR-DOS/Novell DOS secured partition', 'C7': 'Windows NT corrupted NTFS volume/stripe set or Syrinx boot', 'C6': 'DRDOS/secured (FAT-16, >= 32M) or Windows NT corrupted FAT16 volume/stripe set', 'C5': 'DRDOS/secured (extended)', 'C4': 'DRDOS/secured (FAT-16, < 32M)', 'CC': 'DR-DOS 7.04+ secured FAT32 (LBA)/', 'CB': 'DR-DOS 7.04+ secured FAT32 (CHS)/', 'CA': 'Reserved for DR-DOS 8.0+', 'CF': 'DR-DOS 7.04+ secured EXT DOS (LBA)/', 'CE': 'DR-DOS 7.04+ FAT16X (LBA)/', 'CD': 'CTOS Memdump?', '99': 'DCE376 logical drive', '98': 'Free FDISK 0.96+ hidden Primary DOS FAT32 partitition (LBA) or Datalight ROM-DOS Super-Boot Partition', '91': 'Free FDISK 0.96+ hidden DOS extended partitition', '90': 'Free FDISK 0.96+ hidden Primary DOS FAT16 partitition', '93': 'Hidden Linux native partition or Amoeba', '92': 'Free FDISK 0.96+ hidden Primary DOS large FAT16 partitition', '95': 'MIT EXOPC native partitions', '94': 'Amoeba bad block table', '97': 'Free FDISK 0.96+ hidden Primary DOS FAT32 partitition', '96': 'CHRP ISO-9660 filesystem', 'F0': 'Linux/PA-RISC boot loader', 'F1': 'Storage Dimensions SpeedStor', 'F2': 'DOS 3.3+ secondary partition', 'F3': 'Reserved', 'F4': 'SpeedStor large partition or Prologue single-volume partition', '6E': '??', '17': 'Hidden IFS (e.g., HPFS)', 'F7': 
'DDRdrive Solid State File System', '19': 'Unused', '18': 'AST SmartSleep Partition', '33': 'Reserved', '32': 'NOS', 'DF': 'DG/UX virtual disk manager partition or BootIt EMBRM', 'DD': 'Hidden CTOS Memdump?', 'DE': 'Dell PowerEdge Server utilities (FAT fs)', 'DB': 'Digital Research CP/M, Concurrent CP/M, Concurrent DOS or CTOS (Convergent Technologies OS -Unisys) or KDG Telemetry SCPU boot', 'DA': 'Non-FS Data or Powercopy Backup', '3C': 'PartitionMagic recovery partition', '35': 'JFS on OS/2 or eCS', '83': 'Linux native partition', '34': 'Reserved', '80': 'MINIX until 1.4a', '7F': 'Unused', '7E': 'Unused', '48': 'EUMEL/Elan', '63': 'Unix System V (SCO, ISC Unix, UnixWare, ...), Mach, GNU Hurd', '47': 'EUMEL/Elan', '44': 'GoBack partition', '45': 'Boot-US boot manager or Priam or EUMEL/Elan', '42': 'Linux swap (sharing disk with DRDOS) or SFS (Secure Filesystem) or Windows 2000 dynamic extended partition marker', '43': 'Linux native (sharing disk with DRDOS)', '40': 'Venix 80286 or PICK', '41': 'Linux/MINIX (sharing disk with DRDOS) or Personal RISC Boot or PPC PReP (Power PC Reference Platform) Boot', 'A1': 'Laptop hibernation partition or HP Volume Expansion (SpeedStor variant)', 'A0': 'Laptop hibernation partition', 'A3': 'HP Volume Expansion (SpeedStor variant)', 'A5': 'BSD/386, 386BSD, NetBSD, FreeBSD', 'A4': 'HP Volume Expansion (SpeedStor variant)', 'A7': 'NeXTStep', 'A6': 'OpenBSD or HP Volume Expansion (SpeedStor variant)', 'A9': 'NetBSD', 'A8': 'Mac OS-X', '3D': 'Hidden NetWare', 'AA': 'Olivetti Fat 12 1.44MB Service Partition', 'AB': 'Mac OS-X Boot partition or GO! 
partition', 'AE': 'ShagOS filesystem', 'AF': 'ShagOS swap partition or MacOS X HFS', '77': 'M2FS/M2CS partition or VNDI Partition', '76': 'Reserved', '75': 'IBM PC/IX', '74': 'Reserved or Scramdisk partition', '73': 'Reserved', '72': 'V7/x86', '71': 'Reserved', '70': 'DiskSecure Multi-Boot', '4F': 'QNX4.x 3rd part or Oberon partition', '4D': 'QNX4.x', '4E': 'QNX4.x 2nd part', '4C': 'Oberon partition', '78': 'XOSL FS', '46': 'EUMEL/Elan', 'D8': 'CP/M-86', 'D6': 'Old Multiuser DOS secured FAT16 >=32M', 'D4': 'Old Multiuser DOS secured FAT16 <32M', 'D5': 'Old Multiuser DOS secured extended partition', 'D0': 'REAL/32 secure big partition or Multiuser DOS secured partition', 'D1': 'Old Multiuser DOS secured FAT12'}
self.supported_commands = ('hex', 'mbr', 'fat-vbr', 'date', 'time', 'datetime', 'little-endian', 'big-endian')
self.command_descriptions = {
'hex': 'The default interpretation mode. Presents the input in a fixed-width font along with its ASCII decoding. All other commands fall back to this interpretation if an error occurs.',
'mbr': 'Analyzes the given input as a Master Boot Record.',
'fat-vbr': 'Analyzes the given input as a Boot Sector for an FAT volume.',
'date': 'Analyzes the given input as an FAT date value.',
'time': 'Analyzes the given input as an FAT time value.',
'datetime': 'Analyzes the given input as FAT date and time values.',
'little-endian': 'A sub-command, to be used with one of the other commands above. Tells Forensie to process the input for little endian format.',
'big-endian': 'A sub-command, to be used with one of the other commands above. Tells Forensie to process the input for big endian format.'}
    def RemoveDelimiters(self):
        """Removes standard delimiting characters from the input text. No
        return value. Instead, self.inputStr is changed directly.
        """
        # Spaces, dashes, pipes and newlines commonly separate hex-dump bytes.
        badchars = ' -|\n'
        try:
            # Python 2.6+: a None table plus deletechars simply deletes them.
            self.inputStr = str(self.inputStr).translate(None, badchars) # Removes all spaces and pipes from input string
        except(TypeError): # Python 2.5 doesn't support None translation tables...
            self.inputStr = str(self.inputStr).translate(string.maketrans('', ''), badchars)
    def ProcessInput(self, repeatOnFail=0):
        """Processes the text provided to the constructor. This method is the
        flagship of Forensie. All processing of text springboards from this
        point. First the type of the input is determined, hex, binary, or
        other. Binary text is converted to hex before further processing. If
        neither binary nor hex text is found, the first line is parsed for
        commands and the rest of the lines are reprocessed for binary or
        hex text in a recursive manner. All commands are kept cumulatively
        and are processed in the order they were discovered.
        Returns a tuple in the form (printable results, 'success' or 'fail',
        type of processing done to input).
        """
        # Check input format: sets self.format to 'hex' or a "bad: ..." message.
        self.CheckBinary()
        self.bytes = []
        if self.format[:3] == 'bad' or self.format != 'hex':
            # Not clean hex yet: peel one more leading line off the original
            # text (it may hold '>>' commands) and retry on the remainder.
            if type(repeatOnFail) == int:
                line = 2+repeatOnFail # Blip always starts with '\n'; don't want to count that line
                #print "Trying to split the lines...",
                tmp = self.origStr.split('\n', line)
                try:
                    # StrFlatten raises IndexError once no lines remain.
                    self.inputStr = StrFlatten(tmp[line:])
                except IndexError: # No more lines to take off
                    print "Failed on try %d" % repeatOnFail
                    return ('Parsed all lines unsuccessfully.', 'fail', None)
                else:
                    #print 'Bad input format: %s\nTrying again, #%d' % (self.format, repeatOnFail+1)
                    # Harvest commands from the stripped-off lines, then recurse.
                    self.GetCommand(tmp[:line])
                    self.RemoveDelimiters()
                    return self.ProcessInput(repeatOnFail+1)
            else:
                return ('', 'fail', None)
        # Text is in an acceptable format. Process according to a command or process as hex text.
        try:
            return self.ExecuteCommand()
        except self.FormatError, m:
            # Any command-specific failure falls back to a plain hex dump.
            if F_DEBUG: print m
            return ('%s%s'%(m, self.DecodeHex()), 'success', 'hex')
def ExecuteCommand(self):
"""Processes the input text according to the stored command(s). Calls
the method corresponding to the first command given. Raises a
FormatError if no valid command is found. If called from
ProcessInput(), this will cause the text to be processed as plain
hex.
Returns the value returned from whichever method is called.
"""
# Process according to first given command
try:
com = self.commands[0]
except IndexError: # No command found
raise self.FormatError('No command found.')
except AttributeError: # No command found
raise self.FormatError('No command found.')
if 'little-endian' in self.commands:
self.endian = 'little'
elif 'big-endian' in self.commands:
self.endian = 'big'
if com == 'mbr':
return self.MBRdetector()
elif self.commands[0] == 'fat-vbr':
return self.FATVBRdetector()
elif com == 'date':
#if 'fat' in self.commands:
return self.DateDetector()
elif com == 'time':
return self.TimeDetector()
elif com == 'datetime':
return self.DateTimeDetector()
elif com == 'hex':
return (self.DecodeHex(), 'success', 'hex')
else: raise self.FormatError('An unsupported command passed through: %s' % com)
def MBRdetector(self):
"""Entry point for interpreting the input as an MBR. Begins processing
the input text and determines if the length and boot signature are
valid before proceeding. Splits the four primary partition tables
and sends them each to GetPartInfo(). The information from these
four entries together make up the bulk of the valuable information
in an MBR, so it is sent directly to FormatPartInfo().
Returns a regular success tuple (see ProcessInput()).
"""
self.bytes = ChopHexStr(self.inputStr)
# Check length of string
if len(self.bytes) != 512:
raise self.FormatError('MBR is of improper length: %d, expected 512' % len(self.bytes))
# Determine Endian
if [self.bytes[-2], self.bytes[-1].upper()] == ['55', 'AA']:
# Little Endian
self.endian = 'little'
elif [self.bytes[-2].upper(), self.bytes[-1]] == ['AA', '55']:
# Big Endian
self.endian = 'big'
else:
#print 'MBR does not have a valid signature'
raise self.FormatError('MBR does not have a valid signature.')
partitionsInfo = []
for n in range(4):
offset = 446+16*n # Boot code length + offset of the partitions already parsed
partEntry = self.bytes[offset:offset+16]
partitionsInfo += [self.GetPartInfo(partEntry)]
return (self.FormatPartInfo(partitionsInfo), 'success', 'mbr')
def DateDetector(self, alt=None, raw=False):
"""Detects and interprets an FAT date value. Alternate text can be
passed to alt if the calling code needs text other than the input
string to be processed. Also, if raw is True, the processing
results are not put into a string or the success tuple before being
returned.
Returns a regular success tuple (see ProcessInput()).
"""
if alt == None: alt = self.inputStr
if len(alt) != 4: raise self.FormatError('Date value has improper length of %d, expected 4' % len(alt))
result = self.GetDate(Hex2Dec(alt, self.endian))
if raw: return result # Unformatted (raw) results requested
return ('Date Value: %d %s, %d\nProcessed for %s endian format' % (result+(self.endian,)), 'success', 'date')
def GetDate(self, dateNum):
"""Performs the calculation necessary for converting a given dateNum
value to a tuple in the format (day, month name, year). Raises
self.FormatError on any invalid values.
"""
if type(dateNum) != int:
raise self.FormatError("While converting a date value, received unexpected input type: %s, expected 'int'" %type(dateNum))
day = dateNum & int('11111', 2)
month = (dateNum & int('111100000', 2)) >> 5
year = (dateNum >> 9) + 1980
months = [None, 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
if not (day and month):
raise self.FormatError('Invalid day or month value of 0 in number: %d' % dateNum)
try:
return (day, months[month], year)
except IndexError: # Indexed a month > 12
raise self.FormatError('Month value in date number %d is greater than 12: %d' % (dateNum, month))
def TimeDetector(self, alt=None, raw=False):
"""Detects and interprets an FAT time value. Alternate text can be
passed to alt if the calling code needs text other than the input
string to be processed. Also, if raw is True, the processing
results are not put into a string or the success tuple before being
returned. If the time value includes 100ths of seconds, these are
added to the values sent to GetTime().
Returns a regular success tuple (see ProcessInput()).
"""
if alt == None: alt = self.inputStr
msec = 0.0
off = 0
if len(alt) == 6: # Time includes microsecond units
msec = Hex2Dec(alt[:2], self.endian)/100.0
off = 2
elif len(alt) != 4: raise self.FormatError('Time value has improper length of %d, expected 4 or 6' % len(alt))
result = self.GetTime(Hex2Dec(alt[off:], self.endian), msec)
if raw: return result # Unformatted (raw) results requested
return ('Time Value: %02d:%02d:%05.2f\nProcessed for %s endian format' % (result+(self.endian,)), 'success', 'time')
def GetTime(self, timeNum, msec=0.0):
"""Performs the calculation necessary for converting a given timeNum
value to a tuple in the format (hour, minutes, seconds). Raises
self.FormatError on any invalid values.
"""
if type(timeNum) != int:
raise self.FormatError('While converting a time value, received unexpected input type: %s'%type(timeNum))
seconds = timeNum & int('11111', 2) # First 5 LSBs
minutes = (timeNum & int('11111100000', 2)) >> 5 # Next 6 bits minus first 5 LSBs
hour = timeNum >> 11 # Last 5 bits minus the first 11 LSBs
if hour > 23 or minutes > 59 or seconds > 29:
raise self.FormatError('Time value %d has an improper hour, minute, or second value: ' % timeNum)
return (hour, minutes, (2 * seconds)+msec)
def DateTimeDetector(self):
"""Detects and interprets an FAT date and time value by calling
DateDetector() and TimeDetector() with the raw option set to True.
Returns a regular success tuple (see ProcessInput()).
"""
# Acceptable length for this kind of processing is either 8 or 10
if len(self.inputStr) not in (8, 10): raise self.FormatError('Datetime value has improper length of %d, expected 8 or 10' % len(self.inputStr))
date = self.DateDetector(self.inputStr[-4:], True)
time = self.TimeDetector(self.inputStr[:-4], True)
return ('Time & Date Value: %02d:%02d:%05.2f, %d %s, %d\nProcessed for %s endian format' % (time+date+(self.endian,)), 'success', 'datetime')
def GetCommand(self, lines):
"""Searches lines for supported commands which are then stored in the
self.commands list. Any words that are not commands are simply
ignored.
No return value.
"""
coms = []
for line in lines:
#print line
if ">>" == line[:2]:
for x in line[2:].split():
if x.strip() == '': continue
elif x.lower() in self.supported_commands:
#print "Command found: %s" % x.lower()
coms += [x.lower()]
self.commands = coms
    def GetPartInfo(self, entry):
        """Retrieves a single primary partition's information from entry,
        stores it in a dictionary, and returns the dictionary. The entry
        must be in standard IBM compatible partition table format (a list
        of 16 two-character hex byte strings).
        """
        # order == -1 walks multi-byte fields backwards for little endian.
        if self.endian == 'little': order = -1
        else: order = 1
        info = {}
        # Byte 0: boot indicator flag.
        if entry[0] == '00':
            info['State'] = 'Inactive'
        elif entry[0] == '80':
            info['State'] = 'Active'
        else:
            info['State'] = 'Invalid value'
        # Bytes 1-3: start CHS address.
        info['CHS Address'] = 'Head: %d; Cylinder: %d; Sector: %d' % self.GetCHS(entry)
        # Byte 4: partition type ID, looked up in the table built in __init__.
        try:
            info['Partition Type'] = self.PType[entry[4]]
        except KeyError: # Invalid entry, may be VBR instead of MBR
            info['Partition Type'] = "Unknown - Input may be VBR instead of MBR"
        # Bytes 5-7: end CHS address.
        info['End CHS Address'] = 'Head: %d; Cylinder: %d; Sector: %d' % self.GetCHS(entry, True)
        # LBA of first sector and Number of blocks: Not always used
        # Bytes 8-11: reversed for little endian before concatenation.
        lbaVals = []
        if order == -1:
            lbaVals = entry[11:7:-1]
        elif order == 1:
            lbaVals = entry[8:12]
        lbaStr = ''
        for x in lbaVals:
            lbaStr += x
        info['LBA of First Sector'] = int(lbaStr, 16)
        # Bytes 12-15: total sector count, same endian handling.
        blocksVals = []
        if order == -1:
            blocksVals = entry[15:11:-1]
        elif order == 1:
            blocksVals = entry[12:16]
        blocksStr = ''
        for x in blocksVals:
            blocksStr += x
        info['Number of Blocks'] = int(blocksStr, 16)
        return info
def FATVBRdetector(self):
"""Entry point for interpreting the input as an FAT VBR. Begins
processing the input text and determines if the length and boot
signature are valid before proceeding. If the input text passes
these tests, it is passed on to GetFATInfo().
Returns a regular success tuple (see ProcessInput()).
"""
# Check size, signature, much like MBR
self.bytes = ChopHexStr(self.inputStr)
# Check length of string
if len(self.bytes) != 512:
raise self.FormatError('FAT VBR is of improper length: %d, expected 512' % len(self.bytes))
# Determine Endian
if [self.bytes[-2], self.bytes[-1].upper()] == ['55', 'AA']:
# Little Endian
self.endian = 'little'
elif [self.bytes[-2].upper(), self.bytes[-1]] == ['AA', '55']:
# Big Endian
self.endian = 'big'
else:
raise self.FormatError('FAT VBR does not have a valid signature.')
return (self.FormatFATInfo(self.GetFATInfo()), 'success', 'fat-mbr')
def GetFATInfo(self):
"""Interprets the information from the FAT boot sector and returns a
dictionary with it. Works with FAT12/16/32.
"""
# Copy byte information
byt = self.bytes
fatInfo = {}
fatdata = (('OEM Name', 3, 8, 'ascii'),
('Bytes per Sector', 11, 2, 'num'),
('Sectors per Cluster', 13, 1, 'num'),
('Reserved Sector Count', 14, 2, 'num'),
('Number of FATs', 16, 1, 'num'),
('Sectors per track', 24, 2, 'num'),
('Number of Heads', 26, 2, 'num'),
('Hidden Sectors Preceeding', 28, 4, 'num'))
for w, x, y, z in fatdata:
self.AddFATInfo(fatInfo, byt, w, x, y, z)
# Total Sectors Calculation
ttlSectors = Hex2Dec(StrFlatten(byt[19:19+2]), self.endian)
if not ttlSectors:
ttlSectors = Hex2Dec(StrFlatten(byt[32:32+4]), self.endian)
fatInfo['Total Sectors'] = ttlSectors
# Media Descriptor Code Interpretation
medDesc = Hex2Dec(byt[21], self.endian)
if medDesc == 240:
fatInfo['Media Descriptor'] = '3.5" Double Sided, 80 tracks per side, 18 or 36 sectors per track (1.44MB or 2.88MB). 5.25" Double Sided, 80 tracks per side, 15 sectors per track (1.2MB). Used also for other media types.'
elif medDesc == 248:
fatInfo['Media Descriptor'] = 'Fixed disk (i.e. Hard disk).'
elif medDesc == 249:
fatInfo['Media Descriptor'] = '3.5" Double sided, 80 tracks per side, 9 sectors per track (720K). 5.25" Double sided, 80 tracks per side, 15 sectors per track (1.2MB)'
elif medDesc == 250:
fatInfo['Media Descriptor'] = '5.25" Single sided, 80 tracks per side, 8 sectors per track (320K)'
elif medDesc == 251:
fatInfo['Media Descriptor'] = '3.5" Double sided, 80 tracks per side, 8 sectors per track (640K)'
elif medDesc == 252:
fatInfo['Media Descriptor'] = '5.25" Single sided, 40 tracks per side, 9 sectors per track (180K)'
elif medDesc == 253:
fatInfo['Media Descriptor'] = '5.25" Double sided, 40 tracks per side, 9 sectors per track (360K). Also used for 8".'
elif medDesc == 254:
fatInfo['Media Descriptor'] = '5.25" Single sided, 40 tracks per side, 8 sectors per track (160K). Also used for 8".'
elif medDesc == 255:
fatInfo['Media Descriptor'] = '5.25" Single sided, 40 tracks per side, 8 sectors per track (160K). Also used for 8".'
else:
fatInfo['Media Descriptor'] = 'Invalid Value'
# For remaining bytes, determine if FAT 12, 16, or 32
if byt[17] == 0:
# FAT32
fat32data = (('Sectors per FAT', 36, 4, 'num'),
('Version', 42, 2, 'num'),
('Cluster # of Root Dir', 44, 4, 'num'),
('Sector # of FS Information', 48, 2, 'num'))
backupSec = Hex2Dec(StrFlatten(byt[50:50+2]), self.endian)
if backupSec:
fatInfo['Sector # of Boot Sector Backup'] = backupSec
if byt[64] == '00':
fatInfo['Drive Type'] = 'Removable Media'
elif byt[64] == '80':
fatInfo['Drive Type'] = 'Hard Disk'
if byt[66] == '29': # Extended boot signature is valid
fat32data = fat32data + (('Serial Number', 67, 4, 'num'),
('Volume Label', 71, 11, 'ascii'),
('File System Type', 82, 8, 'ascii'))
for w, x, y, z in fat32data:
self.AddFATInfo(fatInfo, byt, w, x, y, z)
else:
# Process FAT12 and FAT16 the same way
fat16data = (('Max Root Directories', 17, 2, 'num'), ('Sectors per FAT', 22, 2, 'num'))
if byt[36] == '00':
fatInfo['Drive Type'] = 'Removable Media'
elif byt[36] == '80':
fatInfo['Drive Type'] = 'Hard Disk'
if byt[38] == '29': # Extended boot signature is valid
fat16data = fat16data + (('Serial Number', 39, 4, 'num'),
('Volume Label', 43, 11, 'ascii'),
('File System Type', 54, 8, 'ascii'))
for w, x, y, z in fat16data:
self.AddFATInfo(fatInfo, byt, w, x, y, z)
return fatInfo
def AddFATInfo(self, obj, byt, label, off, olen, dtype):
"""To simplify the commands in GetFATInfo(), this method accepts a
dictionary object, the bytes from the VBR, the string to be the
dictionary key, the byte offset of the data, the length of the data,
and the data type which should be either 'ascii' or 'num'. No
return value because the dictionary is added to directly.
"""
if dtype == 'ascii':
obj[label] = self.DecodeHex(StrFlatten(byt[off:off+olen]), float('inf'), False)
elif dtype == 'num':
obj[label] = Hex2Dec(StrFlatten(byt[off:off+olen]), self.endian)
else:
raise self.FormatError("Invalid data type passed to AddFATInfo: %s, expected 'ascii' or 'num'" % dtype)
def FormatFATInfo(self, info):
"""Puts all the data from the dictionary info into a string for
printing. The categories are defined in a local tuple and looped
through. All categories that do not exist in info are skipped.
Returns the string for printing.
"""
cats = ('OEM Name', 'Volume Label', 'Drive Type', 'Media Descriptor', 'File System Type', 'Bytes per Sector', 'Sectors per Cluster', 'Sectors per track', 'Number of Heads', 'Total Sectors', 'Hidden Sectors Preceeding', 'Reserved Sector Count', 'Sectors per FAT', 'Number of FATs', 'Cluster # of Root Dir', 'Max Root Directories', 'Sector # of FS Information', 'Sector # of Boot Sector Backup', 'Version')
end = 'Translation of FAT VBR:\n\n'
for cat in cats:
try:
end += '%s: %s\n' % (cat, info[cat])
except KeyError:
continue
return end
def GetCHS(self, entry, end=False):
"""Retrieves the Cylinder Head Sector address from an MBR partition
table entry. Since both the start CHS and end CHS are calculated
the same way, if end is True, the offset will be changed so the
proper values are put into the calculation.
Returns a tuple in the form (head, cylinder, sector).
"""
if not end:
h, c, s = 1, 2, 3
else:
h, c, s = 5, 6, 7
cyl = int(entry[c], 16)&0xc0*4 + int(entry[s], 16)
sec = int(entry[s], 16)&0x3f
return (int(entry[h], 16), cyl, sec)
def GetBytes(self, row, width, hexstr=None):
"""Returns a string of the hex bytes from the input string for the
given row and width. Separates the bytes with a space.
"""
if hexstr == None:
self.bytes = ChopHexStr(self.inputStr) # Make sure we have the input string parsed into bytes
else:
self.bytes = ChopHexStr(hexstr)
end = ''
for byte in self.bytes[row*width:(row+1)*width]:
end += '%s ' % byte
return end[:-1].ljust(width*3-1) # Take off last space before returning, make same width as other rows
    def DecodeHex(self, numstr=None, width=16, pretty=True):
        """Returns the decoded hex string of numstr with newlines after width
        characters. Replaces any non-printing characters with a period. If
        width is infinity, no newline characters are inserted. If pretty is
        True (default), the hex byte offset is printed in a column to the
        left, the hex bytes are printed in a center column, the ASCII
        decoding is in a column on the right, and a header with labels for
        the first two columns are all added to the string.
        """
        from math import ceil, log
        if numstr == None: numstr = self.inputStr
        tmp = ''
        # Python 2 only: str.decode('hex') turns the hex text into raw bytes.
        for ch in numstr.decode('hex'):
            # repr() escapes non-printables as '\xNN'; collapse those to '.'.
            tmp2 = sub(r'\\x..', '.', repr(ch)[1:-1])
            if len(tmp2) == 1: tmp += tmp2
            else: tmp += '.'
        if pretty: # Does a 'pretty print' of the decoded values
            strLength = len(tmp)
            # NOTE(review): log(strLength, width) raises ValueError when the
            # input is empty — confirm callers never pass '' with pretty=True.
            offsetWidth = max(int(ceil(log(strLength, width))), 4) # At least 4, otherwise log
            # Build an '00 01 02 ...' column-header row for the byte columns.
            byteOffs = ''
            for x in xrange(width):
                byteOffs += ('%02x' % x).upper()
            end = '\n\n%s %s\n%s %s %s\n' % ('Offset'.center(offsetWidth+2), self.GetBytes(0, width, byteOffs), '-'*(offsetWidth+2), '-'*(width*3+1), '-'*(width+1))
            # One output row per width-sized slice of the decoded text.
            for x in xrange(int(ceil(float(strLength)/width))):
                offset = ('%x'%(x*width)).rjust(offsetWidth, '0')
                end += ' %s | %s | %s\n' % (offset, self.GetBytes(x, width), tmp[x*width:(x+1)*width])
            return end
        elif width == float('inf'):
            return tmp
        else:
            return '\n'.join(findall('.{1,%d}'%width, tmp)) # Inserts a newline after width characters
def CheckHex(self):
"""Determines if the input string has only valid hex characters.
Usually this method is called from CheckBinary(), which calls this
method when its test for binary characters fails.
No return value. Instead self.format is set to 'hex' or 'bad'.
"""
mbr = self.inputStr # Make a copy of the MBR string
for ch in mbr:
if ch not in string.hexdigits:
self.format = "bad: contains '%s'" % ch
return
self.format = 'hex'
def CheckBinary(self):
"""Determines if the input string has only valid binary characters. If
the test fails, this method calls CheckHex(). If the test passes,
the text is sent to be converted to hex before further processing,
which simplifies the writing of all other methods which process the
input text. This method is the first before CheckHex() because it
will fail earlier on non-binary character strings given that the
set of valid characters = {'1', '0'}.
No return value. Instead self.format is set to 'hex'.
"""
mbr = self.inputStr # Make a copy of the MBR string
for ch in mbr:
if ch not in '01':
return self.CheckHex()
self.inputStr = Bin2Hex(mbr) # MBR string is in binary, convert to hex
self.format = 'hex'
    def FormatPartInfo(self, info):
        """Accepts a list (info) that has the information for the four
        partitions in the disk and formats them for being printed. Uses the
        sector counts to determine if any unallocated spaces exist between
        the partitions and inserts a notice at the top of the printed
        results if any are found.
        Returns the string.
        """
        cats = ('State', 'CHS Address', 'Partition Type', 'End CHS Address', 'LBA of First Sector', 'Number of Blocks')
        end = 'Translation of MBR:\n\n'
        # Look for empty spaces inbetween partitions
        lba = 'LBA of First Sector'
        num = 'Number of Blocks'
        ilen = len(info)
        # Sentinel trick: for n == 0, info[n-1] wraps around to info[-1],
        # i.e. this appended entry, so partition 1 is compared against the
        # MBR itself (LBA 0, length 1 sector).
        info.append({lba: '0', num: '1'})
        for n in xrange(ilen):
            # Gap = this partition's start minus the previous one's end.
            diff = int(info[n][lba])-(int(info[n-1][lba])+int(info[n-1][num]))
            if diff > 0:
                end += '--NOTICE--\n%d sectors are unallocated in between partitions %d and %d\n\n' % (diff, n, n+1)
        info.pop()
        for n in xrange(len(info)): # Loop through the 4 partitions
            end += "Partition %d\n\n" % (n+1)
            for cat in cats:
                end += "%s: %s\n" % (cat, info[n][cat])
            end += "\n"
        return end
def GetCommandDescriptions(self):
"""Returns a string with all supported commands in the format: \n\ncommand: description\ncommand: description\n...
"""
end = ''
for k in self.supported_commands:
end += '\n%s: %s' % (k, self.command_descriptions[k])
return end
def StrFlatten(mylist):
    """Concatenates the string forms of every item in mylist into a single
    string and returns it. Raises IndexError when mylist is the empty list
    (callers rely on this to detect exhausted input).
    """
    if mylist == []: # deliberately list-only; an empty tuple flattens to ''
        raise IndexError
    return ''.join('%s' % item for item in mylist)
def Bin2Hex(numstr):
    """Converts the given binary numstr (string) to a hex string and returns
    it. The input is consumed in 8-bit (one byte) chunks from the left; a
    trailing partial chunk is converted on its own.

    BUG FIX: the original formatted each chunk with '%x', dropping leading
    zeros (e.g. '00000001' became '1' instead of '01'), which broke the
    two-characters-per-byte alignment every consumer (ChopHexStr etc.)
    depends on. Each chunk is now zero-padded to one hex digit per nibble.
    """
    result = ''
    while numstr:
        chunk, numstr = numstr[:8], numstr[8:]
        # One hex digit per (partial) nibble, zero padded.
        result += '%0*x' % ((len(chunk) + 3) // 4, int(chunk, 2))
    return result
def Hex2Dec(numstr, endian='little'):
    """Takes a hex number string and converts it to a decimal number.
    With endian='little' (the default) the string is treated as a byte
    sequence in little-endian order and is byte-reversed before the
    conversion. Returns the number (0 for an empty string).
    """
    digits = numstr
    if endian == 'little':
        # Reverse the string two characters (one byte) at a time.
        pairs = [digits[i:i+2] for i in range(0, len(digits), 2)]
        digits = ''.join(reversed(pairs))
    # Horner evaluation; index() raises ValueError on non-hex characters,
    # matching the original behaviour.
    value = 0
    for ch in digits:
        value = value * 16 + string.hexdigits.index(ch.lower())
    return value
def ChopHexStr(hexstr):
    """Takes a hex string and divides it into strings of length 2 (each
    representing a byte), returned as a list. A trailing odd character
    becomes a one-character final element.
    """
    return [hexstr[i:i+2] for i in range(0, len(hexstr), 2)]
def Hex2BinStr(hexstr, endian='little'):
    """
    Translate a hex text string into a binary text string via the
    HEXBIN_TRANS lookup table.

    NOTE(review): the little-endian reversal below is commented out, so
    the 'endian' argument currently has no effect -- every input is
    processed left to right. Preserved as-is.
    """
    step = 1
    #if endian == 'little':
    #    step = -1
    pieces = [HEXBIN_TRANS[ch] for ch in hexstr[::step]]
    return ''.join(pieces)
if __name__ == '__main__':
    # Self-test entry point: run the processor over a sample 512-byte MBR
    # dump (space-separated hex bytes, ending with the 55 AA signature).
    mbr = '33 C0 8E D0 BC 00 7C FB 50 07 50 1F FC BE 1B 7C BF 1B 06 50 57 B9 E5 01 F3 A4 CB BD BE 07 B1 04 38 6E 00 7C 09 75 13 83 C5 10 E2 F4 CD 18 8B F5 83 C6 10 49 74 19 38 2C 74 F6 A0 B5 07 B4 07 8B F0 AC 3C 00 74 FC BB 07 00 B4 0E CD 10 EB F2 88 4E 10 E8 46 00 73 2A FE 46 10 80 7E 04 0B 74 0B 80 7E 04 0C 74 05 A0 B6 07 75 D2 80 46 02 06 83 46 08 06 83 56 0A 00 E8 21 00 73 05 A0 B6 07 EB BC 81 3E FE 7D 55 AA 74 0B 80 7E 10 00 74 C8 A0 B7 07 EB A9 8B FC 1E 57 8B F5 CB BF 05 00 8A 56 00 B4 08 CD 13 72 23 8A C1 24 3F 98 8A DE 8A FC 43 F7 E3 8B D1 86 D6 B1 06 D2 EE 42 F7 E2 39 56 0A 77 23 72 05 39 46 08 73 1C B8 01 02 BB 00 7C 8B 4E 02 8B 56 00 CD 13 73 51 4F 74 4E 32 E4 8A 56 00 CD 13 EB E4 8A 56 00 60 BB AA 55 B4 41 CD 13 72 36 81 FB 55 AA 75 30 F6 C1 01 74 2B 61 60 6A 00 6A 00 FF 76 0A FF 76 08 6A 00 68 00 7C 6A 01 6A 10 B4 42 8B F4 CD 13 61 61 73 0E 4F 74 0B 32 E4 8A 56 00 CD 13 EB D6 61 F9 C3 49 6E 76 61 6C 69 64 20 70 61 72 74 69 74 69 6F 6E 20 74 61 62 6C 65 00 45 72 72 6F 72 20 6C 6F 61 64 69 6E 67 20 6F 70 65 72 61 74 69 6E 67 20 73 79 73 74 65 6D 00 4D 69 73 73 69 6E 67 20 6F 70 65 72 61 74 69 6E 67 20 73 79 73 74 65 6D 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 2C 44 63 8F D5 32 82 00 00 00 01 01 00 DE FE 3F 08 3F 00 00 00 8A 34 02 00 80 00 01 09 07 FE FF FF C9 34 02 00 B0 1A 18 1D 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 55 AA'
    foren = Forensie(mbr)
    # Prints the analysis tuple returned by ProcessInput().
    print foren.ProcessInput()
# Wave Imports
from waveapi import events
from waveapi import robot
from waveapi import appengine_robot_runner
import logging
# Other Imports
from Forensie import Forensie
from re import sub
def OnWaveletSelfAdded(event, wavelet):
    """
    Greet the wave when Forensie is added: post the list of supported
    commands and the required '>>' command syntax as a reply.
    """
    helper = Forensie('')
    welcome = ("Welcome to Forensie!\n\nThe following are valid commands:\n%s\n\nEach command must be entered in the following format at the beginning of a line, and must begin with the '>>' characters:\n\n>> main-command [sub-command]"
               % helper.GetCommandDescriptions())
    wavelet.reply(welcome)
def OnBlipSubmitted(event, wavelet):
    """
    When a blip is submitted to a wave where Forensie is a participant,
    this method captures the new input, sends it to a Forensie instance for
    processing, and in the event that the processing was a success, posts
    the results to the wave.
    If there is already a post authored by Forensie, that post's contents
    will be replaced by the results obtained (determined by searching the
    list returned from getChildBlipIds()).
    If the processed text was simple hex text, the results will be given a
    fixed-width font so that it prints properly. In the future, it would be
    nice to be able to separate such results into columns or a table. This
    would allow the user to select hex or ascii text only.
    """
    # Decision engine for processing/posting called here
    mbr = event.blip.text
    foren = Forensie(mbr)
    # Get text to post in reply here
    text, status, texttype = foren.ProcessInput()
    if status == 'fail':
        # Look for an attachment ?
        # NOTE(review): image attachments are only logged, never processed;
        # presumably a placeholder for future image support -- confirm.
        for e in event.blip.elements:
            if e.type == 'IMAGE':
                print "File properties:\n%s" % e.properties
        return
    # Look through all the blips of the parent wavelet to see if we've already posted a translation
    changeTranslation = False
    child_blip_info = getChildBlipIds(event.blip)
    for c in child_blip_info:
        # Each c is (blip, creator, first 20 chars); only act on our own posts.
        if c[1] == 'forensie@appspot.com':
            if changeTranslation: # Something funky happened. Delete other post.
                wavelet.delete(c[0])
                continue
            changeTranslation = True
            if status == 'success':
                ref = c[0].all() # Get a reference to the entire previous post
                ref.delete() # Delete previous post's contents
                c[0].append(text) # Append to the previous post which is now empty
                r = c[0].all()
                # Reset any earlier font annotation before re-applying it.
                r.clear_annotation("style/fontFamily")
                if texttype == 'hex':
                    r.annotate("style/fontFamily", "monospace")
            else:
                # Wasn't able to translate or interpret the post. Delete the old one.
                # Only delete if Forensie is the sole contributor; otherwise a
                # human edited it, so just log the contributors instead.
                if len(list(c[0].contributors)) == 1: wavelet.delete(c[0])
                else: print c[0].contributors
    # No previous post found
    if not changeTranslation and status == 'success':
        r = event.blip.reply()
        r.append(text)
        r.all().clear_annotation("style/fontFamily")
        if texttype == 'hex':
            # Annotate
            r.all().annotate("style/fontFamily", "monospace")
def getChildBlipIds(blip):
    """
    Recursively collect (blip, creator, first-20-chars-of-text) tuples
    for blip and every descendant blip. A bare unicode node is reported
    as the placeholder tuple ('Unicode Blip', 'None', text) and is not
    descended into.
    """
    if type(blip) == unicode: return [('Unicode Blip', 'None', blip)]
    collected = [(blip, blip.creator, blip.text[:20])]
    for child in blip.child_blips:
        collected += getChildBlipIds(child)
    return collected
if __name__ == '__main__':
    # Instantiate the robot with its public identity/avatar, wire up the
    # event handlers, and hand control to the App Engine runner.
    myRobot = robot.Robot('Forensie',
        image_url='http://forensie.appspot.com/assets/hdd_head1.jpg',
        profile_url='http://code.google.com/p/forensie/')
    # Context.ALL so the handler can see the whole wavelet (needed to find
    # and replace any previous Forensie post).
    myRobot.register_handler(events.BlipSubmitted, OnBlipSubmitted, context=[events.Context.ALL])
    myRobot.register_handler(events.WaveletSelfAdded, OnWaveletSelfAdded)
    appengine_robot_runner.run(myRobot)
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import cookielib
import getpass
import logging
import md5
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
import tempfile
try:
import readline
except ImportError:
logging.debug("readline not found.")
pass
# The logging verbosity:
# 0: Errors only.
# 1: Status messages.
# 2: Info logs.
# 3: Debug logs.
verbosity = 1

# Max size of patch or base file.
# Files larger than this are uploaded with empty content and the
# 'file_too_large' form field set (see UploadBaseFiles).
MAX_UPLOAD_SIZE = 900 * 1024
def AreYouSureOrExit(exit_if_no=True):
  """Prompt the user for confirmation.

  Returns True iff the user typed 'y' (case-insensitive). When
  exit_if_no is True (the default), any other answer aborts the whole
  program via ErrorExit.
  """
  reply = raw_input("Are you sure you want to continue?(y/N) ").strip().lower()
  if exit_if_no and reply != "y":
    ErrorExit("User aborted")
  return reply == "y"
def GetEmail(prompt):
  """Prompts the user for their email address and returns it.

  The last used email address is saved to a file and offered up as a
  suggestion to the user. If the user presses enter without typing in
  anything the last used email address is used. If the user enters a new
  address, it is saved for next time we prompt.
  """
  cache_path = os.path.expanduser("~/.last_codereview_email_address")
  last_email = ""
  if os.path.exists(cache_path):
    try:
      cache = open(cache_path, "r")
      last_email = cache.readline().strip("\n")
      cache.close()
      prompt += " [%s]" % last_email
    except IOError:
      # Unreadable cache file: fall back to prompting with no suggestion.
      pass
  email = raw_input(prompt + ": ").strip()
  if not email:
    return last_email
  try:
    cache = open(cache_path, "w")
    cache.write(email)
    cache.close()
  except IOError:
    # Best effort only -- failing to save the suggestion is not fatal.
    pass
  return email
def StatusUpdate(msg):
"""Print a status message to stdout.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print msg
def ErrorExit(msg):
  """Print an error message to stderr and exit with status 1."""
  print >>sys.stderr, msg
  sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
  """Raised to indicate there was an error authenticating with ClientLogin."""

  def __init__(self, url, code, msg, headers, args):
    # Parent is constructed with fp=None, so read() on this error is unusable.
    urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
    # args is the parsed key=value response body from ClientLogin.
    self.args = args
    # ClientLogin reports the failure kind in the 'Error' field,
    # e.g. 'BadAuthentication' or 'CaptchaRequired'.
    self.reason = args["Error"]
class AbstractRpcServer(object):
  """Provides a common interface for a simple RPC server."""

  def __init__(self, host, auth_function, host_override=None, extra_headers={},
               save_cookies=False):
    """Creates a new HttpRpcServer.

    Args:
      host: The host to send requests to.
      auth_function: A function that takes no arguments and returns an
        (email, password) tuple when called. Will be called if authentication
        is required.
      host_override: The host header to send to the server (defaults to host).
      extra_headers: A dict of extra headers to append to every request.
        NOTE(review): mutable default argument -- safe only while no caller
        mutates the dict; confirm before relying on it.
      save_cookies: If True, save the authentication cookies to local disk.
        If False, use an in-memory cookiejar instead. Subclasses must
        implement this functionality. Defaults to False.
    """
    self.host = host
    self.host_override = host_override
    self.auth_function = auth_function
    self.authenticated = False
    self.extra_headers = extra_headers
    self.save_cookies = save_cookies
    # The opener is built by the subclass hook so cookie policy is pluggable.
    self.opener = self._GetOpener()
    if self.host_override:
      logging.info("Server: %s; Host: %s", self.host, self.host_override)
    else:
      logging.info("Server: %s", self.host)

  def _GetOpener(self):
    """Returns an OpenerDirector for making HTTP requests.

    Returns:
      A urllib2.OpenerDirector object.
    """
    raise NotImplementedError()

  def _CreateRequest(self, url, data=None):
    """Creates a new urllib request with the Host override and extra headers."""
    logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
    req = urllib2.Request(url, data=data)
    if self.host_override:
      req.add_header("Host", self.host_override)
    for key, value in self.extra_headers.iteritems():
      req.add_header(key, value)
    return req

  def _GetAuthToken(self, email, password):
    """Uses ClientLogin to authenticate the user, returning an auth token.

    Args:
      email:    The user's email address
      password: The user's password

    Raises:
      ClientLoginError: If there was an error authenticating with ClientLogin.
      HTTPError: If there was some other form of HTTP error.

    Returns:
      The authentication token returned by ClientLogin.
    """
    account_type = "GOOGLE"
    if self.host.endswith(".google.com"):
      # Needed for use inside Google.
      account_type = "HOSTED"
    req = self._CreateRequest(
        url="https://www.google.com/accounts/ClientLogin",
        data=urllib.urlencode({
            "Email": email,
            "Passwd": password,
            "service": "ah",
            "source": "rietveld-codereview-upload",
            "accountType": account_type,
        }),
    )
    try:
      response = self.opener.open(req)
      response_body = response.read()
      # Response body is newline-separated key=value pairs.
      response_dict = dict(x.split("=")
                           for x in response_body.split("\n") if x)
      return response_dict["Auth"]
    except urllib2.HTTPError, e:
      if e.code == 403:
        # 403 carries a parseable Error field describing the auth failure.
        body = e.read()
        response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
        raise ClientLoginError(req.get_full_url(), e.code, e.msg,
                               e.headers, response_dict)
      else:
        raise

  def _GetAuthCookie(self, auth_token):
    """Fetches authentication cookies for an authentication token.

    Args:
      auth_token: The authentication token returned by ClientLogin.

    Raises:
      HTTPError: If there was an error fetching the authentication cookies.
    """
    # This is a dummy value to allow us to identify when we're successful.
    continue_location = "http://localhost/"
    args = {"continue": continue_location, "auth": auth_token}
    req = self._CreateRequest("http://%s/_ah/login?%s" %
                              (self.host, urllib.urlencode(args)))
    try:
      response = self.opener.open(req)
    except urllib2.HTTPError, e:
      # The opener ignores redirects, so a successful login surfaces here
      # as an HTTPError carrying the 302 response.
      response = e
    if (response.code != 302 or
        response.info()["location"] != continue_location):
      raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
                              response.headers, response.fp)
    self.authenticated = True

  def _Authenticate(self):
    """Authenticates the user.

    The authentication process works as follows:
     1) We get a username and password from the user
     2) We use ClientLogin to obtain an AUTH token for the user
        (see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
     3) We pass the auth token to /_ah/login on the server to obtain an
        authentication cookie. If login was successful, it tries to redirect
        us to the URL we provided.

    If we attempt to access the upload API without first obtaining an
    authentication cookie, it returns a 401 response and directs us to
    authenticate ourselves with ClientLogin.
    """
    # Up to three attempts; only BadAuthentication retries, everything else
    # either breaks out (returning unauthenticated) or re-raises.
    # NOTE(review): if all three attempts fail with BadAuthentication,
    # auth_token is never bound and the _GetAuthCookie call below raises
    # NameError -- latent bug preserved as-is.
    for i in range(3):
      credentials = self.auth_function()
      try:
        auth_token = self._GetAuthToken(credentials[0], credentials[1])
      except ClientLoginError, e:
        if e.reason == "BadAuthentication":
          print >>sys.stderr, "Invalid username or password."
          continue
        if e.reason == "CaptchaRequired":
          print >>sys.stderr, (
              "Please go to\n"
              "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
              "and verify you are a human. Then try again.")
          break
        if e.reason == "NotVerified":
          print >>sys.stderr, "Account not verified."
          break
        if e.reason == "TermsNotAgreed":
          print >>sys.stderr, "User has not agreed to TOS."
          break
        if e.reason == "AccountDeleted":
          print >>sys.stderr, "The user account has been deleted."
          break
        if e.reason == "AccountDisabled":
          print >>sys.stderr, "The user account has been disabled."
          break
        if e.reason == "ServiceDisabled":
          print >>sys.stderr, ("The user's access to the service has been "
                               "disabled.")
          break
        if e.reason == "ServiceUnavailable":
          print >>sys.stderr, "The service is not available; try again later."
          break
        raise
      self._GetAuthCookie(auth_token)
      return

  def Send(self, request_path, payload=None,
           content_type="application/octet-stream",
           timeout=None,
           **kwargs):
    """Sends an RPC and returns the response.

    Args:
      request_path: The path to send the request to, eg /api/appversion/create.
      payload: The body of the request, or None to send an empty request.
      content_type: The Content-Type header to use.
      timeout: timeout in seconds; default None i.e. no timeout.
        (Note: for large requests on OS X, the timeout doesn't work right.)
      kwargs: Any keyword arguments are converted into query string parameters.

    Returns:
      The response body, as a string.
    """
    # TODO: Don't require authentication. Let the server say
    # whether it is necessary.
    if not self.authenticated:
      self._Authenticate()
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
      tries = 0
      while True:
        tries += 1
        args = dict(kwargs)
        url = "http://%s%s" % (self.host, request_path)
        if args:
          url += "?" + urllib.urlencode(args)
        req = self._CreateRequest(url=url, data=payload)
        req.add_header("Content-Type", content_type)
        try:
          f = self.opener.open(req)
          response = f.read()
          f.close()
          return response
        except urllib2.HTTPError, e:
          if tries > 3:
            raise
          elif e.code == 401:
            # Auth cookie expired or missing; re-authenticate and retry.
            self._Authenticate()
##           elif e.code >= 500 and e.code < 600:
##             # Server Error - try again.
##             continue
          else:
            raise
    finally:
      # Restore the process-wide socket timeout no matter how we exit.
      socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
  """Provides a simplified RPC-style interface for HTTP requests."""

  def _Authenticate(self):
    """Save the cookie jar after authentication."""
    super(HttpRpcServer, self)._Authenticate()
    if self.save_cookies:
      StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
      self.cookie_jar.save()

  def _GetOpener(self):
    """Returns an OpenerDirector that supports cookies and ignores redirects.

    Redirects are "ignored" because no HTTPRedirectHandler is installed,
    which lets _GetAuthCookie detect the login 302 directly.

    Returns:
      A urllib2.OpenerDirector object.
    """
    opener = urllib2.OpenerDirector()
    opener.add_handler(urllib2.ProxyHandler())
    opener.add_handler(urllib2.UnknownHandler())
    opener.add_handler(urllib2.HTTPHandler())
    opener.add_handler(urllib2.HTTPDefaultErrorHandler())
    opener.add_handler(urllib2.HTTPSHandler())
    opener.add_handler(urllib2.HTTPErrorProcessor())
    if self.save_cookies:
      self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
      self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
      if os.path.exists(self.cookie_file):
        try:
          self.cookie_jar.load()
          # A loadable cookie file means we may already be logged in.
          self.authenticated = True
          StatusUpdate("Loaded authentication cookies from %s" %
                       self.cookie_file)
        except (cookielib.LoadError, IOError):
          # Failed to load cookies - just ignore them.
          pass
      else:
        # Create an empty cookie file with mode 600
        fd = os.open(self.cookie_file, os.O_CREAT, 0600)
        os.close(fd)
      # Always chmod the cookie file
      os.chmod(self.cookie_file, 0600)
    else:
      # Don't save cookies across runs of update.py.
      self.cookie_jar = cookielib.CookieJar()
    opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
    return opener
# Command-line interface definition. The parsed values drive everything
# below (server choice, auth, issue creation, diff generation).
parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]")
parser.add_option("-y", "--assume_yes", action="store_true",
                  dest="assume_yes", default=False,
                  help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
                 dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
                 dest="verbose", default=1,
                 help="Print info level logs (default).")
group.add_option("--noisy", action="store_const", const=3,
                 dest="verbose", help="Print all logs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
                 default="codereview.appspot.com",
                 metavar="SERVER",
                 help=("The server to upload to. The format is host[:port]. "
                       "Defaults to 'codereview.appspot.com'."))
group.add_option("-e", "--email", action="store", dest="email",
                 metavar="EMAIL", default=None,
                 help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
                 metavar="HOST", default=None,
                 help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
                 dest="save_cookies", default=True,
                 help="Do not save authentication cookies to local disk.")
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
                 metavar="DESCRIPTION", default=None,
                 help="Optional description when creating an issue.")
group.add_option("--min_pylint_score", action="store", dest="min_pylint_score",
                 metavar="MIN_PYLINT_SCORE", default=None,
                 help="run pylint over changed files and require a min score.")
group.add_option("-f", "--description_file", action="store",
                 dest="description_file", metavar="DESCRIPTION_FILE",
                 default=None,
                 help="Optional path of a file that contains "
                      "the description when creating an issue.")
group.add_option("--description_editor", action="store_true",
                 dest="description_editor", metavar="DESCRIPTION_EDITOR",
                 default=False,
                 help="use an editor (EDITOR env variable) to get the "
                      "description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
                 metavar="REVIEWERS", default=None,
                 help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
                 metavar="CC", default=None,
                 help="Add CC (comma separated email addresses).")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
                 metavar="MESSAGE", default=None,
                 help="A message to identify the patch. "
                      "Will prompt if omitted.")
group.add_option("-i", "--issue", type="int", action="store",
                 metavar="ISSUE", default=None,
                 help="Issue number to which to add. Defaults to new issue.")
group.add_option("--download_base", action="store_true",
                 dest="download_base", default=False,
                 help="Base files will be downloaded by the server "
                      "(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
                 metavar="REV", default=None,
                 help="Branch/tree/revision to diff against (used by DVCS).")
group.add_option("--send_mail", action="store_true",
                 dest="send_mail", default=False,
                 help="Send notification email to reviewers.")
def GetRpcServer(options):
  """Returns an instance of an AbstractRpcServer.

  Args:
    options: The parsed command-line options (server, host, email,
      save_cookies are consulted).

  Returns:
    A new AbstractRpcServer, on which RPC calls can be made.
  """
  rpc_server_class = HttpRpcServer

  def GetUserCredentials():
    """Prompts the user for a username and password."""
    email = options.email
    if email is None:
      email = GetEmail("Email (login for uploading to %s)" % options.server)
    password = getpass.getpass("Password for %s: " % email)
    return (email, password)

  # If this is the dev_appserver, use fake authentication.
  host = (options.host or options.server).lower()
  if host == "localhost" or host.startswith("localhost:"):
    email = options.email
    if email is None:
      email = "test@example.com"
      logging.info("Using debug user %s. Override with --email" % email)
    # The dev_appserver accepts a literal login cookie; no ClientLogin needed.
    server = rpc_server_class(
        options.server,
        lambda: (email, "password"),
        host_override=options.host,
        extra_headers={"Cookie":
                       'dev_appserver_login="%s:False"' % email},
        save_cookies=options.save_cookies)
    # Don't try to talk to ClientLogin.
    server.authenticated = True
    return server

  return rpc_server_class(options.server, GetUserCredentials,
                          host_override=options.host,
                          save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
  """Encode form fields for multipart/form-data.

  Args:
    fields: A sequence of (name, value) elements for regular form fields.
    files: A sequence of (name, filename, value) elements for data to be
           uploaded as files.
  Returns:
    (content_type, body) ready for httplib.HTTP instance.
  Source:
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
  """
  BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
  CRLF = '\r\n'
  lines = []
  for (key, value) in fields:
    lines.extend(['--' + BOUNDARY,
                  'Content-Disposition: form-data; name="%s"' % key,
                  '',
                  value])
  for (key, filename, value) in files:
    lines.extend(['--' + BOUNDARY,
                  'Content-Disposition: form-data; name="%s"; filename="%s"' %
                  (key, filename),
                  'Content-Type: %s' % GetContentType(filename),
                  '',
                  value])
  # Closing boundary plus a trailing CRLF.
  lines.extend(['--' + BOUNDARY + '--', ''])
  content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
  return content_type, CRLF.join(lines)
def GetContentType(filename):
  """Guess the MIME type from filename, defaulting to octet-stream."""
  guessed, _ = mimetypes.guess_type(filename)
  return guessed or 'application/octet-stream'
# Use a shell for subcommands on Windows to get a PATH search.
# (Elsewhere, commands are exec'd directly as argument vectors.)
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
                           universal_newlines=True):
  """Executes a command and returns the output from stdout and the return code.

  Args:
    command: Command to execute.
    print_output: If True, the output is printed to stdout.
      If False, both stdout and stderr are ignored.
    universal_newlines: Use universal_newlines flag (default: True).

  Returns:
    Tuple (output, return code)
  """
  logging.info("Running %s", command)
  p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                       shell=use_shell, universal_newlines=universal_newlines)
  if print_output:
    # Echo each line as it arrives while also accumulating it.
    output_array = []
    while True:
      line = p.stdout.readline()
      if not line:
        break
      print line.strip("\n")
      output_array.append(line)
    output = "".join(output_array)
  else:
    output = p.stdout.read()
  p.wait()
  # NOTE(review): stderr is only drained after wait(); a child that fills
  # the stderr pipe buffer could block -- acceptable for svn-sized output,
  # but confirm before reusing for chattier tools.
  errout = p.stderr.read()
  if print_output and errout:
    print >>sys.stderr, errout
  p.stdout.close()
  p.stderr.close()
  return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
             print_output=False, ignore_retcode=False):
  """Run command and return its stdout, exiting the program on failure.

  A non-zero exit status aborts unless ignore_retcode is set; empty
  output aborts unless silent_ok is set.
  """
  output, exit_code = RunShellWithReturnCode(command, print_output,
                                             universal_newlines)
  if exit_code and not ignore_retcode:
    ErrorExit("Got error status from %s:\n%s" % (command, output))
  if not (silent_ok or output):
    ErrorExit("No output from %s" % command)
  return output
class VersionControlSystem(object):
  """Abstract base class providing an interface to the VCS."""

  def __init__(self, options):
    """Constructor.

    Args:
      options: Command line options.
    """
    self.options = options

  def GenerateDiff(self, args):
    """Return the current diff as a string.

    Args:
      args: Extra arguments to pass to the diff command.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def CheckForUnknownFiles(self):
    """Show an "are you sure?" prompt if there are unknown files."""
    unknown_files = self.GetUnknownFiles()
    if unknown_files:
      print "The following files are not added to version control:"
      for line in unknown_files:
        print line
      # Aborts the whole program unless the user answers 'y'.
      AreYouSureOrExit()

  def GetBaseFile(self, filename):
    """Get the content of the upstream version of a file.

    Returns:
      A tuple (base_content, new_content, is_binary, status)
        base_content: The contents of the base file.
        new_content: For text files, this is empty. For binary files, this is
          the contents of the new file, since the diff output won't contain
          information to reconstruct the current file.
        is_binary: True iff the file is binary.
        status: The status of the file.
    """
    raise NotImplementedError(
        "abstract method -- subclass %s must override" % self.__class__)

  def GetBaseFiles(self, diff):
    """Helper that calls GetBase file for each file in the patch.

    Returns:
      A dictionary that maps from filename to GetBaseFile's tuple. Filenames
      are retrieved based on lines that start with "Index:" or
      "Property changes on:".
    """
    files = {}
    for line in diff.splitlines(True):
      if line.startswith('Index:') or line.startswith('Property changes on:'):
        unused, filename = line.split(':', 1)
        # On Windows if a file has property changes its filename uses '\'
        # instead of '/'.
        filename = filename.strip().replace('\\', '/')
        files[filename] = self.GetBaseFile(filename)
    return files

  def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
                      files):
    """Uploads the base files (and if necessary, the current ones as well)."""

    def UploadFile(filename, file_id, content, is_binary, status, is_base):
      """Uploads a file to the server."""
      file_too_large = False
      if is_base:
        type = "base"
      else:
        type = "current"
      # Oversized files are uploaded empty and flagged so the server knows.
      if len(content) > MAX_UPLOAD_SIZE:
        print ("Not uploading the %s file for %s because it's too large." %
               (type, filename))
        file_too_large = True
        content = ""
      checksum = md5.new(content).hexdigest()
      if options.verbose > 0 and not file_too_large:
        print "Uploading %s file for %s" % (type, filename)
      url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
      form_fields = [("filename", filename),
                     ("status", status),
                     ("checksum", checksum),
                     ("is_binary", str(is_binary)),
                     ("is_current", str(not is_base)),
                    ]
      if file_too_large:
        form_fields.append(("file_too_large", "1"))
      if options.email:
        form_fields.append(("user", options.email))
      ctype, body = EncodeMultipartFormData(form_fields,
                                            [("data", filename, content)])
      response_body = rpc_server.Send(url, body,
                                      content_type=ctype)
      if not response_body.startswith("OK"):
        StatusUpdate(" --> %s" % response_body)
        sys.exit(1)

    patches = dict()
    # List comprehension used purely for its side effect: builds a
    # filename -> file_id_str map from (file_id_str, filename) pairs.
    [patches.setdefault(v, k) for k, v in patch_list]
    for filename in patches.keys():
      base_content, new_content, is_binary, status = files[filename]
      file_id_str = patches.get(filename)
      # A "nobase" marker in the id means the base content must not be sent.
      if file_id_str.find("nobase") != -1:
        base_content = None
        file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
      file_id = int(file_id_str)
      if base_content != None:
        UploadFile(filename, file_id, base_content, is_binary, status, True)
      if new_content != None:
        UploadFile(filename, file_id, new_content, is_binary, status, False)

  def IsImage(self, filename):
    """Returns true if the filename has an image extension."""
    mimetype = mimetypes.guess_type(filename)[0]
    if not mimetype:
      return False
    return mimetype.startswith("image/")
class SubversionVCS(VersionControlSystem):
"""Implementation of the VersionControlSystem interface for Subversion."""
def __init__(self, options):
  """Parse the --rev range, set up caches, and guess the SVN base URL."""
  super(SubversionVCS, self).__init__(options)
  if self.options.revision:
    match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
    if not match:
      ErrorExit("Invalid Subversion revision %s." % self.options.revision)
    self.rev_start = match.group(1)
    # rev_end is None when a single revision (no ':') was given.
    self.rev_end = match.group(3)
  else:
    self.rev_start = self.rev_end = None
  # Cache output from "svn list -r REVNO dirname".
  # Keys: dirname, Values: 2-tuple (output for start rev and end rev).
  self.svnls_cache = {}
  # SVN base URL is required to fetch files deleted in an older revision.
  # Result is cached to not guess it over and over again in GetBaseFile().
  required = self.options.download_base or self.options.revision is not None
  self.svn_base = self._GuessBase(required)
def GuessBase(self, required):
  """Wrapper for _GuessBase.

  Returns the base URL already computed in __init__; the 'required'
  argument is unused here because enforcement happened in _GuessBase.
  """
  return self.svn_base
def _GuessBase(self, required):
  """Returns the SVN base URL.

  Parses 'svn info' output for the repository URL and rewrites it into a
  raw-content base URL for known hosts (svn.python.org, svn.collab.net,
  *.googlecode.com); any other host keeps its own scheme/netloc.

  Args:
    required: If true, exits if the url can't be guessed, otherwise None is
      returned.
  """
  info = RunShell(["svn", "info"])
  for line in info.splitlines():
    words = line.split()
    if len(words) == 2 and words[0] == "URL:":
      url = words[1]
      scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
      username, netloc = urllib.splituser(netloc)
      if username:
        logging.info("Removed username from base URL")
      if netloc.endswith("svn.python.org"):
        if netloc == "svn.python.org":
          if path.startswith("/projects/"):
            # Slice at 9 (not 10) so the leading '/' is kept.
            path = path[9:]
        elif netloc != "pythondev@svn.python.org":
          ErrorExit("Unrecognized Python URL: %s" % url)
        base = "http://svn.python.org/view/*checkout*%s/" % path
        logging.info("Guessed Python base = %s", base)
      elif netloc.endswith("svn.collab.net"):
        if path.startswith("/repos/"):
          # Slice at 6 (not 7) so the leading '/' is kept.
          path = path[6:]
        base = "http://svn.collab.net/viewvc/*checkout*%s/" % path
        logging.info("Guessed CollabNet base = %s", base)
      elif netloc.endswith(".googlecode.com"):
        path = path + "/"
        base = urlparse.urlunparse(("http", netloc, path, params,
                                    query, fragment))
        logging.info("Guessed Google Code base = %s", base)
      else:
        path = path + "/"
        base = urlparse.urlunparse((scheme, netloc, path, params,
                                    query, fragment))
        logging.info("Guessed base = %s", base)
      return base
  if required:
    ErrorExit("Can't find URL in output from svn info")
  return None
def GenerateDiff(self, args):
  """Run 'svn diff' (plus args) and return its output, exiting on failure.

  Exits via ErrorExit when the output contains no 'Index:' /
  'Property changes on:' headers, i.e. no actual patches.
  """
  cmd = ["svn", "diff"]
  if self.options.revision:
    cmd += ["-r", self.options.revision]
  if "--diff-cmd" not in args and os.path.isfile("/usr/bin/diff"):
    # force /usr/bin/diff as the diff command used by subversion
    # to override user settings (fixes issue with colordiff)
    cmd += ["--diff-cmd", "/usr/bin/diff"]
  cmd.extend(args)
  data = RunShell(cmd)
  count = 0
  for line in data.splitlines():
    if line.startswith("Index:") or line.startswith("Property changes on:"):
      count += 1
      logging.info(line)
  if not count:
    ErrorExit("No valid patches found in output from svn diff")
  return data
def _CollapseKeywords(self, content, keyword_str):
  """Collapses SVN keywords."""
  # svn cat translates keywords but svn diff doesn't. As a result of this
  # behavior patching.PatchChunks() fails with a chunk mismatch error.
  # This part was originally written by the Review Board development team
  # who had the same problem (http://reviews.review-board.org/r/276/).
  # Mapping of keywords to known aliases
  svn_keywords = {
      # Standard keywords
      'Date': ['Date', 'LastChangedDate'],
      'Revision': ['Revision', 'LastChangedRevision', 'Rev'],
      'Author': ['Author', 'LastChangedBy'],
      'HeadURL': ['HeadURL', 'URL'],
      'Id': ['Id'],

      # Aliases
      'LastChangedDate': ['LastChangedDate', 'Date'],
      'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
      'LastChangedBy': ['LastChangedBy', 'Author'],
      'URL': ['URL', 'HeadURL'],
  }

  def repl(m):
    # Preserve the expanded keyword's width by blanking its value with
    # spaces; the collapsed form then still lines up with the diff.
    if m.group(2):
      return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
    return "$%s$" % m.group(1)

  # Expand each requested keyword name into all of its known aliases.
  keywords = [keyword
              for name in keyword_str.split(" ")
              for keyword in svn_keywords.get(name, [])]
  return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)
def GetUnknownFiles(self):
status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
unknown_files = []
for line in status.split("\n"):
if line and line[0] == "?":
unknown_files.append(line)
return unknown_files
def ReadFile(self, filename):
"""Returns the contents of a file."""
file = open(filename, 'rb')
result = ""
try:
result = file.read()
finally:
file.close()
return result
  def GetStatus(self, filename):
    """Returns the status of a file.

    Without -r/--revision the status comes straight from "svn status"
    output.  With a revision range, the file's presence in "svn list"
    output at rev_start vs. rev_end determines added/modified/deleted.
    """
    if not self.options.revision:
      status = RunShell(["svn", "status", "--ignore-externals", filename])
      if not status:
        ErrorExit("svn status returned no output for %s" % filename)
      status_lines = status.splitlines()
      # If file is in a cl, the output will begin with
      # "\n--- Changelist 'cl_name':\n". See
      # http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
      if (len(status_lines) == 3 and
          not status_lines[0] and
          status_lines[1].startswith("--- Changelist")):
        status = status_lines[2]
      else:
        status = status_lines[0]
    # If we have a revision to diff against we need to run "svn list"
    # for the old and the new revision and compare the results to get
    # the correct status for a file.
    else:
      dirname, relfilename = os.path.split(filename)
      # Cache "svn list" output per directory: one pair of svn calls
      # serves every file in the same directory.
      if dirname not in self.svnls_cache:
        cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to get status for %s." % filename)
        old_files = out.splitlines()
        args = ["svn", "list"]
        if self.rev_end:
          args += ["-r", self.rev_end]
        cmd = args + [dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to run command %s" % cmd)
        self.svnls_cache[dirname] = (old_files, out.splitlines())
      old_files, new_files = self.svnls_cache[dirname]
      # NOTE(review): GetBaseFile() indexes status[3], so these literals
      # may need trailing padding to svn's column width -- confirm that
      # this dump didn't strip trailing spaces inside the strings.
      if relfilename in old_files and relfilename not in new_files:
        status = "D "
      elif relfilename in old_files and relfilename in new_files:
        status = "M "
      else:
        status = "A "
    return status
  def GetBaseFile(self, filename):
    """Fetch base content, new content and binary-ness for filename.

    Returns:
      A 4-tuple (base_content, new_content, is_binary, status[0:5]).
      base_content/new_content may be None or "" depending on the svn
      status and whether the file is binary (and, for binaries, an image).
    """
    status = self.GetStatus(filename)
    base_content = None
    new_content = None

    # If a file is copied its status will be "A  +", which signifies
    # "addition-with-history". See "svn st" for more information. We need to
    # upload the original file or else diff parsing will fail if the file was
    # edited.
    if status[0] == "A" and status[3] != "+":
      # We'll need to upload the new content if we're adding a binary file
      # since diff's output won't contain it.
      mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
                          silent_ok=True)
      base_content = ""
      is_binary = bool(mimetype) and not mimetype.startswith("text/")
      if is_binary and self.IsImage(filename):
        new_content = self.ReadFile(filename)
    elif (status[0] in ("M", "D", "R") or
          (status[0] == "A" and status[3] == "+") or  # Copied file.
          (status[0] == " " and status[1] == "M")):  # Property change.
      args = []
      if self.options.revision:
        url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
      else:
        # Don't change filename, it's needed later.
        url = filename
        args += ["-r", "BASE"]
      cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
      mimetype, returncode = RunShellWithReturnCode(cmd)
      if returncode:
        # File does not exist in the requested revision.
        # Reset mimetype, it contains an error message.
        mimetype = ""
      get_base = False
      is_binary = bool(mimetype) and not mimetype.startswith("text/")
      if status[0] == " ":
        # Empty base content just to force an upload.
        base_content = ""
      elif is_binary:
        if self.IsImage(filename):
          get_base = True
          if status[0] == "M":
            if not self.rev_end:
              new_content = self.ReadFile(filename)
            else:
              url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
              new_content = RunShell(["svn", "cat", url],
                                     universal_newlines=True, silent_ok=True)
        else:
          # Non-image binary: upload an empty base, skip content fetch.
          base_content = ""
      else:
        get_base = True

      if get_base:
        # Binary bases must be fetched byte-exact (no newline translation).
        if is_binary:
          universal_newlines = False
        else:
          universal_newlines = True
        if self.rev_start:
          # "svn cat -r REV delete_file.txt" doesn't work. cat requires
          # the full URL with "@REV" appended instead of using "-r" option.
          url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          base_content = RunShell(["svn", "cat", url],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        else:
          base_content = RunShell(["svn", "cat", filename],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        if not is_binary:
          # Collapse svn keywords ($Id$ etc.) in the base so it matches
          # what "svn diff" was computed against.
          args = []
          if self.rev_start:
            url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          else:
            url = filename
            args += ["-r", "BASE"]
          cmd = ["svn"] + args + ["propget", "svn:keywords", url]
          keywords, returncode = RunShellWithReturnCode(cmd)
          if keywords and not returncode:
            base_content = self._CollapseKeywords(base_content, keywords)
    else:
      StatusUpdate("svn status returned unexpected output: %s" % status)
      sys.exit(1)
    return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Git."""

  def __init__(self, options):
    super(GitVCS, self).__init__(options)
    # Map of filename -> hash of base file.
    self.base_hashes = {}

  def GenerateDiff(self, extra_args):
    """Return a git diff rewritten to look like svn diff output.

    This is more complicated than svn's GenerateDiff because we must
    convert the diff output to include an svn-style "Index:" line as well
    as record the hashes of the base files, so we can upload them along
    with our diff.
    """
    if self.options.revision:
      extra_args = [self.options.revision] + extra_args
    gitdiff = RunShell(["git", "diff", "--full-index"] + extra_args)
    converted = []
    num_files = 0
    current_file = None
    for line in gitdiff.splitlines():
      header_match = re.match(r"diff --git a/(.*) b/.*$", line)
      if header_match:
        num_files += 1
        current_file = header_match.group(1)
        converted.append("Index: %s\n" % current_file)
      else:
        # The "index" line in a git diff looks like this (long hashes elided):
        #   index 82c0d44..b2cee3f 100755
        # We want to save the left hash, as that identifies the base file.
        index_match = re.match(r"index (\w+)\.\.", line)
        if index_match:
          self.base_hashes[current_file] = index_match.group(1)
        converted.append(line + "\n")
    if not num_files:
      ErrorExit("No valid patches found in output from git diff")
    return "".join(converted)

  def GetUnknownFiles(self):
    """Return files present in the work tree but untracked by git."""
    status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
                      silent_ok=True)
    return status.splitlines()

  def GetBaseFile(self, filename):
    """Return (base_content, new_content, is_binary, status) for filename."""
    blob_hash = self.base_hashes[filename]
    base_content = None
    new_content = None
    is_binary = False
    if blob_hash == "0" * 40:  # All-zero hash indicates no base file.
      status = "A"
      base_content = ""
    else:
      status = "M"
      base_content, returncode = \
          RunShellWithReturnCode(["git", "show", blob_hash])
      if returncode:
        ErrorExit("Got error status from 'git show %s'" % blob_hash)
    return (base_content, new_content, is_binary, status)
class MercurialVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Mercurial."""

  def __init__(self, options, repo_dir):
    super(MercurialVCS, self).__init__(options)
    # Absolute path to repository (we can be in a subdir)
    self.repo_dir = os.path.normpath(repo_dir)
    # Compute the subdir
    cwd = os.path.normpath(os.getcwd())
    assert cwd.startswith(self.repo_dir)
    self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
    if self.options.revision:
      self.base_rev = self.options.revision
    else:
      # Default to the working directory's parent revision.
      self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()

  def _GetRelPath(self, filename):
    """Get relative path of a file according to the current directory,
    given its logical path in the repo."""
    assert filename.startswith(self.subdir), filename
    return filename[len(self.subdir):].lstrip(r"\/")

  def GenerateDiff(self, extra_args):
    """Return "hg diff --git" output munged to look like svn diff output."""
    # If no file specified, restrict to the current subdir
    extra_args = extra_args or ["."]
    cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
    data = RunShell(cmd, silent_ok=True)
    svndiff = []
    filecount = 0
    for line in data.splitlines():
      # Raw string: the original used an unescaped "\S" in a plain string.
      m = re.match(r"diff --git a/(\S+) b/(\S+)", line)
      if m:
        # Modify line to make it look like as it comes from svn diff.
        # With this modification no changes on the server side are required
        # to make upload.py work with Mercurial repos.
        # NOTE: for proper handling of moved/copied files, we have to use
        # the second filename.
        filename = m.group(2)
        svndiff.append("Index: %s" % filename)
        svndiff.append("=" * 67)
        filecount += 1
        logging.info(line)
      else:
        svndiff.append(line)
    if not filecount:
      ErrorExit("No valid patches found in output from hg diff")
    return "\n".join(svndiff) + "\n"

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    # (Removed an unused "args" local that was never referenced.)
    status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
                      silent_ok=True)
    unknown_files = []
    for line in status.splitlines():
      st, fn = line.split(" ", 1)
      if st == "?":
        unknown_files.append(fn)
    return unknown_files

  def GetBaseFile(self, filename):
    """Fetch base content, new content and status for filename.

    Returns:
      A 4-tuple (base_content, new_content, is_binary, status).
    """
    # "hg status" and "hg cat" both take a path relative to the current subdir
    # rather than to the repo root, but "hg diff" has given us the full path
    # to the repo root.
    base_content = ""
    new_content = None
    is_binary = False
    oldrelpath = relpath = self._GetRelPath(filename)
    # "hg status -C" returns two lines for moved/copied files, one otherwise
    out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
    out = out.splitlines()
    # HACK: strip error message about missing file/directory if it isn't in
    # the working copy
    if out[0].startswith('%s: ' % relpath):
      out = out[1:]
    if len(out) > 1:
      # Moved/copied => considered as modified, use old filename to
      # retrieve base contents
      oldrelpath = out[1].strip()
      status = "M"
    else:
      status, _ = out[0].split(' ', 1)
    if status != "A":
      base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
                              silent_ok=True)
      is_binary = "\0" in base_content  # Mercurial's heuristic
    if status != "R":
      # Close the handle explicitly; the original leaked it via
      # open(relpath, "rb").read().
      fh = open(relpath, "rb")
      try:
        new_content = fh.read()
      finally:
        fh.close()
      is_binary = is_binary or "\0" in new_content
    if is_binary and base_content:
      # Fetch again without converting newlines
      base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath],
                              silent_ok=True, universal_newlines=False)
    if not is_binary or not self.IsImage(relpath):
      new_content = None
    return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
  """Splits a patch into separate pieces for each file.

  Args:
    data: A string containing the output of svn diff.

  Returns:
    A list of 2-tuple (filename, text) where text is the svn diff output
      pertaining to filename.
  """
  patches = []
  current_name = None
  current_lines = []
  for line in data.splitlines(True):
    next_name = None
    if line.startswith('Index:'):
      unused, next_name = line.split(':', 1)
      next_name = next_name.strip()
    elif line.startswith('Property changes on:'):
      unused, prop_name = line.split(':', 1)
      # When a file is modified, paths use '/' between directories, however
      # when a property is modified '\' is used on Windows. Make them the same
      # otherwise the file shows up twice.
      prop_name = prop_name.strip().replace('\\', '/')
      if prop_name != current_name:
        # File has property changes but no modifications, create a new diff.
        next_name = prop_name
    if next_name:
      # Flush the previous file's accumulated diff before starting anew.
      if current_name and current_lines:
        patches.append((current_name, ''.join(current_lines)))
      current_name = next_name
      current_lines = [line]
      continue
    if current_lines is not None:
      current_lines.append(line)
  if current_name and current_lines:
    patches.append((current_name, ''.join(current_lines)))
  return patches
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
"""Uploads a separate patch for each file in the diff output.
Returns a list of [patch_key, filename] for each file.
"""
patches = SplitPatch(data)
rv = []
for patch in patches:
if len(patch[1]) > MAX_UPLOAD_SIZE:
print ("Not uploading the patch for " + patch[0] +
" because the file is too large.")
continue
form_fields = [("filename", patch[0])]
if not options.download_base:
form_fields.append(("content_upload", "1"))
files = [("data", "data.diff", patch[1])]
ctype, body = EncodeMultipartFormData(form_fields, files)
url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
print "Uploading patch for " + patch[0]
response_body = rpc_server.Send(url, body, content_type=ctype)
lines = response_body.splitlines()
if not lines or lines[0] != "OK":
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
rv.append([lines[1], patch[0]])
return rv
def GuessVCS(options):
"""Helper to guess the version control system.
This examines the current directory, guesses which VersionControlSystem
we're using, and returns an instance of the appropriate class. Exit with an
error if we can't figure it out.
Returns:
A VersionControlSystem instance. Exits if the VCS can't be guessed.
"""
# Mercurial has a command to get the base directory of a repository
# Try running it, but don't die if we don't have hg installed.
# NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
try:
out, returncode = RunShellWithReturnCode(["hg", "root"])
if returncode == 0:
return MercurialVCS(options, out.strip())
except OSError, (errno, message):
if errno != 2: # ENOENT -- they don't have hg installed.
raise
# Subversion has a .svn in all working directories.
if os.path.isdir('.svn'):
logging.info("Guessed VCS = Subversion")
return SubversionVCS(options)
# Git has a command to test if you're in a git tree.
# Try running it, but don't die if we don't have git installed.
try:
out, returncode = RunShellWithReturnCode(["git", "rev-parse",
"--is-inside-work-tree"])
if returncode == 0:
return GitVCS(options)
except OSError, (errno, message):
if errno != 2: # ENOENT -- they don't have git installed.
raise
ErrorExit(("Could not guess version control system. "
"Are you in a working copy directory?"))
def RealMain(argv, data=None):
  """The real main function.

  Args:
    argv: Command line arguments.
    data: Diff contents. If None (default) the diff is generated by
      the VersionControlSystem implementation returned by GuessVCS().

  Returns:
    A 2-tuple (issue id, patchset id).
    The patchset id is None if the base files are not uploaded by this
    script (applies only to SVN checkouts).
  """
  logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
                              "%(lineno)s %(message)s "))
  # Force locale-independent output from the VCS tools we shell out to.
  os.environ['LC_ALL'] = 'C'
  options, args = parser.parse_args(argv[1:])
  global verbosity
  verbosity = options.verbose
  if verbosity >= 3:
    logging.getLogger().setLevel(logging.DEBUG)
  elif verbosity >= 2:
    logging.getLogger().setLevel(logging.INFO)
  vcs = GuessVCS(options)
  if isinstance(vcs, SubversionVCS):
    # base field is only allowed for Subversion.
    # Note: Fetching base files may become deprecated in future releases.
    base = vcs.GuessBase(options.download_base)
  else:
    base = None
  if not base and options.download_base:
    # NOTE(review): this assignment is a no-op -- download_base is already
    # truthy on this path.  Upstream upload.py disables download_base here
    # instead; confirm intent before changing.
    options.download_base = True
    logging.info("Enabled upload of base file")
  if not options.assume_yes:
    vcs.CheckForUnknownFiles()
  if data is None:
    data = vcs.GenerateDiff(args)
  files = vcs.GetBaseFiles(data)
  # Optionally gate the upload on a minimum pylint score for changed
  # Python files.
  if options.min_pylint_score:
    print "running pylint..."
    has_low_score = 0
    for file in files:
      if re.search(r'[.]py$', file):
        print "pylinting "+file+"..."
        res = RunShell(["pylint", file], silent_ok=True, ignore_retcode=True)
        match = re.search(r'Your code has been rated at ([0-9.-]+)', res)
        try:
          score = float(match.group(1))
        except:
          # No rating line found (or unparsable): treat as failing.
          score = -1.0
        print file,"rated at",score
        if score < float(options.min_pylint_score):
          has_low_score += 1
    if has_low_score > 0:
      print "pylint reported", has_low_score, \
            "files with scores below", options.min_pylint_score
      AreYouSureOrExit()
  if verbosity >= 1:
    print "Upload server:", options.server, "(change with -s/--server)"
  if options.issue:
    prompt = "Message describing this patch set: "
  else:
    prompt = "New issue subject: "
  message = options.message or raw_input(prompt).strip()
  if not message:
    ErrorExit("A non-empty message is required")
  rpc_server = GetRpcServer(options)
  form_fields = [("subject", message)]
  if base:
    form_fields.append(("base", base))
  if options.issue:
    form_fields.append(("issue", str(options.issue)))
  if options.email:
    form_fields.append(("user", options.email))
  if options.reviewers:
    for reviewer in options.reviewers.split(','):
      # NOTE(review): this check rejects addresses whose domain has more
      # than one dot (e.g. user@mail.example.com) -- confirm desired.
      if "@" in reviewer and not reviewer.split("@")[1].count(".") == 1:
        ErrorExit("Invalid email address: %s" % reviewer)
    form_fields.append(("reviewers", options.reviewers))
  if options.cc:
    for cc in options.cc.split(','):
      if "@" in cc and not cc.split("@")[1].count(".") == 1:
        ErrorExit("Invalid email address: %s" % cc)
    form_fields.append(("cc", options.cc))
  # The description can come from --description, --description_file, or an
  # interactive editor session; the three sources are mutually exclusive.
  description = options.description
  if options.description_file:
    if options.description:
      ErrorExit("Can't specify description and description_file")
    file = open(options.description_file, 'r')
    description = file.read()
    file.close()
  if options.description_editor:
    if options.description:
      ErrorExit("Can't specify description and description_editor")
    if options.description_file:
      ErrorExit("Can't specify description_file and description_editor")
    if 'EDITOR' not in os.environ:
      ErrorExit("Please set the EDITOR environment variable.")
    editor = os.environ['EDITOR']
    if editor == None or editor == "":
      ErrorExit("Please set the EDITOR environment variable.")
    # Seed a temp file with a template, let the user edit it, read it back.
    tempfh, filename = tempfile.mkstemp()
    msg = "demo URL: http://your-url/foo/\ndescription: (start on next line)\n"
    os.write(tempfh, msg)
    os.close(tempfh)
    print "running EDITOR:", editor, filename
    cmd = editor + " " + filename
    subprocess.call(cmd, shell=True)
    file = open(filename, 'r')
    description = file.read()
    file.close()
    os.unlink(filename)
    print description
  if description:
    form_fields.append(("description", description))
  # Send a hash of all the base file so the server can determine if a copy
  # already exists in an earlier patchset.
  base_hashes = ""
  for file, info in files.iteritems():
    if not info[0] is None:
      checksum = md5.new(info[0]).hexdigest()
      if base_hashes:
        base_hashes += "|"
      base_hashes += checksum + ":" + file
  form_fields.append(("base_hashes", base_hashes))
  # If we're uploading base files, don't send the email before the uploads, so
  # that it contains the file status.
  if options.send_mail and options.download_base:
    form_fields.append(("send_mail", "1"))
  if not options.download_base:
    form_fields.append(("content_upload", "1"))
  if len(data) > MAX_UPLOAD_SIZE:
    print "Patch is large, so uploading file patches separately."
    uploaded_diff_file = []
    form_fields.append(("separate_patches", "1"))
  else:
    uploaded_diff_file = [("data", "data.diff", data)]
  ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
  response_body = rpc_server.Send("/upload", body, content_type=ctype)
  patchset = None
  if not options.download_base or not uploaded_diff_file:
    # Server replies "msg\npatchset_id\n<patch lines...>" on success.
    lines = response_body.splitlines()
    if len(lines) >= 2:
      msg = lines[0]
      patchset = lines[1].strip()
      patches = [x.split(" ", 1) for x in lines[2:]]
    else:
      msg = response_body
  else:
    msg = response_body
  StatusUpdate(msg)
  if not response_body.startswith("Issue created.") and \
     not response_body.startswith("Issue updated."):
    sys.exit(0)
  # The issue id is the last path component of the URL in the message.
  issue = msg[msg.rfind("/")+1:]
  if not uploaded_diff_file:
    result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
    if not options.download_base:
      patches = result
  if not options.download_base:
    vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
  if options.send_mail:
    rpc_server.Send("/" + issue + "/mail", payload="")
  return issue, patchset
# Review server used for footprint code reviews.
FPREVIEW_ADDR = "footprint2009reviews.appspot.com"
def main():
  """Wrapper around RealMain that injects footprint-specific defaults.

  Rebuilds sys.argv to target the footprint review server, cc the eng
  mailing list, require a pylint score of at least 9.0, and treat the
  first command-line argument as the reviewer's email (defaulting the
  domain to @gmail.com when no "@" is present).
  """
  try:
    if len(sys.argv) == 1:
      print "Usage:", sys.argv[0], "<email address of primary reviewer>"
      print "(automatically cc's", FPREVIEW_ADDR, ")"
      sys.exit(1)
    # NOTE(review): the server string below duplicates FPREVIEW_ADDR --
    # keep the two in sync.
    args = [sys.argv[0], "-s", "footprint2009reviews.appspot.com"]
    args.append("--cc=footprint-eng@googlegroups.com")
    args.append("--description_editor")
    args.append("--send_mail")
    args.append("--min_pylint_score")
    # we're starting with 9.0
    args.append("9.0")
    args.append("-r")
    email = sys.argv[1]
    if email.find("@") == -1:
      email += "@gmail.com"
    print >>sys.stderr, "*** sending to "+email+" for review. (note: @gmail.com)"
    args.append(email)
    sys.argv = args + sys.argv[2:]
    # If PYLINTRC isn't set, walk up from cwd looking for a pylintrc file.
    if "PYLINTRC" not in os.environ:
      testpath = os.getcwd()
      while testpath != "" and not os.path.exists(testpath + "/pylintrc"):
        testpath = re.sub(r'/[^/]*$', '', testpath)
        print "checking for "+testpath + "/pylintrc"
      if testpath == "":
        print >>sys.stderr, "ERROR: couldn't find 'pylintrc' file."
        sys.exit(1)
      os.environ['PYLINTRC'] = testpath + "/pylintrc"
      print "guessing PYLINTRC="+os.environ['PYLINTRC']
    print "running: ", " ".join(sys.argv)
    RealMain(sys.argv)
  except KeyboardInterrupt:
    print
    StatusUpdate("Interrupted.")
    sys.exit(1)
if __name__ == "__main__":
  main()
| Python |
#!/usr/bin/python
#
# didn't use generateDS because it required a slew of packages to be installed,
# like pulling on a sweater.
"""horrible regexp-based converter from XSD to human-readable HTML."""
# disable line too long-- irrelevant here
# pylint: disable-msg=C0301
# usage: python spec2html.py < spec0.1.xsd > spec0.1.html
import sys
import re
def main():
"""wrap the code in scope."""
outstr = sys.stdin.read()
version = (re.findall(r'<xs:schema version="(.+?)"', outstr))[0]
outstr = re.sub(r'(\r?\n|\r)', r'', outstr)
outstr = re.sub(r'<[?]xml.+?>', r'', outstr)
outstr = re.sub(r'</?xs:schema.*?>', r'', outstr)
outstr = re.sub(r'<code>(.+?)</code>', r'<a href="#\1"><code>\1</code></a>', outstr)
outstr = re.sub(r'<pcode>(.+?)</pcode>', r'<code>\1</code>', outstr)
outstr = re.sub(r'<(/?(code|p|a|br|b).*?)>', r'&&\1@@', outstr)
outstr = re.sub(r'<', r'', outstr)
outstr = re.sub(r'/?>', r'', outstr)
#blockquoting
outstr = re.sub(r'/xs:(all|sequence)', r'</blockquote>', outstr)
#Change element to selement for distinguishing multiple entries later on
outstr = re.sub(r'xs:sequence(.+?)xs:element', r'xs:sequence\1xs:selement', outstr)
#blockquoting
outstr = re.sub(r'xs:(all|sequence)', r'<blockquote>', outstr)
#Named types
outstr = re.sub(r'xs:(simple|complex)Type name="(.+?)"(.+?)/xs:(simple|complex)Type',
r'<div class="namedType"><div class="entryName"><a name="\2">\2 (\1 type)</a></div>\3</div>', outstr)
#Extension
outstr = re.sub(r'xs:extension\s+?base="(xs:)?(.+?)"(.+?)/xs:extension', r'<div class="info">derived from: \2</div>\3', outstr)
#restriction
outstr = re.sub(r'xs:restriction\s+?base="(xs:)?(.+?)"(.+?)/xs:restriction', r'<div class="info">derived from: \2</div>\3', outstr)
#attribute entries
outstr = re.sub(r'/xs:attribute', r'</blockquote></div>\n', outstr)
outstr = re.sub(r'\s*xs:attribute name="(.+?)"', r'<div class="entry"><blockquote><div class="entryName"><a name="\1">\1 (attribute)</a></div>\n', outstr)
#element entries
outstr = re.sub(r'/xs:element', r'</div>\n', outstr)
outstr = re.sub(r'\s*xs:selement name="(.+?)"(.+?)', r'<div class="entry repeated"><div class="entryName"><a name="\1">\1 (repeated element)</a></div>\n', outstr)
outstr = re.sub(r'\s*xs:element name="(.+?)"(.+?)', r'<div class="entry"><div class="entryName"><a name="\1">\1 (element)</a></div>\n', outstr)
#documentation
outstr = re.sub(r'xs:annotation\s+xs:documentation\s+!\[CDATA\[\s*(.+?)\s*\]\]\s+/xs:documentation\s+/xs:annotation', r'<div class="doc-text">\1</div>', outstr)
#Little stuff in entries
outstr = re.sub(r'use="(.+?)"', r'<span class="info">use is \1</span><br/>', outstr)
outstr = re.sub(r'default=""', r'<span class="info">default value: <code>(empty string)</code></span><br/>', outstr)
outstr = re.sub(r'default="(.+?)"', r'<span class="info">default value: <code>\1</code></span><br/>', outstr)
outstr = re.sub(r'fixed="(.+?)"', r'<span class="info">fixed value: <code>\1</code></span><br/>', outstr)
outstr = re.sub(r'xs:enumeration value="(.+?)"', r'<span class="info">allowed value: <code>\1</code></span><br/>', outstr)
outstr = re.sub(r'xs:pattern value="(.+?)"', r'<span class="info">must match (regular expression): <code>\1</code></span><br/>', outstr)
outstr = re.sub(r'type="(xs:)?(.+?)"', r'<span class="info">datatype: \2</span><br/>', outstr)
outstr = re.sub(r'minOccurs="0"', r'<span class="info">required: optional.</span><br/>', outstr)
outstr = re.sub(r'minOccurs="([0-9]+)"', r'<span class="info">required: at least \1 times</span><br/>', outstr)
outstr = re.sub(r'maxOccurs="1"', r'<span class="info">Multiple not allowed</span><br/>', outstr)
outstr = re.sub(r'maxOccurs="unbounded"', r'\n', outstr)
#putting in links
outstr = re.sub(r'(datatype|derived from): (locationType|dateTimeDurationType|yesNoEnum|sexRestrictedEnum|dateTimeOlsonDefaultPacific|timeOlson|dateTimeNoTZ|timeNoTZ)', r'\1: <a href="#\2"><code>\2</code></a>\n', outstr)
outstr = re.sub(r'(datatype|derived from): (string)', r'\1: <a href="http://www.w3schools.com/Schema/schema_dtypes_string.asp"><code>\2</code></a>\n', outstr)
outstr = re.sub(r'(datatype|derived from): (dateTime|date|time|duration)', r'\1: <a href="http://www.w3schools.com/Schema/schema_dtypes_date.asp"><code>\2</code></a>\n', outstr)
outstr = re.sub(r'(datatype|derived from): (integer|decimal)', r'\1: <a href="http://www.w3schools.com/Schema/schema_dtypes_numeric.asp"><code>\2</code></a>\n', outstr)
#Drop stuff we don't care about
outstr = re.sub(r'/?xs:(simpleContent|complexType)', r'', outstr)
#clean-up
outstr = re.sub(r'&&', r'<', outstr)
outstr = re.sub(r'@@', r'>', outstr)
outstr = re.sub(r'\s*<br/>', r'<br/>\n', outstr)
print "<html>"
print "<head>"
print "<title>Footprint XML Specification Version", version, "</title>"
#print '<LINK REL="StyleSheet" HREF="spec.css" TYPE="text/css"/>'
print "<style>"
cssfh = open('spec.css')
print cssfh.read()
print "</style>"
print "</head>"
print "<body>"
print '<div class="titleText">Footprint XML Specification Version', version, '</div><br>'
print outstr
print "</body></html>"
main()
| Python |
#!/usr/bin/python
#
# didn't use generateDS because it required a slew of packages to be installed,
# like pulling on a sweater.
"""horrible regexp-based converter from XSD to human-readable HTML."""
# disable line too long-- irrelevant here
# pylint: disable-msg=C0301
# usage: python spec2html.py < spec0.1.xsd > spec0.1.html
import sys
import re
def main():
"""wrap the code in scope."""
outstr = sys.stdin.read()
version = (re.findall(r'<xs:schema version="(.+?)"', outstr))[0]
outstr = re.sub(r'(\r?\n|\r)', r'', outstr)
outstr = re.sub(r'<[?]xml.+?>', r'', outstr)
outstr = re.sub(r'</?xs:schema.*?>', r'', outstr)
outstr = re.sub(r'<code>(.+?)</code>', r'<a href="#\1"><code>\1</code></a>', outstr)
outstr = re.sub(r'<pcode>(.+?)</pcode>', r'<code>\1</code>', outstr)
outstr = re.sub(r'<(/?(code|p|a|br|b).*?)>', r'&&\1@@', outstr)
outstr = re.sub(r'<', r'', outstr)
outstr = re.sub(r'/?>', r'', outstr)
#blockquoting
outstr = re.sub(r'/xs:(all|sequence)', r'</blockquote>', outstr)
#Change element to selement for distinguishing multiple entries later on
outstr = re.sub(r'xs:sequence(.+?)xs:element', r'xs:sequence\1xs:selement', outstr)
#blockquoting
outstr = re.sub(r'xs:(all|sequence)', r'<blockquote>', outstr)
#Named types
outstr = re.sub(r'xs:(simple|complex)Type name="(.+?)"(.+?)/xs:(simple|complex)Type',
r'<div class="namedType"><div class="entryName"><a name="\2">\2 (\1 type)</a></div>\3</div>', outstr)
#Extension
outstr = re.sub(r'xs:extension\s+?base="(xs:)?(.+?)"(.+?)/xs:extension', r'<div class="info">derived from: \2</div>\3', outstr)
#restriction
outstr = re.sub(r'xs:restriction\s+?base="(xs:)?(.+?)"(.+?)/xs:restriction', r'<div class="info">derived from: \2</div>\3', outstr)
#attribute entries
outstr = re.sub(r'/xs:attribute', r'</blockquote></div>\n', outstr)
outstr = re.sub(r'\s*xs:attribute name="(.+?)"', r'<div class="entry"><blockquote><div class="entryName"><a name="\1">\1 (attribute)</a></div>\n', outstr)
#element entries
outstr = re.sub(r'/xs:element', r'</div>\n', outstr)
outstr = re.sub(r'\s*xs:selement name="(.+?)"(.+?)', r'<div class="entry repeated"><div class="entryName"><a name="\1">\1 (repeated element)</a></div>\n', outstr)
outstr = re.sub(r'\s*xs:element name="(.+?)"(.+?)', r'<div class="entry"><div class="entryName"><a name="\1">\1 (element)</a></div>\n', outstr)
#documentation
outstr = re.sub(r'xs:annotation\s+xs:documentation\s+!\[CDATA\[\s*(.+?)\s*\]\]\s+/xs:documentation\s+/xs:annotation', r'<div class="doc-text">\1</div>', outstr)
#Little stuff in entries
outstr = re.sub(r'use="(.+?)"', r'<span class="info">use is \1</span><br/>', outstr)
outstr = re.sub(r'default=""', r'<span class="info">default value: <code>(empty string)</code></span><br/>', outstr)
outstr = re.sub(r'default="(.+?)"', r'<span class="info">default value: <code>\1</code></span><br/>', outstr)
outstr = re.sub(r'fixed="(.+?)"', r'<span class="info">fixed value: <code>\1</code></span><br/>', outstr)
outstr = re.sub(r'xs:enumeration value="(.+?)"', r'<span class="info">allowed value: <code>\1</code></span><br/>', outstr)
outstr = re.sub(r'xs:pattern value="(.+?)"', r'<span class="info">must match (regular expression): <code>\1</code></span><br/>', outstr)
outstr = re.sub(r'type="(xs:)?(.+?)"', r'<span class="info">datatype: \2</span><br/>', outstr)
outstr = re.sub(r'minOccurs="0"', r'<span class="info">required: optional.</span><br/>', outstr)
outstr = re.sub(r'minOccurs="([0-9]+)"', r'<span class="info">required: at least \1 times</span><br/>', outstr)
outstr = re.sub(r'maxOccurs="1"', r'<span class="info">Multiple not allowed</span><br/>', outstr)
outstr = re.sub(r'maxOccurs="unbounded"', r'\n', outstr)
#putting in links
outstr = re.sub(r'(datatype|derived from): (locationType|dateTimeDurationType|yesNoEnum|sexRestrictedEnum|dateTimeOlsonDefaultPacific|timeOlson|dateTimeNoTZ|timeNoTZ)', r'\1: <a href="#\2"><code>\2</code></a>\n', outstr)
outstr = re.sub(r'(datatype|derived from): (string)', r'\1: <a href="http://www.w3schools.com/Schema/schema_dtypes_string.asp"><code>\2</code></a>\n', outstr)
outstr = re.sub(r'(datatype|derived from): (dateTime|date|time|duration)', r'\1: <a href="http://www.w3schools.com/Schema/schema_dtypes_date.asp"><code>\2</code></a>\n', outstr)
outstr = re.sub(r'(datatype|derived from): (integer|decimal)', r'\1: <a href="http://www.w3schools.com/Schema/schema_dtypes_numeric.asp"><code>\2</code></a>\n', outstr)
#Drop stuff we don't care about
outstr = re.sub(r'/?xs:(simpleContent|complexType)', r'', outstr)
#clean-up
outstr = re.sub(r'&&', r'<', outstr)
outstr = re.sub(r'@@', r'>', outstr)
outstr = re.sub(r'\s*<br/>', r'<br/>\n', outstr)
print "<html>"
print "<head>"
print "<title>Footprint XML Specification Version", version, "</title>"
#print '<LINK REL="StyleSheet" HREF="spec.css" TYPE="text/css"/>'
print "<style>"
cssfh = open('spec.css')
print cssfh.read()
print "</style>"
print "</head>"
print "<body>"
print '<div class="titleText">Footprint XML Specification Version', version, '</div><br>'
print outstr
print "</body></html>"
main()
| Python |
#!/usr/bin/env python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Exports TSV data over HTTP.
Usage:
%s [flags]
--url=<string> URL endpoint to get exported data. (Required)
--batch_size=<int> Number of Entity objects to include in each post to
smaller the batch size should be. (Default 1000)
--filename=<path> Path to the TSV file to export. (Required)
--digsig=<string> value passed to endpoint permitting export
The exit status will be 0 on success, non-zero on failure.
"""
import sys
import re
import logging
import getopt
import urllib2
import datetime
def PrintUsageExit(code):
print sys.modules['__main__'].__doc__ % sys.argv[0]
sys.stdout.flush()
sys.stderr.flush()
sys.exit(code)
def Pull(filename, url, min_key, delim, prefix):
  """Fetch one batch of exported rows from url and append them to filename.

  Args:
    filename: path of the TSV file to append to.
    url: fully-formed batch URL (already carries digsig/min_key/limit).
    min_key: key of the last row from the previous batch, or "" on the
      first call (the first line of the first batch is then a header row
      and is not counted).
    delim: field delimiter used by the endpoint (e.g. "\t").
    prefix: optional string prepended to every line before writing.

  Returns:
    A (min_key, line_count) tuple: the key of the last data row (used to
    request the next batch) and the number of data rows in this batch.

  Exits the process with status 2 on fetch errors and 3 on file errors.
  """
  # get content from url and write to filename
  try:
    connection = urllib2.urlopen(url)
    # TODO: read 100 lines incrementally and show progress
    content = connection.read()
    connection.close()
  except urllib2.URLError as e:
    # Only HTTPError carries .code/.msg; plain URLErrors have .reason.
    logging.error('%s returned error %s, %s' %
                  (url, getattr(e, 'code', '?'),
                   getattr(e, 'msg', getattr(e, 'reason', e))))
    sys.exit(2)
  try:
    # Bug fix: 'file()' replaced with the portable built-in 'open()'.
    tsv_file = open(filename, 'a')
  except IOError as e:
    # Bug fix: the original handler referenced undefined names
    # ('errno' and 'os'), which itself raised a NameError.
    logging.error("I/O error(%s): %s" % (e.errno, e.strerror))
    sys.exit(3)
  if prefix:
    lines = content.split("\n")
    lines.pop()  # drop the empty fragment after the trailing newline
    content = ("%s" % prefix) + ("\n%s" % prefix).join(lines) + "\n"
  tsv_file.write(content)
  tsv_file.close()
  # Count the rows just fetched; 'rows' renamed from 'list' to avoid
  # shadowing the builtin.
  rows = content.splitlines()
  if not rows:
    # Empty response: no data rows; returning 0 lets the caller's loop
    # terminate instead of raising IndexError below.
    return min_key, 0
  line_count = len(rows)
  last_line = rows[line_count - 1]
  if min_key == "":
    # that's our header, don't count it
    line_count -= 1
  # Key of the last row; the first field apparently carries a 4-character
  # prefix before the key -- TODO(review): confirm against the endpoint.
  fields = last_line.split(delim)
  min_key = fields[0][4:]
  return min_key, line_count
def ParseArguments(argv):
  """Parse command-line flags.

  Args:
    argv: full argument vector, argv[0] being the program name.

  Returns:
    A (url, filename, batch_size, prefix, digsig) tuple; url and filename
    are None when the corresponding required flag was not supplied.
  """
  opts, args = getopt.getopt(
      argv[1:],
      'dh',
      ['debug', 'help',
       'url=', 'filename=', 'prefix=', 'digsig=', 'batch_size='
       ])
  # Defaults; url/filename stay None so main() can detect missing flags.
  url = None
  filename = None
  digsig = ''
  prefix = ''
  batch_size = 1000
  for option, value in opts:
    if option == '--debug':
      logging.getLogger().setLevel(logging.DEBUG)
    if option in ('-h', '--help'):
      PrintUsageExit(0)
    if option == '--url':
      url = value
    if option == '--filename':
      filename = value
    if option == '--prefix':
      prefix = value
    if option == '--digsig':
      digsig = value
    if option == '--batch_size':
      batch_size = int(value)
      if batch_size <= 0:
        # A non-positive batch size would make the fetch loop useless.
        print >>sys.stderr, 'batch_size must be 1 or larger'
        PrintUsageExit(1)
  return (url, filename, batch_size, prefix, digsig)
def main(argv):
  """Repeatedly fetch batches of TSV data until a short batch arrives.

  Returns:
    0 on success (the __main__ guard passes this to sys.exit).
  """
  logging.basicConfig(
      level=logging.INFO,
      format='%(levelname)-8s %(asctime)s %(message)s')
  args = ParseArguments(argv)
  if [arg for arg in args if arg is None]:
    # At least one required flag (--url or --filename) was not given.
    print >>sys.stderr, 'Invalid arguments'
    PrintUsageExit(1)
  url, filename, batch_size, prefix, digsig = args
  delim = "\t"
  min_key = ""
  # Seed 'lines' above batch_size so the loop body runs at least once;
  # a batch shorter than batch_size means the export is exhausted.
  lines = batch_size + 2
  while lines >= batch_size:
    url_step = ("%s?digsig=%s&min_key=%s&limit=%s" %
                (url, str(digsig), str(min_key), str(batch_size)))
    if min_key != "":
      log_key = min_key
    else:
      log_key = "[start]"
    t0 = datetime.datetime.now()
    min_key, lines = Pull(filename, url_step, min_key, delim, prefix)
    #print min_key
    # Log how long this batch took, in seconds.milliseconds.
    diff = datetime.datetime.now() - t0
    secs = "%d.%d" % (diff.seconds, diff.microseconds/1000)
    logging.info('fetched header + %d in %s secs from %s', lines, secs, log_key)
  return 0
# Script entry point: exit with main()'s status code.
if __name__ == '__main__':
  sys.exit(main(sys.argv))
| Python |
"""
Copyright (c) 2003-2007 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
__version__ = "1.4.1"
| Python |
"""
Copyright (c) 2003-2007 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
import itertools
import datetime
import calendar
import thread
import sys
__all__ = ["rrule", "rruleset", "rrulestr",
           "YEARLY", "MONTHLY", "WEEKLY", "DAILY",
           "HOURLY", "MINUTELY", "SECONDLY",
           "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
# Every mask is 7 days longer to handle cross-year weekly periods.
# MxxxMASK: month number (1-12) for each day of a leap/non-leap year.
M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30+
                 [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7)
M365MASK = list(M366MASK)
M29, M30, M31 = range(1,30), range(1,31), range(1,32)
# MDAYxxxMASK: day-of-month (1..31) for each day of the year.
MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
MDAY365MASK = list(MDAY366MASK)
M29, M30, M31 = range(-29,0), range(-30,0), range(-31,0)
# NMDAYxxxMASK: negative day-of-month (-31..-1), counted from month end.
NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
NMDAY365MASK = list(NMDAY366MASK)
# MxxxRANGE[m-1:m+1] gives the [start, end) year-day span of month m.
M366RANGE = (0,31,60,91,121,152,182,213,244,274,305,335,366)
M365RANGE = (0,31,59,90,120,151,181,212,243,273,304,334,365)
WDAYMASK = [0,1,2,3,4,5,6]*55
# Build the 365-day masks from the 366-day ones by dropping Feb 29.
del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31]
MDAY365MASK = tuple(MDAY365MASK)
M365MASK = tuple(M365MASK)
# Frequency constants, ordered from coarsest to finest.
(YEARLY,
 MONTHLY,
 WEEKLY,
 DAILY,
 HOURLY,
 MINUTELY,
 SECONDLY) = range(7)
# Imported on demand.
easter = None
parser = None
class weekday(object):
    """A weekday, optionally qualified with an occurrence index n.

    weekday(0) is Monday.  MO(+1) means "first Monday", MO(-1) the last
    Monday, etc.  n == 0 is meaningless and rejected.
    """
    __slots__ = ["weekday", "n"]
    def __init__(self, weekday, n=None):
        if n == 0:
            # Bug-prone Py2-only 'raise E, msg' syntax replaced by the
            # call form (identical behavior, also valid in Python 3).
            raise ValueError("Can't create weekday with n == 0")
        self.weekday = weekday
        self.n = n
    def __call__(self, n):
        # MO(+2)-style construction; reuse self when n is unchanged.
        if n == self.n:
            return self
        else:
            return self.__class__(self.weekday, n)
    def __eq__(self, other):
        # Equal when both weekday and n match; anything without those
        # attributes compares unequal.
        try:
            if self.weekday != other.weekday or self.n != other.n:
                return False
        except AttributeError:
            return False
        return True
    def __repr__(self):
        s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
        if not self.n:
            return s
        else:
            return "%s(%+d)" % (s, self.n)
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
class rrulebase:
    """Shared base for rrule and rruleset: optional caching of generated
    occurrences plus sequence-style accessors (indexing, containment,
    count/before/after/between).  Occurrences are always ascending."""
    def __init__(self, cache=False):
        if cache:
            # With cache=True the (possibly expensive) recurrence is
            # generated once by self._iter() and memoized under a lock.
            self._cache = []
            self._cache_lock = thread.allocate_lock()
            self._cache_gen = self._iter()
            self._cache_complete = False
        else:
            self._cache = None
            self._cache_complete = False
        self._len = None
    def __iter__(self):
        if self._cache_complete:
            return iter(self._cache)
        elif self._cache is None:
            return self._iter()
        else:
            return self._iter_cached()
    def _iter_cached(self):
        # Serve from the shared cache, extending it 10 items at a time
        # under the lock; several iterators may consume concurrently.
        i = 0
        gen = self._cache_gen
        cache = self._cache
        acquire = self._cache_lock.acquire
        release = self._cache_lock.release
        while gen:
            if i == len(cache):
                acquire()
                if self._cache_complete:
                    break
                try:
                    for j in range(10):
                        cache.append(gen.next())
                except StopIteration:
                    self._cache_gen = gen = None
                    self._cache_complete = True
                    break
                release()
            # NOTE(review): both 'break' paths above exit while still
            # holding the lock -- looks suspicious; confirm intent.
            yield cache[i]
            i += 1
        # Cache is complete from here on; drain the remainder directly.
        while i < self._len:
            yield cache[i]
            i += 1
    def __getitem__(self, item):
        # Supports ints (including negative) and slices.  Negative
        # indexes and negative slice steps force full materialization.
        if self._cache_complete:
            return self._cache[item]
        elif isinstance(item, slice):
            if item.step and item.step < 0:
                return list(iter(self))[item]
            else:
                return list(itertools.islice(self,
                                             item.start or 0,
                                             item.stop or sys.maxint,
                                             item.step or 1))
        elif item >= 0:
            gen = iter(self)
            try:
                for i in range(item+1):
                    res = gen.next()
            except StopIteration:
                raise IndexError
            return res
        else:
            return list(iter(self))[item]
    def __contains__(self, item):
        # Occurrences are ascending, so we can stop as soon as we pass
        # the probed value.
        if self._cache_complete:
            return item in self._cache
        else:
            for i in self:
                if i == item:
                    return True
                elif i > item:
                    return False
        return False
    # __len__() introduces a large performance penality.
    def count(self):
        """Return the total number of occurrences (exhausts the rule)."""
        if self._len is None:
            for x in self: pass
        return self._len
    def before(self, dt, inc=False):
        """Return the last occurrence < dt (or <= dt when inc=True)."""
        if self._cache_complete:
            gen = self._cache
        else:
            gen = self
        last = None
        if inc:
            for i in gen:
                if i > dt:
                    break
                last = i
        else:
            for i in gen:
                if i >= dt:
                    break
                last = i
        return last
    def after(self, dt, inc=False):
        """Return the first occurrence > dt (or >= dt when inc=True)."""
        if self._cache_complete:
            gen = self._cache
        else:
            gen = self
        if inc:
            for i in gen:
                if i >= dt:
                    return i
        else:
            for i in gen:
                if i > dt:
                    return i
        return None
    def between(self, after, before, inc=False):
        """Return all occurrences strictly between 'after' and 'before'
        (endpoints included when inc=True)."""
        if self._cache_complete:
            gen = self._cache
        else:
            gen = self
        started = False
        l = []
        if inc:
            for i in gen:
                if i > before:
                    break
                elif not started:
                    if i >= after:
                        started = True
                        l.append(i)
                else:
                    l.append(i)
        else:
            for i in gen:
                if i >= before:
                    break
                elif not started:
                    if i > after:
                        started = True
                        l.append(i)
                else:
                    l.append(i)
        return l
class rrule(rrulebase):
    """A single RFC 2445 recurrence rule.

    A base frequency (YEARLY..SECONDLY) is refined by the BYxxx filters,
    anchored at dtstart and bounded by 'count' and/or 'until'.  _iter()
    yields occurrences as datetime objects in ascending order.
    """
    def __init__(self, freq, dtstart=None,
                 interval=1, wkst=None, count=None, until=None, bysetpos=None,
                 bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
                 byweekno=None, byweekday=None,
                 byhour=None, byminute=None, bysecond=None,
                 cache=False):
        rrulebase.__init__(self, cache)
        global easter
        # Normalize dtstart to a datetime with zeroed microseconds.
        if not dtstart:
            dtstart = datetime.datetime.now().replace(microsecond=0)
        elif not isinstance(dtstart, datetime.datetime):
            dtstart = datetime.datetime.fromordinal(dtstart.toordinal())
        else:
            dtstart = dtstart.replace(microsecond=0)
        self._dtstart = dtstart
        self._tzinfo = dtstart.tzinfo
        self._freq = freq
        self._interval = interval
        self._count = count
        if until and not isinstance(until, datetime.datetime):
            until = datetime.datetime.fromordinal(until.toordinal())
        self._until = until
        if wkst is None:
            self._wkst = calendar.firstweekday()
        elif type(wkst) is int:
            self._wkst = wkst
        else:
            self._wkst = wkst.weekday
        if bysetpos is None:
            self._bysetpos = None
        elif type(bysetpos) is int:
            if bysetpos == 0 or not (-366 <= bysetpos <= 366):
                raise ValueError("bysetpos must be between 1 and 366, "
                                 "or between -366 and -1")
            self._bysetpos = (bysetpos,)
        else:
            self._bysetpos = tuple(bysetpos)
            for pos in self._bysetpos:
                if pos == 0 or not (-366 <= pos <= 366):
                    raise ValueError("bysetpos must be between 1 and 366, "
                                     "or between -366 and -1")
        # When no day-level filter was given, derive one from dtstart so
        # each frequency has something to anchor on.
        if not (byweekno or byyearday or bymonthday or
                byweekday is not None or byeaster is not None):
            if freq == YEARLY:
                if not bymonth:
                    bymonth = dtstart.month
                bymonthday = dtstart.day
            elif freq == MONTHLY:
                bymonthday = dtstart.day
            elif freq == WEEKLY:
                byweekday = dtstart.weekday()
        # bymonth
        if not bymonth:
            self._bymonth = None
        elif type(bymonth) is int:
            self._bymonth = (bymonth,)
        else:
            self._bymonth = tuple(bymonth)
        # byyearday
        if not byyearday:
            self._byyearday = None
        elif type(byyearday) is int:
            self._byyearday = (byyearday,)
        else:
            self._byyearday = tuple(byyearday)
        # byeaster
        if byeaster is not None:
            if not easter:
                from dateutil import easter
            if type(byeaster) is int:
                self._byeaster = (byeaster,)
            else:
                self._byeaster = tuple(byeaster)
        else:
            self._byeaster = None
        # bymonthay
        if not bymonthday:
            self._bymonthday = ()
            self._bynmonthday = ()
        elif type(bymonthday) is int:
            if bymonthday < 0:
                self._bynmonthday = (bymonthday,)
                self._bymonthday = ()
            else:
                self._bymonthday = (bymonthday,)
                self._bynmonthday = ()
        else:
            self._bymonthday = tuple([x for x in bymonthday if x > 0])
            self._bynmonthday = tuple([x for x in bymonthday if x < 0])
        # byweekno
        if byweekno is None:
            self._byweekno = None
        elif type(byweekno) is int:
            self._byweekno = (byweekno,)
        else:
            self._byweekno = tuple(byweekno)
        # byweekday / bynweekday
        if byweekday is None:
            self._byweekday = None
            self._bynweekday = None
        elif type(byweekday) is int:
            self._byweekday = (byweekday,)
            self._bynweekday = None
        elif hasattr(byweekday, "n"):
            if not byweekday.n or freq > MONTHLY:
                self._byweekday = (byweekday.weekday,)
                self._bynweekday = None
            else:
                self._bynweekday = ((byweekday.weekday, byweekday.n),)
                self._byweekday = None
        else:
            self._byweekday = []
            self._bynweekday = []
            for wday in byweekday:
                if type(wday) is int:
                    self._byweekday.append(wday)
                elif not wday.n or freq > MONTHLY:
                    self._byweekday.append(wday.weekday)
                else:
                    self._bynweekday.append((wday.weekday, wday.n))
            self._byweekday = tuple(self._byweekday)
            self._bynweekday = tuple(self._bynweekday)
            if not self._byweekday:
                self._byweekday = None
            elif not self._bynweekday:
                self._bynweekday = None
        # byhour
        if byhour is None:
            if freq < HOURLY:
                self._byhour = (dtstart.hour,)
            else:
                self._byhour = None
        elif type(byhour) is int:
            self._byhour = (byhour,)
        else:
            self._byhour = tuple(byhour)
        # byminute
        if byminute is None:
            if freq < MINUTELY:
                self._byminute = (dtstart.minute,)
            else:
                self._byminute = None
        elif type(byminute) is int:
            self._byminute = (byminute,)
        else:
            self._byminute = tuple(byminute)
        # bysecond
        if bysecond is None:
            if freq < SECONDLY:
                self._bysecond = (dtstart.second,)
            else:
                self._bysecond = None
        elif type(bysecond) is int:
            self._bysecond = (bysecond,)
        else:
            self._bysecond = tuple(bysecond)
        # For daily-or-coarser frequencies the set of times is fixed and
        # can be precomputed once.
        if self._freq >= HOURLY:
            self._timeset = None
        else:
            self._timeset = []
            for hour in self._byhour:
                for minute in self._byminute:
                    for second in self._bysecond:
                        self._timeset.append(
                                datetime.time(hour, minute, second,
                                              tzinfo=self._tzinfo))
            self._timeset.sort()
            self._timeset = tuple(self._timeset)
    def _iter(self):
        """Generate the occurrences of this rule in ascending order."""
        year, month, day, hour, minute, second, weekday, yearday, _ = \
            self._dtstart.timetuple()
        # Some local variables to speed things up a bit
        freq = self._freq
        interval = self._interval
        wkst = self._wkst
        until = self._until
        bymonth = self._bymonth
        byweekno = self._byweekno
        byyearday = self._byyearday
        byweekday = self._byweekday
        byeaster = self._byeaster
        bymonthday = self._bymonthday
        bynmonthday = self._bynmonthday
        bysetpos = self._bysetpos
        byhour = self._byhour
        byminute = self._byminute
        bysecond = self._bysecond
        ii = _iterinfo(self)
        ii.rebuild(year, month)
        getdayset = {YEARLY:ii.ydayset,
                     MONTHLY:ii.mdayset,
                     WEEKLY:ii.wdayset,
                     DAILY:ii.ddayset,
                     HOURLY:ii.ddayset,
                     MINUTELY:ii.ddayset,
                     SECONDLY:ii.ddayset}[freq]
        if freq < HOURLY:
            timeset = self._timeset
        else:
            gettimeset = {HOURLY:ii.htimeset,
                          MINUTELY:ii.mtimeset,
                          SECONDLY:ii.stimeset}[freq]
            if ((freq >= HOURLY and
                 self._byhour and hour not in self._byhour) or
                (freq >= MINUTELY and
                 self._byminute and minute not in self._byminute) or
                (freq >= SECONDLY and
                 # Bug fix: this clause tested 'minute' against
                 # self._bysecond, which wrongly filtered SECONDLY rules.
                 self._bysecond and second not in self._bysecond)):
                timeset = ()
            else:
                timeset = gettimeset(hour, minute, second)
        total = 0
        count = self._count
        while True:
            # Get dayset with the right frequency
            dayset, start, end = getdayset(year, month, day)
            # Do the "hard" work ;-)
            filtered = False
            for i in dayset[start:end]:
                if ((bymonth and ii.mmask[i] not in bymonth) or
                    (byweekno and not ii.wnomask[i]) or
                    (byweekday and ii.wdaymask[i] not in byweekday) or
                    (ii.nwdaymask and not ii.nwdaymask[i]) or
                    (byeaster and not ii.eastermask[i]) or
                    ((bymonthday or bynmonthday) and
                     ii.mdaymask[i] not in bymonthday and
                     ii.nmdaymask[i] not in bynmonthday) or
                    (byyearday and
                     ((i < ii.yearlen and i+1 not in byyearday
                       and -ii.yearlen+i not in byyearday) or
                      (i >= ii.yearlen and i+1-ii.yearlen not in byyearday
                       and -ii.nextyearlen+i-ii.yearlen
                           not in byyearday)))):
                    dayset[i] = None
                    filtered = True
            # Output results
            if bysetpos and timeset:
                poslist = []
                for pos in bysetpos:
                    if pos < 0:
                        daypos, timepos = divmod(pos, len(timeset))
                    else:
                        daypos, timepos = divmod(pos-1, len(timeset))
                    try:
                        i = [x for x in dayset[start:end]
                             if x is not None][daypos]
                        time = timeset[timepos]
                    except IndexError:
                        pass
                    else:
                        date = datetime.date.fromordinal(ii.yearordinal+i)
                        res = datetime.datetime.combine(date, time)
                        if res not in poslist:
                            poslist.append(res)
                poslist.sort()
                for res in poslist:
                    if until and res > until:
                        self._len = total
                        return
                    elif res >= self._dtstart:
                        total += 1
                        yield res
                        if count:
                            count -= 1
                            if not count:
                                self._len = total
                                return
            else:
                for i in dayset[start:end]:
                    if i is not None:
                        date = datetime.date.fromordinal(ii.yearordinal+i)
                        for time in timeset:
                            res = datetime.datetime.combine(date, time)
                            if until and res > until:
                                self._len = total
                                return
                            elif res >= self._dtstart:
                                total += 1
                                yield res
                                if count:
                                    count -= 1
                                    if not count:
                                        self._len = total
                                        return
            # Handle frequency and interval
            fixday = False
            if freq == YEARLY:
                year += interval
                if year > datetime.MAXYEAR:
                    self._len = total
                    return
                ii.rebuild(year, month)
            elif freq == MONTHLY:
                month += interval
                if month > 12:
                    div, mod = divmod(month, 12)
                    month = mod
                    year += div
                    if month == 0:
                        month = 12
                        year -= 1
                    if year > datetime.MAXYEAR:
                        self._len = total
                        return
                ii.rebuild(year, month)
            elif freq == WEEKLY:
                if wkst > weekday:
                    day += -(weekday+1+(6-wkst))+self._interval*7
                else:
                    day += -(weekday-wkst)+self._interval*7
                weekday = wkst
                fixday = True
            elif freq == DAILY:
                day += interval
                fixday = True
            elif freq == HOURLY:
                if filtered:
                    # Jump to one iteration before next day
                    hour += ((23-hour)//interval)*interval
                while True:
                    hour += interval
                    div, mod = divmod(hour, 24)
                    if div:
                        hour = mod
                        day += div
                        fixday = True
                    if not byhour or hour in byhour:
                        break
                timeset = gettimeset(hour, minute, second)
            elif freq == MINUTELY:
                if filtered:
                    # Jump to one iteration before next day
                    minute += ((1439-(hour*60+minute))//interval)*interval
                while True:
                    minute += interval
                    div, mod = divmod(minute, 60)
                    if div:
                        minute = mod
                        hour += div
                        div, mod = divmod(hour, 24)
                        if div:
                            hour = mod
                            day += div
                            fixday = True
                            filtered = False
                    if ((not byhour or hour in byhour) and
                        (not byminute or minute in byminute)):
                        break
                timeset = gettimeset(hour, minute, second)
            elif freq == SECONDLY:
                if filtered:
                    # Jump to one iteration before next day
                    second += (((86399-(hour*3600+minute*60+second))
                                //interval)*interval)
                while True:
                    second += self._interval
                    div, mod = divmod(second, 60)
                    if div:
                        second = mod
                        minute += div
                        div, mod = divmod(minute, 60)
                        if div:
                            minute = mod
                            hour += div
                            div, mod = divmod(hour, 24)
                            if div:
                                hour = mod
                                day += div
                                fixday = True
                    if ((not byhour or hour in byhour) and
                        (not byminute or minute in byminute) and
                        (not bysecond or second in bysecond)):
                        break
                timeset = gettimeset(hour, minute, second)
            if fixday and day > 28:
                # Normalize a day offset that overflowed the month.
                daysinmonth = calendar.monthrange(year, month)[1]
                if day > daysinmonth:
                    while day > daysinmonth:
                        day -= daysinmonth
                        month += 1
                        if month == 13:
                            month = 1
                            year += 1
                            if year > datetime.MAXYEAR:
                                self._len = total
                                return
                        daysinmonth = calendar.monthrange(year, month)[1]
                    ii.rebuild(year, month)
class _iterinfo(object):
    """Per-(year, month) precomputed tables used by rrule._iter().

    rebuild() lazily refreshes the day/week masks whenever iteration
    crosses into a new year or month; the *dayset/*timeset methods slice
    candidate days/times out of those tables for one frequency period.
    """
    __slots__ = ["rrule", "lastyear", "lastmonth",
                 "yearlen", "nextyearlen", "yearordinal", "yearweekday",
                 "mmask", "mrange", "mdaymask", "nmdaymask",
                 "wdaymask", "wnomask", "nwdaymask", "eastermask"]
    def __init__(self, rrule):
        for attr in self.__slots__:
            setattr(self, attr, None)
        self.rrule = rrule
    def rebuild(self, year, month):
        # Every mask is 7 days longer to handle cross-year weekly periods.
        rr = self.rrule
        if year != self.lastyear:
            self.yearlen = 365+calendar.isleap(year)
            self.nextyearlen = 365+calendar.isleap(year+1)
            firstyday = datetime.date(year, 1, 1)
            self.yearordinal = firstyday.toordinal()
            self.yearweekday = firstyday.weekday()
            wday = datetime.date(year, 1, 1).weekday()
            # Pick leap/non-leap variants of the shared module tables.
            if self.yearlen == 365:
                self.mmask = M365MASK
                self.mdaymask = MDAY365MASK
                self.nmdaymask = NMDAY365MASK
                self.wdaymask = WDAYMASK[wday:]
                self.mrange = M365RANGE
            else:
                self.mmask = M366MASK
                self.mdaymask = MDAY366MASK
                self.nmdaymask = NMDAY366MASK
                self.wdaymask = WDAYMASK[wday:]
                self.mrange = M366RANGE
            if not rr._byweekno:
                self.wnomask = None
            else:
                # wnomask[i] == 1 when year-day i belongs to a requested
                # ISO-style week number (weeks start on rr._wkst).
                self.wnomask = [0]*(self.yearlen+7)
                #no1wkst = firstwkst = self.wdaymask.index(rr._wkst)
                no1wkst = firstwkst = (7-self.yearweekday+rr._wkst)%7
                if no1wkst >= 4:
                    no1wkst = 0
                    # Number of days in the year, plus the days we got
                    # from last year.
                    wyearlen = self.yearlen+(self.yearweekday-rr._wkst)%7
                else:
                    # Number of days in the year, minus the days we
                    # left in last year.
                    wyearlen = self.yearlen-no1wkst
                div, mod = divmod(wyearlen, 7)
                numweeks = div+mod//4
                for n in rr._byweekno:
                    if n < 0:
                        n += numweeks+1
                    if not (0 < n <= numweeks):
                        continue
                    if n > 1:
                        i = no1wkst+(n-1)*7
                        if no1wkst != firstwkst:
                            i -= 7-firstwkst
                    else:
                        i = no1wkst
                    for j in range(7):
                        self.wnomask[i] = 1
                        i += 1
                        if self.wdaymask[i] == rr._wkst:
                            break
                if 1 in rr._byweekno:
                    # Check week number 1 of next year as well
                    # TODO: Check -numweeks for next year.
                    i = no1wkst+numweeks*7
                    if no1wkst != firstwkst:
                        i -= 7-firstwkst
                    if i < self.yearlen:
                        # If week starts in next year, we
                        # don't care about it.
                        for j in range(7):
                            self.wnomask[i] = 1
                            i += 1
                            if self.wdaymask[i] == rr._wkst:
                                break
                if no1wkst:
                    # Check last week number of last year as
                    # well. If no1wkst is 0, either the year
                    # started on week start, or week number 1
                    # got days from last year, so there are no
                    # days from last year's last week number in
                    # this year.
                    if -1 not in rr._byweekno:
                        lyearweekday = datetime.date(year-1,1,1).weekday()
                        lno1wkst = (7-lyearweekday+rr._wkst)%7
                        lyearlen = 365+calendar.isleap(year-1)
                        if lno1wkst >= 4:
                            lno1wkst = 0
                            lnumweeks = 52+(lyearlen+
                                           (lyearweekday-rr._wkst)%7)%7//4
                        else:
                            lnumweeks = 52+(self.yearlen-no1wkst)%7//4
                    else:
                        lnumweeks = -1
                    if lnumweeks in rr._byweekno:
                        for i in range(no1wkst):
                            self.wnomask[i] = 1
        if (rr._bynweekday and
            (month != self.lastmonth or year != self.lastyear)):
            # nwdaymask[i] == 1 for days matching "nth weekday of the
            # period" requests (e.g. MO(+2), FR(-1)).
            ranges = []
            if rr._freq == YEARLY:
                if rr._bymonth:
                    for month in rr._bymonth:
                        ranges.append(self.mrange[month-1:month+1])
                else:
                    ranges = [(0, self.yearlen)]
            elif rr._freq == MONTHLY:
                ranges = [self.mrange[month-1:month+1]]
            if ranges:
                # Weekly frequency won't get here, so we may not
                # care about cross-year weekly periods.
                self.nwdaymask = [0]*self.yearlen
                for first, last in ranges:
                    last -= 1
                    for wday, n in rr._bynweekday:
                        if n < 0:
                            i = last+(n+1)*7
                            i -= (self.wdaymask[i]-wday)%7
                        else:
                            i = first+(n-1)*7
                            i += (7-self.wdaymask[i]+wday)%7
                        if first <= i <= last:
                            self.nwdaymask[i] = 1
        if rr._byeaster:
            self.eastermask = [0]*(self.yearlen+7)
            eyday = easter.easter(year).toordinal()-self.yearordinal
            for offset in rr._byeaster:
                self.eastermask[eyday+offset] = 1
        self.lastyear = year
        self.lastmonth = month
    def ydayset(self, year, month, day):
        # YEARLY: every day of the year is a candidate.
        return range(self.yearlen), 0, self.yearlen
    def mdayset(self, year, month, day):
        # MONTHLY: candidates are the days of 'month'.
        set = [None]*self.yearlen
        start, end = self.mrange[month-1:month+1]
        for i in range(start, end):
            set[i] = i
        return set, start, end
    def wdayset(self, year, month, day):
        # WEEKLY: one week starting at (year, month, day).
        # We need to handle cross-year weeks here.
        set = [None]*(self.yearlen+7)
        i = datetime.date(year, month, day).toordinal()-self.yearordinal
        start = i
        for j in range(7):
            set[i] = i
            i += 1
            #if (not (0 <= i < self.yearlen) or
            #    self.wdaymask[i] == self.rrule._wkst):
            # This will cross the year boundary, if necessary.
            if self.wdaymask[i] == self.rrule._wkst:
                break
        return set, start, i
    def ddayset(self, year, month, day):
        # DAILY and finer: a single candidate day.
        set = [None]*self.yearlen
        i = datetime.date(year, month, day).toordinal()-self.yearordinal
        set[i] = i
        return set, i, i+1
    def htimeset(self, hour, minute, second):
        # HOURLY: all byminute/bysecond combinations within this hour.
        set = []
        rr = self.rrule
        for minute in rr._byminute:
            for second in rr._bysecond:
                set.append(datetime.time(hour, minute, second,
                                         tzinfo=rr._tzinfo))
        set.sort()
        return set
    def mtimeset(self, hour, minute, second):
        # MINUTELY: all bysecond values within this minute.
        set = []
        rr = self.rrule
        for second in rr._bysecond:
            set.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo))
        set.sort()
        return set
    def stimeset(self, hour, minute, second):
        # SECONDLY: exactly one time.
        return (datetime.time(hour, minute, second,
                              tzinfo=self.rrule._tzinfo),)
class rruleset(rrulebase):
    """A set-combination of recurrences: the union of rrule/rdate entries
    minus anything matched by exrule/exdate entries."""
    class _genitem:
        # Holds the current head of one ascending datetime stream so the
        # heads of several streams can be kept sorted and merged.
        def __init__(self, genlist, gen):
            try:
                self.dt = gen()
                genlist.append(self)
            except StopIteration:
                pass
            self.genlist = genlist
            self.gen = gen
        def next(self):
            # Advance the stream; remove ourselves when it is exhausted.
            try:
                self.dt = self.gen()
            except StopIteration:
                self.genlist.remove(self)
        def __cmp__(self, other):
            return cmp(self.dt, other.dt)
    def __init__(self, cache=False):
        rrulebase.__init__(self, cache)
        self._rrule = []
        self._rdate = []
        self._exrule = []
        self._exdate = []
    def rrule(self, rrule):
        """Include all occurrences of the given rrule."""
        self._rrule.append(rrule)
    def rdate(self, rdate):
        """Include a single datetime."""
        self._rdate.append(rdate)
    def exrule(self, exrule):
        """Exclude all occurrences of the given rrule."""
        self._exrule.append(exrule)
    def exdate(self, exdate):
        """Exclude a single datetime."""
        self._exdate.append(exdate)
    def _iter(self):
        # Merge the inclusion streams in ascending order, advancing the
        # exclusion streams alongside; duplicates are emitted only once.
        rlist = []
        self._rdate.sort()
        self._genitem(rlist, iter(self._rdate).next)
        for gen in [iter(x).next for x in self._rrule]:
            self._genitem(rlist, gen)
        rlist.sort()
        exlist = []
        self._exdate.sort()
        self._genitem(exlist, iter(self._exdate).next)
        for gen in [iter(x).next for x in self._exrule]:
            self._genitem(exlist, gen)
        exlist.sort()
        lastdt = None
        total = 0
        while rlist:
            ritem = rlist[0]
            if not lastdt or lastdt != ritem.dt:
                # Skip exclusions that are already behind us.
                while exlist and exlist[0] < ritem:
                    exlist[0].next()
                    exlist.sort()
                if not exlist or ritem != exlist[0]:
                    total += 1
                    yield ritem.dt
                lastdt = ritem.dt
            ritem.next()
            rlist.sort()
        self._len = total
class _rrulestr:
    """Parser turning RFC 2445 RRULE strings (optionally with DTSTART,
    RDATE, EXRULE and EXDATE properties) into rrule/rruleset objects.

    Each RRULE component NAME=VALUE is dispatched to a _handle_NAME
    method that fills an rrule keyword dict.
    """
    _freq_map = {"YEARLY": YEARLY,
                 "MONTHLY": MONTHLY,
                 "WEEKLY": WEEKLY,
                 "DAILY": DAILY,
                 "HOURLY": HOURLY,
                 "MINUTELY": MINUTELY,
                 "SECONDLY": SECONDLY}
    _weekday_map = {"MO":0,"TU":1,"WE":2,"TH":3,"FR":4,"SA":5,"SU":6}
    def _handle_int(self, rrkwargs, name, value, **kwargs):
        rrkwargs[name.lower()] = int(value)
    def _handle_int_list(self, rrkwargs, name, value, **kwargs):
        rrkwargs[name.lower()] = [int(x) for x in value.split(',')]
    _handle_INTERVAL = _handle_int
    _handle_COUNT = _handle_int
    _handle_BYSETPOS = _handle_int_list
    _handle_BYMONTH = _handle_int_list
    _handle_BYMONTHDAY = _handle_int_list
    _handle_BYYEARDAY = _handle_int_list
    _handle_BYEASTER = _handle_int_list
    _handle_BYWEEKNO = _handle_int_list
    _handle_BYHOUR = _handle_int_list
    _handle_BYMINUTE = _handle_int_list
    _handle_BYSECOND = _handle_int_list
    def _handle_FREQ(self, rrkwargs, name, value, **kwargs):
        rrkwargs["freq"] = self._freq_map[value]
    def _handle_UNTIL(self, rrkwargs, name, value, **kwargs):
        global parser
        if not parser:
            from dateutil import parser
        try:
            rrkwargs["until"] = parser.parse(value,
                                             ignoretz=kwargs.get("ignoretz"),
                                             tzinfos=kwargs.get("tzinfos"))
        except ValueError:
            # Py2-only 'raise E, msg' syntax rewritten as the call form
            # (identical behavior, also valid in Python 3).
            raise ValueError("invalid until date")
    def _handle_WKST(self, rrkwargs, name, value, **kwargs):
        rrkwargs["wkst"] = self._weekday_map[value]
    def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs):
        # Values look like "MO", "+2FR", "-1SU": an optional signed
        # ordinal followed by a two-letter weekday.
        # (Fixed the harmless '**kwarsg' typo in the catch-all parameter.)
        l = []
        for wday in value.split(','):
            for i in range(len(wday)):
                if wday[i] not in '+-0123456789':
                    break
            n = wday[:i] or None
            w = wday[i:]
            if n: n = int(n)
            l.append(weekdays[self._weekday_map[w]](n))
        rrkwargs["byweekday"] = l
    _handle_BYDAY = _handle_BYWEEKDAY
    def _parse_rfc_rrule(self, line,
                         dtstart=None,
                         cache=False,
                         ignoretz=False,
                         tzinfos=None):
        """Parse a single 'RRULE:FREQ=...' (or bare 'FREQ=...') line."""
        if line.find(':') != -1:
            name, value = line.split(':')
            if name != "RRULE":
                raise ValueError("unknown parameter name")
        else:
            value = line
        rrkwargs = {}
        for pair in value.split(';'):
            name, value = pair.split('=')
            name = name.upper()
            value = value.upper()
            try:
                getattr(self, "_handle_"+name)(rrkwargs, name, value,
                                               ignoretz=ignoretz,
                                               tzinfos=tzinfos)
            except AttributeError:
                raise ValueError("unknown parameter '%s'" % name)
            except (KeyError, ValueError):
                raise ValueError("invalid '%s': %s" % (name, value))
        return rrule(dtstart=dtstart, cache=cache, **rrkwargs)
    def _parse_rfc(self, s,
                   dtstart=None,
                   cache=False,
                   unfold=False,
                   forceset=False,
                   compatible=False,
                   ignoretz=False,
                   tzinfos=None):
        """Parse a full iCalendar-style snippet; returns an rrule when a
        single rule suffices, otherwise an rruleset."""
        global parser
        if compatible:
            forceset = True
            unfold = True
        s = s.upper()
        if not s.strip():
            raise ValueError("empty string")
        if unfold:
            # Unfold RFC 2445 continuation lines (leading space).
            lines = s.splitlines()
            i = 0
            while i < len(lines):
                line = lines[i].rstrip()
                if not line:
                    del lines[i]
                elif i > 0 and line[0] == " ":
                    lines[i-1] += line[1:]
                    del lines[i]
                else:
                    i += 1
        else:
            lines = s.split()
        if (not forceset and len(lines) == 1 and
            (s.find(':') == -1 or s.startswith('RRULE:'))):
            return self._parse_rfc_rrule(lines[0], cache=cache,
                                         dtstart=dtstart, ignoretz=ignoretz,
                                         tzinfos=tzinfos)
        else:
            rrulevals = []
            rdatevals = []
            exrulevals = []
            exdatevals = []
            for line in lines:
                if not line:
                    continue
                if line.find(':') == -1:
                    name = "RRULE"
                    value = line
                else:
                    name, value = line.split(':', 1)
                parms = name.split(';')
                if not parms:
                    raise ValueError("empty property name")
                name = parms[0]
                parms = parms[1:]
                if name == "RRULE":
                    for parm in parms:
                        raise ValueError("unsupported RRULE parm: "+parm)
                    rrulevals.append(value)
                elif name == "RDATE":
                    for parm in parms:
                        if parm != "VALUE=DATE-TIME":
                            raise ValueError("unsupported RDATE parm: "+parm)
                    rdatevals.append(value)
                elif name == "EXRULE":
                    for parm in parms:
                        raise ValueError("unsupported EXRULE parm: "+parm)
                    exrulevals.append(value)
                elif name == "EXDATE":
                    for parm in parms:
                        if parm != "VALUE=DATE-TIME":
                            # Bug fix: the message wrongly said "RDATE".
                            raise ValueError("unsupported EXDATE parm: "+parm)
                    exdatevals.append(value)
                elif name == "DTSTART":
                    for parm in parms:
                        raise ValueError("unsupported DTSTART parm: "+parm)
                    if not parser:
                        from dateutil import parser
                    dtstart = parser.parse(value, ignoretz=ignoretz,
                                           tzinfos=tzinfos)
                else:
                    raise ValueError("unsupported property: "+name)
            if (forceset or len(rrulevals) > 1 or
                rdatevals or exrulevals or exdatevals):
                if not parser and (rdatevals or exdatevals):
                    from dateutil import parser
                set = rruleset(cache=cache)
                for value in rrulevals:
                    set.rrule(self._parse_rfc_rrule(value, dtstart=dtstart,
                                                    ignoretz=ignoretz,
                                                    tzinfos=tzinfos))
                for value in rdatevals:
                    for datestr in value.split(','):
                        set.rdate(parser.parse(datestr,
                                               ignoretz=ignoretz,
                                               tzinfos=tzinfos))
                for value in exrulevals:
                    set.exrule(self._parse_rfc_rrule(value, dtstart=dtstart,
                                                     ignoretz=ignoretz,
                                                     tzinfos=tzinfos))
                for value in exdatevals:
                    for datestr in value.split(','):
                        set.exdate(parser.parse(datestr,
                                                ignoretz=ignoretz,
                                                tzinfos=tzinfos))
                if compatible and dtstart:
                    set.rdate(dtstart)
                return set
            else:
                return self._parse_rfc_rrule(rrulevals[0],
                                             dtstart=dtstart,
                                             cache=cache,
                                             ignoretz=ignoretz,
                                             tzinfos=tzinfos)
    def __call__(self, s, **kwargs):
        return self._parse_rfc(s, **kwargs)
rrulestr = _rrulestr()
# vim:ts=4:sw=4:et
| Python |
"""
Copyright (c) 2003-2007 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
import datetime
__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"]
EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3
def easter(year, method=EASTER_WESTERN):
"""
This method was ported from the work done by GM Arts,
on top of the algorithm by Claus Tondering, which was
based in part on the algorithm of Ouding (1940), as
quoted in "Explanatory Supplement to the Astronomical
Almanac", P. Kenneth Seidelmann, editor.
This algorithm implements three different easter
calculation methods:
1 - Original calculation in Julian calendar, valid in
dates after 326 AD
2 - Original method, with date converted to Gregorian
calendar, valid in years 1583 to 4099
3 - Revised method, in Gregorian calendar, valid in
years 1583 to 4099 as well
These methods are represented by the constants:
EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3
The default method is method 3.
More about the algorithm may be found at:
http://users.chariot.net.au/~gmarts/eastalg.htm
and
http://www.tondering.dk/claus/calendar.html
"""
if not (1 <= method <= 3):
raise ValueError, "invalid method"
# g - Golden year - 1
# c - Century
# h - (23 - Epact) mod 30
# i - Number of days from March 21 to Paschal Full Moon
# j - Weekday for PFM (0=Sunday, etc)
# p - Number of days from March 21 to Sunday on or before PFM
# (-6 to 28 methods 1 & 3, to 56 for method 2)
# e - Extra days to add for method 2 (converting Julian
# date to Gregorian date)
y = year
g = y % 19
e = 0
if method < 3:
# Old method
i = (19*g+15)%30
j = (y+y//4+i)%7
if method == 2:
# Extra dates to convert Julian to Gregorian date
e = 10
if y > 1600:
e = e+y//100-16-(y//100-16)//4
else:
# New method
c = y//100
h = (c-c//4-(8*c+13)//25+19*g+15)%30
i = h-(h//28)*(1-(h//28)*(29//(h+1))*((21-g)//11))
j = (y+y//4+i+2-c+c//4)%7
# p can be from -6 to 56 corresponding to dates 22 March to 23 May
# (later dates apply to method 2, although 23 May never actually occurs)
p = i-j+e
d = 1+(p+27+(p+6)//40)%31
m = 3+(p+26)//30
return datetime.date(int(y),int(m),int(d))
| Python |
"""
Copyright (c) 2003-2007 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
import datetime
import struct
import time
import sys
import os
relativedelta = None
parser = None
rrule = None
__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
"tzstr", "tzical", "tzwin", "tzwinlocal", "gettz"]
try:
from dateutil.tzwin import tzwin, tzwinlocal
except (ImportError, OSError):
tzwin, tzwinlocal = None, None
ZERO = datetime.timedelta(0)
EPOCHORDINAL = datetime.datetime.utcfromtimestamp(0).toordinal()
class tzutc(datetime.tzinfo):
    """tzinfo for the UTC timezone: fixed zero offset and no DST."""
    def utcoffset(self, dt):
        return ZERO
    def dst(self, dt):
        return ZERO
    def tzname(self, dt):
        return "UTC"
    def __eq__(self, other):
        # Another tzutc always compares equal; so does any tzoffset
        # whose fixed offset happens to be zero.
        if isinstance(other, tzutc):
            return True
        return isinstance(other, tzoffset) and other._offset == ZERO
    def __ne__(self, other):
        return not (self == other)
    def __repr__(self):
        return self.__class__.__name__ + "()"
    __reduce__ = object.__reduce__
class tzoffset(datetime.tzinfo):
    """Fixed-offset timezone.

    Args:
        name: the string tzname() should report (may be None).
        offset: UTC offset in seconds east of UTC.
    """
    def __init__(self, name, offset):
        self._name = name
        self._offset = datetime.timedelta(seconds=offset)
    def utcoffset(self, dt):
        return self._offset
    def dst(self, dt):
        # A fixed offset never observes daylight saving time.
        return ZERO
    def tzname(self, dt):
        return self._name
    def __eq__(self, other):
        # Equality ignores the name: only the offset matters.
        return (isinstance(other, tzoffset) and
                self._offset == other._offset)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __repr__(self):
        # Bug fix: replaced the Python-2-only backquote syntax with the
        # equivalent repr() call (removed in Python 3).
        return "%s(%s, %s)" % (self.__class__.__name__,
                               repr(self._name),
                               self._offset.days*86400+self._offset.seconds)
    __reduce__ = object.__reduce__
class tzlocal(datetime.tzinfo):
    """tzinfo reflecting the system local timezone as described by the
    'time' module (time.timezone / time.altzone / time.daylight)."""
    # Offsets are captured once at class-definition time.
    _std_offset = datetime.timedelta(seconds=-time.timezone)
    if time.daylight:
        _dst_offset = datetime.timedelta(seconds=-time.altzone)
    else:
        _dst_offset = _std_offset
    def utcoffset(self, dt):
        if self._isdst(dt):
            return self._dst_offset
        else:
            return self._std_offset
    def dst(self, dt):
        if self._isdst(dt):
            return self._dst_offset-self._std_offset
        else:
            return ZERO
    def tzname(self, dt):
        return time.tzname[self._isdst(dt)]
    def _isdst(self, dt):
        # We can't use mktime here. It is unstable when deciding if
        # the hour near to a change is DST or not.
        #
        # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour,
        #                          dt.minute, dt.second, dt.weekday(), 0, -1))
        # return time.localtime(timestamp).tm_isdst
        #
        # The code above yields the following result:
        #
        #>>> import tz, datetime
        #>>> t = tz.tzlocal()
        #>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
        #'BRDT'
        #>>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname()
        #'BRST'
        #>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
        #'BRST'
        #>>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname()
        #'BRDT'
        #>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
        #'BRDT'
        #
        # Here is a more stable implementation:
        #
        timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
                     + dt.hour * 3600
                     + dt.minute * 60
                     + dt.second)
        return time.localtime(timestamp+time.timezone).tm_isdst
    def __eq__(self, other):
        if not isinstance(other, tzlocal):
            return False
        # Bug fix: removed an unreachable 'return True' that followed
        # this return statement in the original.
        return (self._std_offset == other._std_offset and
                self._dst_offset == other._dst_offset)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __repr__(self):
        return "%s()" % self.__class__.__name__
    __reduce__ = object.__reduce__
class _ttinfo(object):
__slots__ = ["offset", "delta", "isdst", "abbr", "isstd", "isgmt"]
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def __repr__(self):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, `value`))
return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
def __eq__(self, other):
if not isinstance(other, _ttinfo):
return False
return (self.offset == other.offset and
self.delta == other.delta and
self.isdst == other.isdst and
self.abbr == other.abbr and
self.isstd == other.isstd and
self.isgmt == other.isgmt)
def __ne__(self, other):
return not self.__eq__(other)
def __getstate__(self):
state = {}
for name in self.__slots__:
state[name] = getattr(self, name, None)
return state
def __setstate__(self, state):
for name in self.__slots__:
if name in state:
setattr(self, name, state[name])
class tzfile(datetime.tzinfo):
    """tzinfo read from a binary tzfile(5) ("TZif") timezone database file.

    All transition times and local-time types are parsed eagerly in
    __init__; per-datetime lookups are then done by _find_ttinfo().
    """
    # http://www.twinsun.com/tz/tz-link.htm
    # ftp://elsie.nci.nih.gov/pub/tz*.tar.gz
    def __init__(self, fileobj):
        # *fileobj* may be a filename or an open file object.
        # NOTE(review): when a filename is given, the file opened here is
        # never explicitly closed -- it relies on refcounting/GC.
        if isinstance(fileobj, basestring):
            self._filename = fileobj
            fileobj = open(fileobj)
        elif hasattr(fileobj, "name"):
            self._filename = fileobj.name
        else:
            self._filename = `fileobj`
        # From tzfile(5):
        #
        # The time zone information files used by tzset(3)
        # begin with the magic characters "TZif" to identify
        # them as time zone information files, followed by
        # sixteen bytes reserved for future use, followed by
        # six four-byte values of type long, written in a
        # ``standard'' byte order (the high-order byte
        # of the value is written first).
        if fileobj.read(4) != "TZif":
            raise ValueError, "magic not found"
        fileobj.read(16)
        (
         # The number of UTC/local indicators stored in the file.
         ttisgmtcnt,
         # The number of standard/wall indicators stored in the file.
         ttisstdcnt,
         # The number of leap seconds for which data is
         # stored in the file.
         leapcnt,
         # The number of "transition times" for which data
         # is stored in the file.
         timecnt,
         # The number of "local time types" for which data
         # is stored in the file (must not be zero).
         typecnt,
         # The number of characters of "time zone
         # abbreviation strings" stored in the file.
         charcnt,
        ) = struct.unpack(">6l", fileobj.read(24))
        # The above header is followed by tzh_timecnt four-byte
        # values of type long, sorted in ascending order.
        # These values are written in ``standard'' byte order.
        # Each is used as a transition time (as returned by
        # time(2)) at which the rules for computing local time
        # change.
        if timecnt:
            self._trans_list = struct.unpack(">%dl" % timecnt,
                                             fileobj.read(timecnt*4))
        else:
            self._trans_list = []
        # Next come tzh_timecnt one-byte values of type unsigned
        # char; each one tells which of the different types of
        # ``local time'' types described in the file is associated
        # with the same-indexed transition time. These values
        # serve as indices into an array of ttinfo structures that
        # appears next in the file.
        if timecnt:
            self._trans_idx = struct.unpack(">%dB" % timecnt,
                                            fileobj.read(timecnt))
        else:
            self._trans_idx = []
        # Each ttinfo structure is written as a four-byte value
        # for tt_gmtoff of type long, in a standard byte
        # order, followed by a one-byte value for tt_isdst
        # and a one-byte value for tt_abbrind. In each
        # structure, tt_gmtoff gives the number of
        # seconds to be added to UTC, tt_isdst tells whether
        # tm_isdst should be set by localtime(3), and
        # tt_abbrind serves as an index into the array of
        # time zone abbreviation characters that follow the
        # ttinfo structure(s) in the file.
        ttinfo = []
        for i in range(typecnt):
            ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))
        abbr = fileobj.read(charcnt)
        # Then there are tzh_leapcnt pairs of four-byte
        # values, written in standard byte order; the
        # first value of each pair gives the time (as
        # returned by time(2)) at which a leap second
        # occurs; the second gives the total number of
        # leap seconds to be applied after the given time.
        # The pairs of values are sorted in ascending order
        # by time.
        # Not used, for now
        if leapcnt:
            leap = struct.unpack(">%dl" % (leapcnt*2),
                                 fileobj.read(leapcnt*8))
        # Then there are tzh_ttisstdcnt standard/wall
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as standard
        # time or wall clock time, and are used when
        # a time zone file is used in handling POSIX-style
        # time zone environment variables.
        if ttisstdcnt:
            isstd = struct.unpack(">%db" % ttisstdcnt,
                                  fileobj.read(ttisstdcnt))
        # Finally, there are tzh_ttisgmtcnt UTC/local
        # indicators, each stored as a one-byte value;
        # they tell whether the transition times associated
        # with local time types were specified as UTC or
        # local time, and are used when a time zone file
        # is used in handling POSIX-style time zone envi-
        # ronment variables.
        if ttisgmtcnt:
            isgmt = struct.unpack(">%db" % ttisgmtcnt,
                                  fileobj.read(ttisgmtcnt))
        # ** Everything has been read **
        # Build ttinfo list
        self._ttinfo_list = []
        for i in range(typecnt):
            gmtoff, isdst, abbrind = ttinfo[i]
            # Round to full-minutes if that's not the case. Python's
            # datetime doesn't accept sub-minute timezones. Check
            # http://python.org/sf/1447945 for some information.
            gmtoff = (gmtoff+30)//60*60
            tti = _ttinfo()
            tti.offset = gmtoff
            tti.delta = datetime.timedelta(seconds=gmtoff)
            tti.isdst = isdst
            tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
            tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
            tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
            self._ttinfo_list.append(tti)
        # Replace ttinfo indexes for ttinfo objects.
        trans_idx = []
        for idx in self._trans_idx:
            trans_idx.append(self._ttinfo_list[idx])
        self._trans_idx = tuple(trans_idx)
        # Set standard, dst, and before ttinfos. before will be
        # used when a given time is before any transitions,
        # and will be set to the first non-dst ttinfo, or to
        # the first dst, if all of them are dst.
        self._ttinfo_std = None
        self._ttinfo_dst = None
        self._ttinfo_before = None
        if self._ttinfo_list:
            if not self._trans_list:
                self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
            else:
                # Walk transitions backwards to find the most recent
                # standard and DST types.
                for i in range(timecnt-1,-1,-1):
                    tti = self._trans_idx[i]
                    if not self._ttinfo_std and not tti.isdst:
                        self._ttinfo_std = tti
                    elif not self._ttinfo_dst and tti.isdst:
                        self._ttinfo_dst = tti
                    if self._ttinfo_std and self._ttinfo_dst:
                        break
                else:
                    if self._ttinfo_dst and not self._ttinfo_std:
                        self._ttinfo_std = self._ttinfo_dst
            for tti in self._ttinfo_list:
                if not tti.isdst:
                    self._ttinfo_before = tti
                    break
            else:
                self._ttinfo_before = self._ttinfo_list[0]
        # Now fix transition times to become relative to wall time.
        #
        # I'm not sure about this. In my tests, the tz source file
        # is setup to wall time, and in the binary file isstd and
        # isgmt are off, so it should be in wall time. OTOH, it's
        # always in gmt time. Let me know if you have comments
        # about this.
        laststdoffset = 0
        self._trans_list = list(self._trans_list)
        for i in range(len(self._trans_list)):
            tti = self._trans_idx[i]
            if not tti.isdst:
                # This is std time.
                self._trans_list[i] += tti.offset
                laststdoffset = tti.offset
            else:
                # This is dst time. Convert to std.
                self._trans_list[i] += laststdoffset
        self._trans_list = tuple(self._trans_list)
    def _find_ttinfo(self, dt, laststd=0):
        """Return the _ttinfo in effect at naive datetime *dt*.

        With laststd true, return the last *standard* type at or before
        *dt* instead (used to compute the documented utcoffset()-dst()
        invariant)."""
        timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
                     + dt.hour * 3600
                     + dt.minute * 60
                     + dt.second)
        idx = 0
        for trans in self._trans_list:
            if timestamp < trans:
                break
            idx += 1
        else:
            return self._ttinfo_std
        if idx == 0:
            return self._ttinfo_before
        if laststd:
            while idx > 0:
                tti = self._trans_idx[idx-1]
                if not tti.isdst:
                    return tti
                idx -= 1
            else:
                return self._ttinfo_std
        else:
            return self._trans_idx[idx-1]
    def utcoffset(self, dt):
        if not self._ttinfo_std:
            return ZERO
        return self._find_ttinfo(dt).delta
    def dst(self, dt):
        if not self._ttinfo_dst:
            return ZERO
        tti = self._find_ttinfo(dt)
        if not tti.isdst:
            return ZERO
        # The documentation says that utcoffset()-dst() must
        # be constant for every dt.
        return tti.delta-self._find_ttinfo(dt, laststd=1).delta
        # An alternative for that would be:
        #
        # return self._ttinfo_dst.offset-self._ttinfo_std.offset
        #
        # However, this class stores historical changes in the
        # dst offset, so I belive that this wouldn't be the right
        # way to implement this.
    def tzname(self, dt):
        if not self._ttinfo_std:
            return None
        return self._find_ttinfo(dt).abbr
    def __eq__(self, other):
        if not isinstance(other, tzfile):
            return False
        return (self._trans_list == other._trans_list and
                self._trans_idx == other._trans_idx and
                self._ttinfo_list == other._ttinfo_list)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, `self._filename`)
    def __reduce__(self):
        # Pickle by filename; only possible when the original file still
        # exists on disk.
        if not os.path.isfile(self._filename):
            raise ValueError, "Unpickable %s class" % self.__class__.__name__
        return (self.__class__, (self._filename,))
class tzrange(datetime.tzinfo):
    """tzinfo described by abbreviations, fixed offsets (seconds east of
    UTC) and yearly recurring DST start/end rules given as relativedeltas.

    When a dstabbr is given without explicit rules, the historical US
    defaults apply: DST from the first Sunday of April (2AM) to the last
    Sunday of October (1AM standard time).
    """
    def __init__(self, stdabbr, stdoffset=None,
                 dstabbr=None, dstoffset=None,
                 start=None, end=None):
        global relativedelta
        if not relativedelta:
            # Imported lazily to avoid a circular import.
            from dateutil import relativedelta
        self._std_abbr = stdabbr
        self._dst_abbr = dstabbr
        if stdoffset is not None:
            self._std_offset = datetime.timedelta(seconds=stdoffset)
        else:
            self._std_offset = ZERO
        if dstoffset is not None:
            self._dst_offset = datetime.timedelta(seconds=dstoffset)
        elif dstabbr and stdoffset is not None:
            # Unspecified DST offset defaults to standard offset + 1 hour.
            self._dst_offset = self._std_offset+datetime.timedelta(hours=+1)
        else:
            self._dst_offset = ZERO
        if dstabbr and start is None:
            self._start_delta = relativedelta.relativedelta(
                hours=+2, month=4, day=1, weekday=relativedelta.SU(+1))
        else:
            self._start_delta = start
        if dstabbr and end is None:
            self._end_delta = relativedelta.relativedelta(
                hours=+1, month=10, day=31, weekday=relativedelta.SU(-1))
        else:
            self._end_delta = end
    def utcoffset(self, dt):
        if self._isdst(dt):
            return self._dst_offset
        else:
            return self._std_offset
    def dst(self, dt):
        if self._isdst(dt):
            return self._dst_offset-self._std_offset
        else:
            return ZERO
    def tzname(self, dt):
        if self._isdst(dt):
            return self._dst_abbr
        else:
            return self._std_abbr
    def _isdst(self, dt):
        # Materialize this year's DST window and test membership.  When
        # end < start (southern hemisphere) the window wraps the new year.
        if not self._start_delta:
            return False
        year = datetime.datetime(dt.year,1,1)
        start = year+self._start_delta
        end = year+self._end_delta
        dt = dt.replace(tzinfo=None)
        if start < end:
            return dt >= start and dt < end
        else:
            return dt >= start or dt < end
    def __eq__(self, other):
        if not isinstance(other, tzrange):
            return False
        return (self._std_abbr == other._std_abbr and
                self._dst_abbr == other._dst_abbr and
                self._std_offset == other._std_offset and
                self._dst_offset == other._dst_offset and
                self._start_delta == other._start_delta and
                self._end_delta == other._end_delta)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __repr__(self):
        return "%s(...)" % self.__class__.__name__
    __reduce__ = object.__reduce__
class tzstr(tzrange):
    """tzinfo built from a TZ-environment-variable-style specification
    string (e.g. "EST5EDT,M4.1.0/2,M10.5.0/2"), parsed with
    dateutil.parser._parsetz.
    """
    def __init__(self, s):
        global parser
        if not parser:
            # Imported lazily to avoid a circular import.
            from dateutil import parser
        self._s = s
        res = parser._parsetz(s)
        if res is None:
            raise ValueError, "unknown string format"
        # Here we break the compatibility with the TZ variable handling.
        # GMT-3 actually *means* the timezone -3.
        if res.stdabbr in ("GMT", "UTC"):
            res.stdoffset *= -1
        # We must initialize it first, since _delta() needs
        # _std_offset and _dst_offset set. Use False in start/end
        # to avoid building it two times.
        tzrange.__init__(self, res.stdabbr, res.stdoffset,
                         res.dstabbr, res.dstoffset,
                         start=False, end=False)
        if not res.dstabbr:
            self._start_delta = None
            self._end_delta = None
        else:
            self._start_delta = self._delta(res.start)
            if self._start_delta:
                self._end_delta = self._delta(res.end, isend=1)
    def _delta(self, x, isend=0):
        # Convert a parsed transition rule *x* into a relativedelta;
        # isend=1 marks the DST->STD transition, which must be expressed
        # in standard time.
        kwargs = {}
        if x.month is not None:
            kwargs["month"] = x.month
            if x.weekday is not None:
                kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week)
                if x.week > 0:
                    kwargs["day"] = 1
                else:
                    kwargs["day"] = 31
            elif x.day:
                kwargs["day"] = x.day
        elif x.yday is not None:
            kwargs["yearday"] = x.yday
        elif x.jyday is not None:
            kwargs["nlyearday"] = x.jyday
        if not kwargs:
            # Default is to start on first sunday of april, and end
            # on last sunday of october.
            if not isend:
                kwargs["month"] = 4
                kwargs["day"] = 1
                kwargs["weekday"] = relativedelta.SU(+1)
            else:
                kwargs["month"] = 10
                kwargs["day"] = 31
                kwargs["weekday"] = relativedelta.SU(-1)
        if x.time is not None:
            kwargs["seconds"] = x.time
        else:
            # Default is 2AM.
            kwargs["seconds"] = 7200
        if isend:
            # Convert to standard time, to follow the documented way
            # of working with the extra hour. See the documentation
            # of the tzinfo class.
            delta = self._dst_offset-self._std_offset
            kwargs["seconds"] -= delta.seconds+delta.days*86400
        return relativedelta.relativedelta(**kwargs)
    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, `self._s`)
class _tzicalvtzcomp:
def __init__(self, tzoffsetfrom, tzoffsetto, isdst,
tzname=None, rrule=None):
self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom)
self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto)
self.tzoffsetdiff = self.tzoffsetto-self.tzoffsetfrom
self.isdst = isdst
self.tzname = tzname
self.rrule = rrule
class _tzicalvtz(datetime.tzinfo):
def __init__(self, tzid, comps=[]):
self._tzid = tzid
self._comps = comps
self._cachedate = []
self._cachecomp = []
def _find_comp(self, dt):
if len(self._comps) == 1:
return self._comps[0]
dt = dt.replace(tzinfo=None)
try:
return self._cachecomp[self._cachedate.index(dt)]
except ValueError:
pass
lastcomp = None
lastcompdt = None
for comp in self._comps:
if not comp.isdst:
# Handle the extra hour in DST -> STD
compdt = comp.rrule.before(dt-comp.tzoffsetdiff, inc=True)
else:
compdt = comp.rrule.before(dt, inc=True)
if compdt and (not lastcompdt or lastcompdt < compdt):
lastcompdt = compdt
lastcomp = comp
if not lastcomp:
# RFC says nothing about what to do when a given
# time is before the first onset date. We'll look for the
# first standard component, or the first component, if
# none is found.
for comp in self._comps:
if not comp.isdst:
lastcomp = comp
break
else:
lastcomp = comp[0]
self._cachedate.insert(0, dt)
self._cachecomp.insert(0, lastcomp)
if len(self._cachedate) > 10:
self._cachedate.pop()
self._cachecomp.pop()
return lastcomp
def utcoffset(self, dt):
return self._find_comp(dt).tzoffsetto
def dst(self, dt):
comp = self._find_comp(dt)
if comp.isdst:
return comp.tzoffsetdiff
else:
return ZERO
def tzname(self, dt):
return self._find_comp(dt).tzname
def __repr__(self):
return "<tzicalvtz %s>" % `self._tzid`
__reduce__ = object.__reduce__
class tzical:
def __init__(self, fileobj):
global rrule
if not rrule:
from dateutil import rrule
if isinstance(fileobj, basestring):
self._s = fileobj
fileobj = open(fileobj)
elif hasattr(fileobj, "name"):
self._s = fileobj.name
else:
self._s = `fileobj`
self._vtz = {}
self._parse_rfc(fileobj.read())
def keys(self):
return self._vtz.keys()
def get(self, tzid=None):
if tzid is None:
keys = self._vtz.keys()
if len(keys) == 0:
raise ValueError, "no timezones defined"
elif len(keys) > 1:
raise ValueError, "more than one timezone available"
tzid = keys[0]
return self._vtz.get(tzid)
def _parse_offset(self, s):
s = s.strip()
if not s:
raise ValueError, "empty offset"
if s[0] in ('+', '-'):
signal = (-1,+1)[s[0]=='+']
s = s[1:]
else:
signal = +1
if len(s) == 4:
return (int(s[:2])*3600+int(s[2:])*60)*signal
elif len(s) == 6:
return (int(s[:2])*3600+int(s[2:4])*60+int(s[4:]))*signal
else:
raise ValueError, "invalid offset: "+s
def _parse_rfc(self, s):
lines = s.splitlines()
if not lines:
raise ValueError, "empty string"
# Unfold
i = 0
while i < len(lines):
line = lines[i].rstrip()
if not line:
del lines[i]
elif i > 0 and line[0] == " ":
lines[i-1] += line[1:]
del lines[i]
else:
i += 1
tzid = None
comps = []
invtz = False
comptype = None
for line in lines:
if not line:
continue
name, value = line.split(':', 1)
parms = name.split(';')
if not parms:
raise ValueError, "empty property name"
name = parms[0].upper()
parms = parms[1:]
if invtz:
if name == "BEGIN":
if value in ("STANDARD", "DAYLIGHT"):
# Process component
pass
else:
raise ValueError, "unknown component: "+value
comptype = value
founddtstart = False
tzoffsetfrom = None
tzoffsetto = None
rrulelines = []
tzname = None
elif name == "END":
if value == "VTIMEZONE":
if comptype:
raise ValueError, \
"component not closed: "+comptype
if not tzid:
raise ValueError, \
"mandatory TZID not found"
if not comps:
raise ValueError, \
"at least one component is needed"
# Process vtimezone
self._vtz[tzid] = _tzicalvtz(tzid, comps)
invtz = False
elif value == comptype:
if not founddtstart:
raise ValueError, \
"mandatory DTSTART not found"
if tzoffsetfrom is None:
raise ValueError, \
"mandatory TZOFFSETFROM not found"
if tzoffsetto is None:
raise ValueError, \
"mandatory TZOFFSETFROM not found"
# Process component
rr = None
if rrulelines:
rr = rrule.rrulestr("\n".join(rrulelines),
compatible=True,
ignoretz=True,
cache=True)
comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto,
(comptype == "DAYLIGHT"),
tzname, rr)
comps.append(comp)
comptype = None
else:
raise ValueError, \
"invalid component end: "+value
elif comptype:
if name == "DTSTART":
rrulelines.append(line)
founddtstart = True
elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"):
rrulelines.append(line)
elif name == "TZOFFSETFROM":
if parms:
raise ValueError, \
"unsupported %s parm: %s "%(name, parms[0])
tzoffsetfrom = self._parse_offset(value)
elif name == "TZOFFSETTO":
if parms:
raise ValueError, \
"unsupported TZOFFSETTO parm: "+parms[0]
tzoffsetto = self._parse_offset(value)
elif name == "TZNAME":
if parms:
raise ValueError, \
"unsupported TZNAME parm: "+parms[0]
tzname = value
elif name == "COMMENT":
pass
else:
raise ValueError, "unsupported property: "+name
else:
if name == "TZID":
if parms:
raise ValueError, \
"unsupported TZID parm: "+parms[0]
tzid = value
elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"):
pass
else:
raise ValueError, "unsupported property: "+name
elif name == "BEGIN" and value == "VTIMEZONE":
tzid = None
comps = []
invtz = True
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, `self._s`)
# Candidate locations of the system timezone file and zoneinfo databases.
# On win32 there is no zoneinfo tree; the registry-based tzwin classes are
# used instead, so both lists stay empty there.
if sys.platform != "win32":
    TZFILES = ["/etc/localtime", "localtime"]
    TZPATHS = ["/usr/share/zoneinfo", "/usr/lib/zoneinfo", "/etc/zoneinfo"]
else:
    TZFILES = []
    TZPATHS = []
def gettz(name=None):
    """Return a tzinfo object for *name* (or $TZ when *name* is empty).

    Lookup order: the system timezone files, an explicit absolute path,
    the zoneinfo database directories, the win32 registry (when
    available), the bundled zoneinfo data, a TZ-style specification
    string, and finally the GMT/UTC/local-name aliases.  Returns None
    when nothing matches.
    """
    tz = None
    if not name:
        try:
            name = os.environ["TZ"]
        except KeyError:
            pass
    if name is None or name == ":":
        # No explicit zone requested: use the local system zone.
        for filepath in TZFILES:
            if not os.path.isabs(filepath):
                filename = filepath
                for path in TZPATHS:
                    filepath = os.path.join(path, filename)
                    if os.path.isfile(filepath):
                        break
                else:
                    continue
            if os.path.isfile(filepath):
                try:
                    tz = tzfile(filepath)
                    break
                except (IOError, OSError, ValueError):
                    pass
        else:
            tz = tzlocal()
    else:
        if name.startswith(":"):
            # POSIX allows TZ=":<spec>"; strip the leading colon.
            # (Bug fix: the original sliced off the *last* character,
            # mangling the zone name.)
            name = name[1:]
        if os.path.isabs(name):
            if os.path.isfile(name):
                tz = tzfile(name)
            else:
                tz = None
        else:
            for path in TZPATHS:
                filepath = os.path.join(path, name)
                if not os.path.isfile(filepath):
                    filepath = filepath.replace(' ','_')
                    if not os.path.isfile(filepath):
                        continue
                try:
                    tz = tzfile(filepath)
                    break
                except (IOError, OSError, ValueError):
                    pass
            else:
                tz = None
        if tzwin:
            try:
                tz = tzwin(name)
            except OSError:
                pass
        if not tz:
            from dateutil.zoneinfo import gettz
            tz = gettz(name)
        if not tz:
            for c in name:
                # name must have at least one offset to be a tzstr
                if c in "0123456789":
                    try:
                        tz = tzstr(name)
                    except ValueError:
                        pass
                    break
                else:
                    if name in ("GMT", "UTC"):
                        tz = tzutc()
                    elif name in time.tzname:
                        tz = tzlocal()
    return tz
# vim:ts=4:sw=4:et
| Python |
"""
Copyright (c) 2003-2005 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
from dateutil.tz import tzfile
from tarfile import TarFile
import os
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
__all__ = ["setcachesize", "gettz", "rebuild"]
CACHE = []
CACHESIZE = 10
class tzfile(tzfile):
    # Subclass of dateutil.tz.tzfile that pickles by timezone *name*, so
    # unpickling goes back through gettz() (and the bundled tarball)
    # instead of requiring the original file path to exist.
    def __reduce__(self):
        return (gettz, (self._filename,))
def getzoneinfofile():
    """Return the path of the newest zoneinfo tarball bundled next to
    this module, or None when no "zoneinfo*.tar.*" file is present."""
    moduledir = os.path.dirname(__file__)
    candidates = [entry for entry in os.listdir(moduledir)
                  if entry.startswith("zoneinfo") and ".tar." in entry]
    if not candidates:
        return None
    # Lexicographically greatest name == newest bundled archive.
    return os.path.join(moduledir, max(candidates))
# Locate the bundled tarball once at import time, then drop the helper.
ZONEINFOFILE = getzoneinfofile()
del getzoneinfofile
def setcachesize(size):
    """Limit the zone cache to *size* entries, trimming any excess now."""
    global CACHESIZE, CACHE
    CACHESIZE = size
    CACHE[size:] = []
def gettz(name):
    """Return a tzinfo for zone *name* from the bundled zoneinfo tarball,
    consulting (and updating) a small module-level MRU cache.

    Returns None when the tarball is missing or the name is unknown.
    """
    tzinfo = None
    if ZONEINFOFILE:
        for cachedname, tzinfo in CACHE:
            if cachedname == name:
                break
        else:
            # Cache miss: extract the zone file from the tarball.
            tf = TarFile.open(ZONEINFOFILE)
            try:
                zonefile = tf.extractfile(name)
            except KeyError:
                # Unknown zone name.
                tzinfo = None
            else:
                tzinfo = tzfile(zonefile)
            tf.close()
            # Most-recently-used entry first; trim to CACHESIZE entries.
            CACHE.insert(0, (name, tzinfo))
            del CACHE[CACHESIZE:]
    return tzinfo
def rebuild(filename, tag=None, format="gz"):
    """Rebuild the bundled zoneinfo tarball from the tzdata archive
    *filename*, compiling each source file with zic(8).

    The result replaces any existing zoneinfo*.tar.* next to this module
    and is named "zoneinfo[-TAG].tar.FORMAT".
    """
    import tempfile, shutil
    tmpdir = tempfile.mkdtemp()
    zonedir = os.path.join(tmpdir, "zoneinfo")
    moduledir = os.path.dirname(__file__)
    if tag:
        tag = "-"+tag
    else:
        # Bug fix: without this, a missing tag produced a file literally
        # named "zoneinfoNone.tar.gz".
        tag = ""
    targetname = "zoneinfo%s.tar.%s" % (tag, format)
    try:
        tf = TarFile.open(filename)
        for name in tf.getnames():
            if not (name.endswith(".sh") or
                    name.endswith(".tab") or
                    name == "leapseconds"):
                tf.extract(name, tmpdir)
                filepath = os.path.join(tmpdir, name)
                # NOTE(review): shells out with an interpolated path;
                # fine for trusted local archives, unsafe for untrusted
                # member names.
                os.system("zic -d %s %s" % (zonedir, filepath))
        tf.close()
        target = os.path.join(moduledir, targetname)
        # Remove any previously bundled archive before writing the new one.
        for entry in os.listdir(moduledir):
            if entry.startswith("zoneinfo") and ".tar." in entry:
                os.unlink(os.path.join(moduledir, entry))
        tf = TarFile.open(target, "w:%s" % format)
        for entry in os.listdir(zonedir):
            entrypath = os.path.join(zonedir, entry)
            tf.add(entrypath, entry)
        tf.close()
    finally:
        shutil.rmtree(tmpdir)
| Python |
# This code was originally contributed by Jeffrey Harris.
import datetime
import struct
import _winreg
__author__ = "Jeffrey Harris & Gustavo Niemeyer <gustavo@niemeyer.net>"
__all__ = ["tzwin", "tzwinlocal"]
ONEWEEK = datetime.timedelta(7)
TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones"
TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
def _settzkeyname():
    # Decide which registry branch holds the timezone data: the NT-style
    # key when it exists, otherwise the Windows-9x-style key.
    global TZKEYNAME
    handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
    try:
        _winreg.OpenKey(handle, TZKEYNAMENT).Close()
        TZKEYNAME = TZKEYNAMENT
    except WindowsError:
        TZKEYNAME = TZKEYNAME9X
    handle.Close()
# Resolve TZKEYNAME once, at import time.
_settzkeyname()
class tzwinbase(datetime.tzinfo):
    """tzinfo class based on win32's timezones available in the registry."""
    # Subclasses must set: _stdoffset/_dstoffset (minutes east of UTC),
    # _stdname/_dstname, _display, and the _std*/_dst* transition fields
    # consumed by _isdst().
    def utcoffset(self, dt):
        # Registry offsets are stored in minutes east of UTC.
        if self._isdst(dt):
            return datetime.timedelta(minutes=self._dstoffset)
        else:
            return datetime.timedelta(minutes=self._stdoffset)
    def dst(self, dt):
        if self._isdst(dt):
            minutes = self._dstoffset - self._stdoffset
            return datetime.timedelta(minutes=minutes)
        else:
            return datetime.timedelta(0)
    def tzname(self, dt):
        if self._isdst(dt):
            return self._dstname
        else:
            return self._stdname
    def list():
        """Return a list of all time zones known to the system."""
        handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
        tzkey = _winreg.OpenKey(handle, TZKEYNAME)
        result = [_winreg.EnumKey(tzkey, i)
                  for i in range(_winreg.QueryInfoKey(tzkey)[0])]
        tzkey.Close()
        handle.Close()
        return result
    list = staticmethod(list)
    def display(self):
        # Human-readable display name read from the registry.
        return self._display
    def _isdst(self, dt):
        # Compute this year's DST start ("dston") and end ("dstoff")
        # instants and test membership; when dstoff < dston (southern
        # hemisphere), the DST period wraps the new year, hence the
        # inverted test.
        dston = picknthweekday(dt.year, self._dstmonth, self._dstdayofweek,
                               self._dsthour, self._dstminute,
                               self._dstweeknumber)
        dstoff = picknthweekday(dt.year, self._stdmonth, self._stddayofweek,
                                self._stdhour, self._stdminute,
                                self._stdweeknumber)
        if dston < dstoff:
            return dston <= dt.replace(tzinfo=None) < dstoff
        else:
            return not dstoff <= dt.replace(tzinfo=None) < dston
class tzwin(tzwinbase):
    """A named timezone loaded from the win32 registry's TZI data."""
    def __init__(self, name):
        self._name = name
        handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
        tzkey = _winreg.OpenKey(handle, "%s\%s" % (TZKEYNAME, name))
        keydict = valuestodict(tzkey)
        tzkey.Close()
        handle.Close()
        self._stdname = keydict["Std"].encode("iso-8859-1")
        self._dstname = keydict["Dlt"].encode("iso-8859-1")
        self._display = keydict["Display"]
        # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm
        # TZI layout: 3 longs (Bias, StandardBias, DaylightBias) followed
        # by two SYSTEMTIME structures (8 shorts each).
        tup = struct.unpack("=3l16h", keydict["TZI"])
        self._stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1
        self._dstoffset = self._stdoffset-tup[2] # + DaylightBias * -1
        (self._stdmonth,
         self._stddayofweek, # Sunday = 0
         self._stdweeknumber, # Last = 5
         self._stdhour,
         self._stdminute) = tup[4:9]
        (self._dstmonth,
         self._dstdayofweek, # Sunday = 0
         self._dstweeknumber, # Last = 5
         self._dsthour,
         self._dstminute) = tup[12:17]
    def __repr__(self):
        return "tzwin(%s)" % repr(self._name)
    def __reduce__(self):
        # Pickle by zone name; reconstructed from the registry.
        return (self.__class__, (self._name,))
class tzwinlocal(tzwinbase):
    """The machine's current local timezone, read from the win32
    registry's TimeZoneInformation key."""
    def __init__(self):
        handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
        tzlocalkey = _winreg.OpenKey(handle, TZLOCALKEYNAME)
        keydict = valuestodict(tzlocalkey)
        tzlocalkey.Close()
        self._stdname = keydict["StandardName"].encode("iso-8859-1")
        self._dstname = keydict["DaylightName"].encode("iso-8859-1")
        try:
            # The display name lives under the per-zone key matching the
            # standard name; it may be missing, which is tolerated.
            tzkey = _winreg.OpenKey(handle, "%s\%s"%(TZKEYNAME, self._stdname))
            _keydict = valuestodict(tzkey)
            self._display = _keydict["Display"]
            tzkey.Close()
        except OSError:
            self._display = None
        handle.Close()
        self._stdoffset = -keydict["Bias"]-keydict["StandardBias"]
        self._dstoffset = self._stdoffset-keydict["DaylightBias"]
        # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm
        # SYSTEMTIME layout: year, month, dayofweek, day(weeknumber),
        # hour, minute, second, millisecond.
        tup = struct.unpack("=8h", keydict["StandardStart"])
        (self._stdmonth,
         self._stddayofweek, # Sunday = 0
         self._stdweeknumber, # Last = 5
         self._stdhour,
         self._stdminute) = tup[1:6]
        tup = struct.unpack("=8h", keydict["DaylightStart"])
        (self._dstmonth,
         self._dstdayofweek, # Sunday = 0
         self._dstweeknumber, # Last = 5
         self._dsthour,
         self._dstminute) = tup[1:6]
    def __reduce__(self):
        return (self.__class__, ())
def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
    """Return the datetime of the whichweek'th *dayofweek* of the month.

    dayofweek == 0 means Sunday, whichweek 5 means last instance.
    """
    first = datetime.datetime(year, month, 1, hour, minute)
    # Day-of-month of the first occurrence of *dayofweek*: Windows counts
    # Sunday=0..Saturday=6 while isoweekday() gives Monday=1..Sunday=7;
    # the modular difference lines the two numberings up.
    weekdayone = first.replace(day=((dayofweek-first.isoweekday())%7+1))
    week = datetime.timedelta(weeks=1)
    # Bug fix: the previous loop stepped *whichweek* whole weeks past the
    # first occurrence, returning the (n+1)'th instance for whichweek=n.
    # Step (whichweek-1) weeks instead, and back up one week if that
    # overruns the month (covers whichweek == 5, i.e. "last").
    dt = weekdayone + (whichweek-1)*week
    if dt.month != month:
        dt -= week
    return dt
def valuestodict(key):
    """Convert a registry key's values to a dictionary mapping value
    name -> value data."""
    # Renamed the local from "dict", which shadowed the builtin.
    result = {}
    size = _winreg.QueryInfoKey(key)[1]
    for i in range(size):
        data = _winreg.EnumValue(key, i)
        result[data[0]] = data[1]
    return result
| Python |
"""
Copyright (c) 2003-2007 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
import datetime
import calendar
__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
class weekday(object):
    """A weekday constant (0=MO .. 6=SU), optionally qualified with an
    occurrence index *n* (e.g. MO(+1) meaning "first Monday")."""
    __slots__ = ["weekday", "n"]
    def __init__(self, weekday, n=None):
        self.weekday = weekday
        self.n = n
    def __call__(self, n):
        # Produce a copy qualified with index *n*; reuse self when the
        # index is unchanged.
        if n == self.n:
            return self
        return self.__class__(self.weekday, n)
    def __eq__(self, other):
        # Anything without weekday/n attributes simply compares unequal.
        try:
            return self.weekday == other.weekday and self.n == other.n
        except AttributeError:
            return False
    def __repr__(self):
        name = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
        if self.n:
            return "%s(%+d)" % (name, self.n)
        return name
# Pre-built singleton instances for each weekday; ``weekdays`` additionally
# allows lookup by integer index (0=MO .. 6=SU).
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
class relativedelta:
"""
    The relativedelta type is based on the specification of the excellent
work done by M.-A. Lemburg in his mx.DateTime extension. However,
notice that this type does *NOT* implement the same algorithm as
his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.
There's two different ways to build a relativedelta instance. The
first one is passing it two date/datetime classes:
relativedelta(datetime1, datetime2)
And the other way is to use the following keyword arguments:
year, month, day, hour, minute, second, microsecond:
Absolute information.
years, months, weeks, days, hours, minutes, seconds, microseconds:
Relative information, may be negative.
weekday:
One of the weekday instances (MO, TU, etc). These instances may
receive a parameter N, specifying the Nth weekday, which could
        be positive or negative (like MO(+1) or MO(-2)). Not specifying
it is the same as specifying +1. You can also use an integer,
where 0=MO.
leapdays:
Will add given days to the date found, if year is a leap
year, and the date found is post 28 of february.
yearday, nlyearday:
Set the yearday or the non-leap year day (jump leap days).
These are converted to day/month/leapdays information.
Here is the behavior of operations with relativedelta:
1) Calculate the absolute year, using the 'year' argument, or the
original datetime year, if the argument is not present.
2) Add the relative 'years' argument to the absolute year.
3) Do steps 1 and 2 for month/months.
4) Calculate the absolute day, using the 'day' argument, or the
original datetime day, if the argument is not present. Then,
subtract from the day until it fits in the year and month
found after their operations.
5) Add the relative 'days' argument to the absolute day. Notice
that the 'weeks' argument is multiplied by 7 and added to
'days'.
6) Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds,
microsecond/microseconds.
7) If the 'weekday' argument is present, calculate the weekday,
with the given (wday, nth) tuple. wday is the index of the
weekday (0-6, 0=Mon), and nth is the number of weeks to add
forward or backward, depending on its signal. Notice that if
the calculated date is already Monday, for example, using
(0, 1) or (0, -1) won't change the day.
"""
    def __init__(self, dt1=None, dt2=None,
                 years=0, months=0, days=0, leapdays=0, weeks=0,
                 hours=0, minutes=0, seconds=0, microseconds=0,
                 year=None, month=None, day=None, weekday=None,
                 yearday=None, nlyearday=None,
                 hour=None, minute=None, second=None, microsecond=None):
        if dt1 and dt2:
            # Two-date form: compute the relativedelta between dt1 and
            # dt2.  Mixed date/datetime arguments are promoted to
            # datetime first.
            if not isinstance(dt1, datetime.date) or \
               not isinstance(dt2, datetime.date):
                raise TypeError, "relativedelta only diffs datetime/date"
            if type(dt1) is not type(dt2):
                if not isinstance(dt1, datetime.datetime):
                    dt1 = datetime.datetime.fromordinal(dt1.toordinal())
                elif not isinstance(dt2, datetime.datetime):
                    dt2 = datetime.datetime.fromordinal(dt2.toordinal())
            self.years = 0
            self.months = 0
            self.days = 0
            self.leapdays = 0
            self.hours = 0
            self.minutes = 0
            self.seconds = 0
            self.microseconds = 0
            self.year = None
            self.month = None
            self.day = None
            self.weekday = None
            self.hour = None
            self.minute = None
            self.second = None
            self.microsecond = None
            self._has_time = 0
            # First approximation: the whole-month difference.  Then
            # nudge it until dt2 + delta lands on the correct side of
            # dt1, and express the remainder in seconds/microseconds.
            months = (dt1.year*12+dt1.month)-(dt2.year*12+dt2.month)
            self._set_months(months)
            dtm = self.__radd__(dt2)
            if dt1 < dt2:
                while dt1 > dtm:
                    months += 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            else:
                while dt1 < dtm:
                    months -= 1
                    self._set_months(months)
                    dtm = self.__radd__(dt2)
            delta = dt1 - dtm
            self.seconds = delta.seconds+delta.days*86400
            self.microseconds = delta.microseconds
        else:
            # Keyword form: store the relative and absolute pieces as
            # given; weeks fold into days.
            self.years = years
            self.months = months
            self.days = days+weeks*7
            self.leapdays = leapdays
            self.hours = hours
            self.minutes = minutes
            self.seconds = seconds
            self.microseconds = microseconds
            self.year = year
            self.month = month
            self.day = day
            self.hour = hour
            self.minute = minute
            self.second = second
            self.microsecond = microsecond
            if type(weekday) is int:
                # Bare integers are promoted to the weekday singletons.
                self.weekday = weekdays[weekday]
            else:
                self.weekday = weekday
            yday = 0
            if nlyearday:
                yday = nlyearday
            elif yearday:
                yday = yearday
                if yearday > 59:
                    self.leapdays = -1
            if yday:
                # Convert a (non-leap) year day into month/day using the
                # cumulative day counts at each month's end.
                ydayidx = [31,59,90,120,151,181,212,243,273,304,334,366]
                for idx, ydays in enumerate(ydayidx):
                    if yday <= ydays:
                        self.month = idx+1
                        if idx == 0:
                            self.day = ydays
                        else:
                            self.day = yday-ydayidx[idx-1]
                        break
                else:
                    raise ValueError, "invalid year day (%d)" % yday
        self._fix()
def _fix(self):
if abs(self.microseconds) > 999999:
s = self.microseconds//abs(self.microseconds)
div, mod = divmod(self.microseconds*s, 1000000)
self.microseconds = mod*s
self.seconds += div*s
if abs(self.seconds) > 59:
s = self.seconds//abs(self.seconds)
div, mod = divmod(self.seconds*s, 60)
self.seconds = mod*s
self.minutes += div*s
if abs(self.minutes) > 59:
s = self.minutes//abs(self.minutes)
div, mod = divmod(self.minutes*s, 60)
self.minutes = mod*s
self.hours += div*s
if abs(self.hours) > 23:
s = self.hours//abs(self.hours)
div, mod = divmod(self.hours*s, 24)
self.hours = mod*s
self.days += div*s
if abs(self.months) > 11:
s = self.months//abs(self.months)
div, mod = divmod(self.months*s, 12)
self.months = mod*s
self.years += div*s
if (self.hours or self.minutes or self.seconds or self.microseconds or
self.hour is not None or self.minute is not None or
self.second is not None or self.microsecond is not None):
self._has_time = 1
else:
self._has_time = 0
def _set_months(self, months):
self.months = months
if abs(self.months) > 11:
s = self.months//abs(self.months)
div, mod = divmod(self.months*s, 12)
self.months = mod*s
self.years = div*s
else:
self.years = 0
def __radd__(self, other):
if not isinstance(other, datetime.date):
raise TypeError, "unsupported type for add operation"
elif self._has_time and not isinstance(other, datetime.datetime):
other = datetime.datetime.fromordinal(other.toordinal())
year = (self.year or other.year)+self.years
month = self.month or other.month
if self.months:
assert 1 <= abs(self.months) <= 12
month += self.months
if month > 12:
year += 1
month -= 12
elif month < 1:
year -= 1
month += 12
day = min(calendar.monthrange(year, month)[1],
self.day or other.day)
repl = {"year": year, "month": month, "day": day}
for attr in ["hour", "minute", "second", "microsecond"]:
value = getattr(self, attr)
if value is not None:
repl[attr] = value
days = self.days
if self.leapdays and month > 2 and calendar.isleap(year):
days += self.leapdays
ret = (other.replace(**repl)
+ datetime.timedelta(days=days,
hours=self.hours,
minutes=self.minutes,
seconds=self.seconds,
microseconds=self.microseconds))
if self.weekday:
weekday, nth = self.weekday.weekday, self.weekday.n or 1
jumpdays = (abs(nth)-1)*7
if nth > 0:
jumpdays += (7-ret.weekday()+weekday)%7
else:
jumpdays += (ret.weekday()-weekday)%7
jumpdays *= -1
ret += datetime.timedelta(days=jumpdays)
return ret
def __rsub__(self, other):
return self.__neg__().__radd__(other)
def __add__(self, other):
if not isinstance(other, relativedelta):
raise TypeError, "unsupported type for add operation"
return relativedelta(years=other.years+self.years,
months=other.months+self.months,
days=other.days+self.days,
hours=other.hours+self.hours,
minutes=other.minutes+self.minutes,
seconds=other.seconds+self.seconds,
microseconds=other.microseconds+self.microseconds,
leapdays=other.leapdays or self.leapdays,
year=other.year or self.year,
month=other.month or self.month,
day=other.day or self.day,
weekday=other.weekday or self.weekday,
hour=other.hour or self.hour,
minute=other.minute or self.minute,
second=other.second or self.second,
microsecond=other.second or self.microsecond)
def __sub__(self, other):
if not isinstance(other, relativedelta):
raise TypeError, "unsupported type for sub operation"
return relativedelta(years=other.years-self.years,
months=other.months-self.months,
days=other.days-self.days,
hours=other.hours-self.hours,
minutes=other.minutes-self.minutes,
seconds=other.seconds-self.seconds,
microseconds=other.microseconds-self.microseconds,
leapdays=other.leapdays or self.leapdays,
year=other.year or self.year,
month=other.month or self.month,
day=other.day or self.day,
weekday=other.weekday or self.weekday,
hour=other.hour or self.hour,
minute=other.minute or self.minute,
second=other.second or self.second,
microsecond=other.second or self.microsecond)
def __neg__(self):
return relativedelta(years=-self.years,
months=-self.months,
days=-self.days,
hours=-self.hours,
minutes=-self.minutes,
seconds=-self.seconds,
microseconds=-self.microseconds,
leapdays=self.leapdays,
year=self.year,
month=self.month,
day=self.day,
weekday=self.weekday,
hour=self.hour,
minute=self.minute,
second=self.second,
microsecond=self.microsecond)
def __nonzero__(self):
return not (not self.years and
not self.months and
not self.days and
not self.hours and
not self.minutes and
not self.seconds and
not self.microseconds and
not self.leapdays and
self.year is None and
self.month is None and
self.day is None and
self.weekday is None and
self.hour is None and
self.minute is None and
self.second is None and
self.microsecond is None)
def __mul__(self, other):
f = float(other)
return relativedelta(years=self.years*f,
months=self.months*f,
days=self.days*f,
hours=self.hours*f,
minutes=self.minutes*f,
seconds=self.seconds*f,
microseconds=self.microseconds*f,
leapdays=self.leapdays,
year=self.year,
month=self.month,
day=self.day,
weekday=self.weekday,
hour=self.hour,
minute=self.minute,
second=self.second,
microsecond=self.microsecond)
def __eq__(self, other):
if not isinstance(other, relativedelta):
return False
if self.weekday or other.weekday:
if not self.weekday or not other.weekday:
return False
if self.weekday.weekday != other.weekday.weekday:
return False
n1, n2 = self.weekday.n, other.weekday.n
if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
return False
return (self.years == other.years and
self.months == other.months and
self.days == other.days and
self.hours == other.hours and
self.minutes == other.minutes and
self.seconds == other.seconds and
self.leapdays == other.leapdays and
self.year == other.year and
self.month == other.month and
self.day == other.day and
self.hour == other.hour and
self.minute == other.minute and
self.second == other.second and
self.microsecond == other.microsecond)
    def __ne__(self, other):
        # inverse of __eq__ (Python 2 does not derive != from ==)
        return not self.__eq__(other)
def __div__(self, other):
return self.__mul__(1/float(other))
def __repr__(self):
l = []
for attr in ["years", "months", "days", "leapdays",
"hours", "minutes", "seconds", "microseconds"]:
value = getattr(self, attr)
if value:
l.append("%s=%+d" % (attr, value))
for attr in ["year", "month", "day", "weekday",
"hour", "minute", "second", "microsecond"]:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, `value`))
return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
# vim:ts=4:sw=4:et
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
parser for volunteermatch
"""
import xml_helpers as xmlh
from datetime import datetime
import dateutil.parser
# pylint: disable-msg=R0915
def parse(s, maxrecs, progress):
"""return FPXML given volunteermatch data"""
# TODO: progress
known_elnames = ['feed', 'title', 'subtitle', 'div', 'span', 'updated', 'id', 'link', 'icon', 'logo', 'author', 'name', 'uri', 'email', 'rights', 'entry', 'published', 'g:publish_date', 'g:expiration_date', 'g:event_date_range', 'g:start', 'g:end', 'updated', 'category', 'summary', 'content', 'awb:city', 'awb:country', 'awb:state', 'awb:postalcode', 'g:location', 'g:age_range', 'g:employer', 'g:job_type', 'g:job_industry', 'awb:paid', ]
xmldoc = xmlh.simple_parser(s, known_elnames, progress)
pubdate = xmlh.get_tag_val(xmldoc, "created")
ts = dateutil.parser.parse(pubdate)
pubdate = ts.strftime("%Y-%m-%dT%H:%M:%S")
# convert to footprint format
s = '<?xml version="1.0" ?>'
s += '<FootprintFeed schemaVersion="0.1">'
s += '<FeedInfo>'
# TODO: assign provider IDs?
s += '<providerID>104</providerID>'
s += '<providerName>volunteermatch.org</providerName>'
s += '<feedID>1</feedID>'
s += '<providerURL>http://www.volunteermatch.org/</providerURL>'
s += '<createdDateTime>%s</createdDateTime>' % (pubdate)
s += '<description></description>'
s += '</FeedInfo>'
numorgs = numopps = 0
# hardcoded: Organization
s += '<Organizations>'
items = xmldoc.getElementsByTagName("listing")
if (maxrecs > items.length or maxrecs == -1):
maxrecs = items.length
for item in items[0:maxrecs]:
orgs = item.getElementsByTagName("parent")
if (orgs.length == 1):
org = orgs[0]
s += '<Organization>'
s += '<organizationID>%s</organizationID>' % (xmlh.get_tag_val(org, "key"))
s += '<nationalEIN></nationalEIN>'
s += '<name>%s</name>' % (xmlh.get_tag_val(org, "name"))
s += '<missionStatement></missionStatement>'
s += '<description></description>'
s += '<location><city></city><region></region><postalCode></postalCode></location>'
s += '<organizationURL>%s</organizationURL>' % (xmlh.get_tag_val(org, "URL"))
s += '<donateURL></donateURL>'
s += '<logoURL></logoURL>'
s += '<detailURL>%s</detailURL>' % (xmlh.get_tag_val(org, "detailURL"))
s += '</Organization>'
numorgs += 1
else:
print datetime.now(), "parse_volunteermatch: listing does not have an organization"
return None
s += '</Organizations>'
s += '<VolunteerOpportunities>'
items = xmldoc.getElementsByTagName("listing")
for item in items[0:maxrecs]:
s += '<VolunteerOpportunity>'
s += '<volunteerOpportunityID>%s</volunteerOpportunityID>' % (xmlh.get_tag_val(item, "key"))
orgs = item.getElementsByTagName("parent")
if (orgs.length == 1):
org = orgs[0]
s += '<sponsoringOrganizationIDs><sponsoringOrganizationID>%s</sponsoringOrganizationID></sponsoringOrganizationIDs>' % (xmlh.get_tag_val(org, "key"))
else:
s += '<sponsoringOrganizationIDs><sponsoringOrganizationID>0</sponsoringOrganizationID></sponsoringOrganizationIDs>'
print datetime.now(), "parse_volunteermatch: listing does not have an organization"
s += '<title>%s</title>' % (xmlh.get_tag_val(item, "title"))
s += '<volunteersNeeded>-8888</volunteersNeeded>'
s += '<dateTimeDurations><dateTimeDuration>'
durations = xmlh.get_children_by_tagname(item, "duration")
if (len(durations) == 1):
duration = durations[0]
ongoing = duration.getAttribute("ongoing")
if (ongoing == 'true'):
s += '<openEnded>Yes</openEnded>'
else:
s += '<openEnded>No</openEnded>'
listingTimes = duration.getElementsByTagName("listingTime")
if (listingTimes.length == 1):
listingTime = listingTimes[0]
s += '<startTime>%s</startTime>' % (xmlh.get_tag_val(listingTime, "startTime"))
s += '<endTime>%s</endTime>' % (xmlh.get_tag_val(listingTime, "endTime"))
else:
print datetime.now(), "parse_volunteermatch: number of durations in item != 1"
return None
commitments = item.getElementsByTagName("commitment")
l_period = l_duration = ""
if (commitments.length == 1):
commitment = commitments[0]
l_num = xmlh.get_tag_val(commitment, "num")
l_duration = xmlh.get_tag_val(commitment, "duration")
l_period = xmlh.get_tag_val(commitment, "period")
if ((l_duration == "hours") and (l_period == "week")):
s += '<commitmentHoursPerWeek>' + l_num + '</commitmentHoursPerWeek>'
elif ((l_duration == "hours") and (l_period == "day")):
# note: weekdays only
s += '<commitmentHoursPerWeek>' + str(int(l_num)*5) + '</commitmentHoursPerWeek>'
elif ((l_duration == "hours") and (l_period == "month")):
hrs = int(float(l_num)/4.0)
if hrs < 1: hrs = 1
s += '<commitmentHoursPerWeek>' + str(hrs) + '</commitmentHoursPerWeek>'
elif ((l_duration == "hours") and (l_period == "event")):
# TODO: ignore for now, later compute the endTime if not already provided
pass
else:
print datetime.now(), "parse_volunteermatch: commitment given in units != hours/week: ", l_duration, "per", l_period
s += '</dateTimeDuration></dateTimeDurations>'
dbaddresses = item.getElementsByTagName("location")
if (dbaddresses.length != 1):
print datetime.now(), "parse_volunteermatch: only 1 location supported."
return None
dbaddress = dbaddresses[0]
s += '<locations><location>'
s += '<streetAddress1>%s</streetAddress1>' % (xmlh.get_tag_val(dbaddress, "street1"))
s += '<city>%s</city>' % (xmlh.get_tag_val(dbaddress, "city"))
s += '<region>%s</region>' % (xmlh.get_tag_val(dbaddress, "region"))
s += '<postalCode>%s</postalCode>' % (xmlh.get_tag_val(dbaddress, "postalCode"))
geolocs = item.getElementsByTagName("geolocation")
if (geolocs.length == 1):
geoloc = geolocs[0]
s += '<latitude>%s</latitude>' % (xmlh.get_tag_val(geoloc, "latitude"))
s += '<longitude>%s</longitude>' % (xmlh.get_tag_val(geoloc, "longitude"))
s += '</location></locations>'
s += '<audienceTags>'
audiences = item.getElementsByTagName("audience")
for audience in audiences:
type = xmlh.node_data(audience)
s += '<audienceTag>%s</audienceTag>' % (type)
s += '</audienceTags>'
s += '<categoryTags>'
categories = item.getElementsByTagName("category")
for category in categories:
type = xmlh.node_data(category)
s += '<categoryTag>%s</categoryTag>' % (type)
s += '</categoryTags>'
s += '<skills>%s</skills>' % (xmlh.get_tag_val(item, "skill"))
s += '<detailURL>%s</detailURL>' % (xmlh.get_tag_val(item, "detailURL"))
s += '<description>%s</description>' % (xmlh.get_tag_val(item, "description"))
expires = xmlh.get_tag_val(item, "expires")
ts = dateutil.parser.parse(expires)
expires = ts.strftime("%Y-%m-%dT%H:%M:%S")
s += '<expires>%s</expires>' % (expires)
s += '</VolunteerOpportunity>'
numopps += 1
s += '</VolunteerOpportunities>'
s += '</FootprintFeed>'
#s = re.sub(r'><([^/])', r'>\n<\1', s)
#print(s)
return s, numorgs, numopps
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
parser for footprint itself (identity parse)
"""
import xml_helpers as xmlh
from datetime import datetime
import re
# default opportunity lifetime: 90 days (in seconds)
DEFAULT_EXPIRATION = (90 * 86400)

# default opportunity duration: 10 years (in seconds)
DEFAULT_DURATION = (10 * 365 * 86400)

# element names accepted by the FPXML schema; passed to
# xmlh.simple_parser for its validation pass
KNOWN_ELNAMES = [
  'FeedInfo', 'FootprintFeed', 'Organization', 'Organizations',
  'VolunteerOpportunities', 'VolunteerOpportunity', 'abstract', 'audienceTag',
  'audienceTags', 'categoryTag', 'categoryTags', 'city',
  'commitmentHoursPerWeek', 'contactEmail', 'contactName', 'contactPhone',
  'country', 'createdDateTime', 'dateTimeDuration', 'dateTimeDurationType',
  'dateTimeDurations', 'description', 'detailURL', 'directions', 'donateURL',
  'duration', 'email', 'endDate', 'endTime', 'expires', 'fax', 'feedID',
  'guidestarID', 'iCalRecurrence', 'language', 'latitude', 'lastUpdated',
  'location', 'locationType', 'locations', 'logoURL', 'longitude', 'minimumAge',
  'missionStatement', 'name', 'nationalEIN', 'openEnded', 'organizationID',
  'organizationURL', 'paid', 'phone', 'postalCode', 'providerID',
  'providerName', 'providerURL', 'region', 'schemaVersion', 'sexRestrictedEnum',
  'sexRestrictedTo', 'skills', 'sponsoringOrganizationID', 'startDate',
  'startTime', 'streetAddress1', 'streetAddress2', 'streetAddress3', 'title',
  'tzOlsonPath', 'virtual', 'volunteerHubOrganizationID',
  'volunteerOpportunityID', 'volunteersFilled', 'volunteersSlots',
  'volunteersNeeded', 'yesNoEnum'
]
def set_default_time_elem(doc, entity, tagname, timest=None):
  """Footprint macro: ensure entity has a child tagname (defaulting to
  the current timestamp) and give it a default olsonTZ attribute.

  BUG FIX: the default was previously timest=xmlh.current_ts() in the
  signature, which Python evaluates once at import time -- every later
  call reused the module-load timestamp instead of the current time.
  """
  if timest is None:
    timest = xmlh.current_ts()
  cdt = xmlh.set_default_value(doc, entity, tagname, timest)
  xmlh.set_default_attr(doc, cdt, "olsonTZ", "America/Los_Angeles")
def parse_fast(instr, maxrecs, progress):
"""fast parser but doesn't check correctness,
i.e. must be pre-checked by caller."""
numorgs = numopps = 0
outstr = '<?xml version="1.0" ?>'
outstr += '<FootprintFeed schemaVersion="0.1">'
# note: processes Organizations first, so ID lookups work
feedchunks = re.findall(
re.compile('<FeedInfo>.+?</FeedInfo>', re.DOTALL), instr)
for feedchunk in feedchunks:
node = xmlh.simple_parser(feedchunk, KNOWN_ELNAMES, False)
xmlh.set_default_value(node, node.firstChild, "feedID", "0")
set_default_time_elem(node, node.firstChild, "createdDateTime")
outstr += xmlh.prettyxml(node, True)
orgchunks = re.findall(
re.compile('<Organization>.+?</Organization>', re.DOTALL), instr)
outstr += '<Organizations>'
for orgchunk in orgchunks:
node = xmlh.simple_parser(orgchunk, KNOWN_ELNAMES, False)
numorgs += 1
outstr += xmlh.prettyxml(node, True)
outstr += '</Organizations>'
oppchunks = re.findall(
re.compile('<VolunteerOpportunity>.+?</VolunteerOpportunity>',
re.DOTALL), instr)
outstr += '<VolunteerOpportunities>'
for oppchunk in oppchunks:
node = xmlh.simple_parser(oppchunk, KNOWN_ELNAMES, False)
numopps += 1
if (maxrecs > 0 and numopps > maxrecs):
break
if progress and numopps % 250 == 0:
print datetime.now(), ": ", numopps, " records generated."
for opp in node.firstChild.childNodes:
if opp.nodeType == node.ELEMENT_NODE:
xmlh.set_default_value(node, opp, "volunteersNeeded", -8888)
xmlh.set_default_value(node, opp, "paid", "No")
xmlh.set_default_value(node, opp, "sexRestrictedTo", "Neither")
xmlh.set_default_value(node, opp, "language", "English")
set_default_time_elem(node, opp, "lastUpdated")
set_default_time_elem(node, opp, "expires",
xmlh.current_ts(DEFAULT_EXPIRATION))
for loc in opp.getElementsByTagName("location"):
xmlh.set_default_value(node, loc, "virtual", "No")
xmlh.set_default_value(node, loc, "country", "US")
for dttm in opp.getElementsByTagName("dateTimeDurations"):
xmlh.set_default_value(node, dttm, "openEnded", "No")
xmlh.set_default_value(node, dttm, "iCalRecurrence", "")
if (dttm.getElementsByTagName("startTime") == None and
dttm.getElementsByTagName("endTime") == None):
set_default_time_elem(node, dttm, "timeFlexible", "Yes")
else:
set_default_time_elem(node, dttm, "timeFlexible", "No")
xmlh.set_default_value(node, dttm, "openEnded", "No")
time_elems = opp.getElementsByTagName("startTime")
time_elems += opp.getElementsByTagName("endTime")
for el in time_elems:
xmlh.set_default_attr(node, el, "olsonTZ", "America/Los_Angeles")
outstr += xmlh.prettyxml(node, True)
outstr += '</VolunteerOpportunities>'
outstr += '</FootprintFeed>'
return outstr, numorgs, numopps
def parse(instr, maxrecs, progress):
"""return python DOM object given FPXML"""
# parsing footprint format is the identity operation
# TODO: maxrecs
# TODO: progress
if progress:
print datetime.now(), "parse_footprint: parsing ", len(instr), " bytes."
xmldoc = xmlh.simple_parser(instr, KNOWN_ELNAMES, progress)
if progress:
print datetime.now(), "parse_footprint: done parsing."
return xmldoc
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
dumping ground for functions common across all parsers.
"""
from xml.dom import minidom
from datetime import datetime
import xml.sax.saxutils
import xml.parsers.expat
import re
import sys
import time
# asah: I give up, allowing UTF-8 is just too hard without incurring
# crazy performance penalties
# printable low-ASCII characters (codes 32..125); the compiled regex
# matches anything that is neither one of these nor a newline, so
# clean_string can strip it out
SIMPLE_CHARS = ''.join(map(chr, range(32, 126)))
SIMPLE_CHARS_CLASS = '[^\\n%s]' % re.escape(SIMPLE_CHARS)
SIMPLE_CHARS_RE = re.compile(SIMPLE_CHARS_CLASS)
# wall-clock reference used by print_rps_progress for its recs/sec rate
PROGRESS_START_TS = datetime.now()
def clean_string(instr):
  """Return instr reduced to safe printable ASCII, re-encoded as UTF-8;
  undecodable bytes and non-printable characters are dropped."""
  decoded = instr.decode('ascii', 'replace')
  return SIMPLE_CHARS_RE.sub('', decoded).encode('UTF-8')
def node_data(entity):
  """Return the XML-escaped text of entity's first child, or "" when
  there is no text content."""
  child = entity.firstChild
  if child is None or child.data is None:
    return ""
  text = xml.sax.saxutils.escape(child.data).encode('UTF-8')
  # keep records single-line: encode literal newlines as backslash-n
  return re.sub(r'\n', r'\\n', text)
def get_children_by_tagname(elem, name):
  """Return the direct child elements of elem whose tag equals name
  (unlike getElementsByTagName, which searches all descendants)."""
  return [child for child in elem.childNodes
          if child.nodeType == child.ELEMENT_NODE
          and child.nodeName == name]
def print_progress(msg, filename="", progress=True):
  """Print a timestamped progress line; no-op when progress is False."""
  if progress:
    print str(datetime.now())+":"+filename, msg
def print_status(msg, filename="", progress=True):
  """Print a progress line tagged with STATUS:, for stats collection."""
  print_progress(msg, "STATUS:"+filename, progress)
def print_rps_progress(noun, progress, recno, maxrecs):
  """Every 250 records, print a progress line with the records/sec rate
  measured since module load (PROGRESS_START_TS)."""
  maxrecs_str = ""
  if maxrecs > 0:
    maxrecs_str = " of " + str(maxrecs)
  if progress and recno > 0 and recno % 250 == 0:
    now = datetime.now()
    # convert the timedelta to float seconds by hand (no total_seconds()
    # on older Pythons)
    secs_since_start = now - PROGRESS_START_TS
    secs_elapsed = 3600*24.0*secs_since_start.days + \
        1.0*secs_since_start.seconds + \
        secs_since_start.microseconds / 1000000.0
    rps = recno / secs_elapsed
    print str(now)+": ", recno, noun, "processed" + maxrecs_str +\
        " ("+str(int(rps))+" recs/sec)"
def get_tag_val(entity, tag):
  """Return the XML-escaped text of the first descendant of entity named
  tag, or "" when the tag is absent or has no text content."""
  nodes = entity.getElementsByTagName(tag)
  if nodes.length == 0:
    return ""
  first = nodes[0]
  if first is None or first.firstChild is None or first.firstChild.data is None:
    return ""
  text = xml.sax.saxutils.escape(first.firstChild.data).encode('UTF-8')
  # keep records single-line: encode literal newlines as backslash-n
  return re.sub(r'\n', r'\\n', text)
def set_default_value(doc, entity, tagname, default_value):
  """add the element if not already present in the DOM tree.

  Returns the newly created element, or the first existing one named
  tagname; default_value is stringified into the new element's text.
  """
  nodes = entity.getElementsByTagName(tagname)
  if len(nodes) == 0:
    newnode = doc.createElement(tagname)
    newnode.appendChild(doc.createTextNode(str(default_value)))
    entity.appendChild(newnode)
    return newnode
  return nodes[0]
def set_default_attr(doc, entity, attrname, default_value):
  """Set attrname on entity to default_value, but only when the
  attribute is not already present (existing values win)."""
  if entity.getAttributeNode(attrname) is None:
    entity.setAttribute(attrname, default_value)
def validate_xml(xmldoc, known_elnames):
  """Recursively walk xmldoc checking element names against
  known_elnames; currently a placeholder that ignores unknown tags."""
  for child in xmldoc.childNodes:
    if (child.nodeType == child.ELEMENT_NODE and
        child.tagName not in known_elnames):
      # unknown tagName: reporting disabled for now
      pass
    # TODO: spellchecking...
    validate_xml(child, known_elnames)
def simple_parser(instr, known_elnames_list, progress):
  """Parse instr with minidom, optionally validating element names.

  On an expat parse error, prints the error with three lines of context
  on either side, dumps the full input to xmlerror.out for post-mortem
  debugging, and exits the process.
  """
  try:
    if known_elnames_list:
      # build a dict for O(1) membership tests during validation
      known_elnames_dict = {}
      for item in known_elnames_list:
        known_elnames_dict[item] = True
    if progress:
      print datetime.now(), "parsing XML"
    xmldoc = minidom.parseString(instr)
    # this stuff is in a try-block to avoid use-before-def on xmldoc
    if progress:
      print datetime.now(), "validating XML..."
    if known_elnames_list:
      validate_xml(xmldoc, known_elnames_dict)
    if progress:
      print datetime.now(), "done."
    return xmldoc
  except xml.parsers.expat.ExpatError, err:
    print datetime.now(), "XML parsing error on line ", err.lineno,
    print ":", xml.parsers.expat.ErrorString(err.code),
    print " (column ", err.offset, ")"
    # show the offending line with +/- 3 lines of context
    lines = instr.split("\n")
    for i in range(err.lineno - 3, err.lineno + 3):
      if i >= 0 and i < len(lines):
        print "%6d %s" % (i+1, lines[i])
    print "writing string to xmlerror.out..."
    outfh = open("xmlerror.out", "w+")
    outfh.write(instr)
    outfh.close()
    sys.exit(0)
def prettyxml(doc, strip_header=False):
  """Serialize doc as UTF-8 XML, one element per line; optionally strip
  the leading <?xml ...?> declaration."""
  out = doc.toxml("UTF-8")
  if strip_header:
    out = re.sub(r'<\?xml version="1.0" encoding="UTF-8"\?>', r'', out)
  # toprettyxml inserts unwanted whitespace, so split lines by hand
  return re.sub(r'><', r'>\n<', out)
def output_val(name, val):
  """Return val (stringified) wrapped in <name>...</name> tags."""
  return "<%s>%s</%s>" % (name, val, name)
def output_node(name, node, nodename):
  """Return <name>...</name> wrapping the text of node's descendant
  named nodename (via get_tag_val)."""
  text = get_tag_val(node, nodename)
  return output_val(name, text)
def output_plural(name, val):
  """Return <names><name>val</name></names> (naive "s" pluralization)."""
  return "<%ss>%s</%ss>" % (name, output_val(name, val), name)
def output_plural_node(name, node, nodename):
  """Return <names><name>text</name></names>, where text is the value of
  node's descendant named nodename."""
  inner = output_node(name, node, nodename)
  return "<%ss>%s</%ss>" % (name, inner, name)
def current_ts(delta_secs=0):
  """Return the current time plus delta_secs as YYYY-MM-DDTHH:MM:SS,
  e.g. 2008-12-30T14:30:10."""
  # NOTE(review): mktime() interprets its argument as local time, so the
  # gmtime round-trip shifts the result by the UTC offset; preserved
  # as-is -- confirm the intended timezone with the feed consumers.
  shifted = time.gmtime(time.mktime(time.gmtime()) + delta_secs)
  return time.strftime("%Y-%m-%dT%H:%M:%S", shifted)
def current_time(delta_secs=0):
  """Return the current time plus delta_secs as HH:MM:SS, e.g.
  14:30:10."""
  shifted = time.gmtime(time.mktime(time.gmtime()) + delta_secs)
  return time.strftime("%H:%M:%S", shifted)
def current_date(delta_secs=0):
  """Return the current date plus delta_secs as YYYY-MM-DD, e.g.
  2008-12-30."""
  shifted = time.gmtime(time.mktime(time.gmtime()) + delta_secs)
  return time.strftime("%Y-%m-%d", shifted)
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
parser for craigslist custom crawl-- not FPXML
"""
# note: this is designed to consume the output from the craigslist crawler
# example record
#http://limaohio.craigslist.org/vol/1048151556.html-Q-<!DOCTYPE html PUBLIC
# "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose
#.dtd"> <html> <head> <title>Foster Parents Needed</title> <meta name="ro
#bots" content="NOARCHIVE"> <link rel="stylesheet" title="craigslist" href=
#"http://www.craigslist.org/styles/craigslist.css" type="text/css" media="al
#l"> </head> <body onload="initFlag(1048151556)" class="posting"> <div cl
#ass="bchead"> <a id="ef" href="/email.friend?postingID=1048151556">email th
#is posting to a friend</a> <a href="http://limaohio.craigslist.org">lima /
#findlay craigslist</a> > <a href="/vol/">volunteers</a> </div>
# <div id="flags"> <div id="flagMsg"> please <a href="http://www.craig
#slist.org/about/help/flags_and_community_moderation">flag</a> with care:
#</div> <div id="flagChooser"> <br> <a class="fl" id="flag16" href="
#/flag/?flagCode=16&postingID=1048151556" title="Wrong category, wro
#ng site, discusses another post, or otherwise misplaced"> miscategorize
#d</a> <br> <a class="fl" id="flag28" href="/flag/?flagCode=28&po
#stingID=1048151556" title="Violates craigslist Terms Of Use or other po
#sted guidelines"> prohibited</a> <br> <a class="fl" id="flag15"
#href="/flag/?flagCode=15&postingID=1048151556" title="Posted too fr
#equently, in multiple cities/categories, or is too commercial"> spam/ov
#erpost</a> <br> <a class="fl" id="flag9" href="/flag/?flagCode=9&
#;postingID=1048151556" title="Should be considered for inclusion in the
# Best-Of-Craigslist"> best of craigslist</a> <br> </div> </div>
# <h2>Foster Parents Needed (Northwest Ohio)</h2> <hr> Reply to: <a href="
#mailto:comm-10481515
#56@craigslist&#
#46;org?subject=Foster%20Parents%20Needed%20(Northwest%20Ohio
#)">comm-104815155
#;6@craigslist.&
##111;rg</a> <sup>[<a href="http://www.craigslist.org/about/help/r
#eplying_to_posts" target="_blank">Errors when replying to ads?</a>]</sup><b
#r> Date: 2009-02-24, 8:37AM EST<br> <br> <br> <div id="userbody"> Diversio
#n Adolescent Foster Care of Ohio is accepting applications for foster paren
#ts in our Findlay office. There are many children in Ohio in need of a tem
#porary place to call home. Foster parent training is currently being offere
#d. Please call Stacy for more information 800-824-3007. We look forward t
#o meeting with you. www.diversionfostercare.org <br> <table> <tr>
# <td></td> <td></td> </tr> <tr> <td></td> <td></td> </
#tr> </table> <br><br><ul> <li> Location: Northwest Ohio <li>it's NOT o
#k to contact this poster with services or other commercial interests</ul>
#</div> PostingID: 1048151556<br> <br> <hr> <br> <div class="clfooter">
# Copyright © 2009 craigslist, inc. <a hre
#f="http://www.craigslist.org/about/terms.of.use.html">terms of use</a> 
#; <a href="http://www.craigslist.org/about/privacy_policy"
#>privacy policy</a> <a href="/forums/?forumID=8">fee
#dback forum</a> </div> <script type="text/javascript" src="http://www.craig
#slist.org/js/jquery.js"></script> <script type="text/javascript" src="http:
#//www.craigslist.org/js/postings.js"></script> </body> </html>
import sys
import re
import xml.sax.saxutils
import xml_helpers as xmlh
import crawl_craigslist
from datetime import datetime
import dateutil.parser
# lazily populated map of craigslist metro URL prefix -> "lat,long"
# string; filled in by load_craigslist_latlongs()
CL_LATLONGS = None
def load_craigslist_latlongs():
"""map of craigslist sub-metros to their latlongs."""
global CL_LATLONGS
CL_LATLONGS = {}
latlongs_fh = open('craigslist-metro-latlongs.txt')
for line in latlongs_fh:
line = re.sub(r'\s*#.*$', '', line).strip()
if line == "":
continue
try:
url, lat, lng = line.strip().split("|")
except:
print "error parsing line", line
sys.exit(1)
CL_LATLONGS[url] = lat + "," + lng
latlongs_fh.close()
def extract(instr, rx):
  """Return the first match of rx in instr (DOTALL), whitespace-stripped,
  or "" when the pattern does not match."""
  matches = re.findall(rx, instr, re.DOTALL)
  if not matches:
    return ""
  return matches[0].strip()
# pylint: disable-msg=R0915
def parse(instr, maxrecs, progress):
  """Return FPXML given craigslist crawler cache data.

  Args:
    instr: crawler cache file contents (see crawl_craigslist).
    maxrecs: maximum opportunities to emit (<=0 means unlimited).
    progress: whether to print progress/skip statistics.
  Returns:
    (fpxml_string, num_organizations, num_opportunities).
  """
  # lazy one-time load of the metro -> lat/long table
  if CL_LATLONGS == None:
    load_craigslist_latlongs()
  xmlh.print_progress("loading craigslist crawler output...")
  crawl_craigslist.parse_cache_file(instr, listings_only=True)
  xmlh.print_progress("loaded "+str(len(crawl_craigslist.pages))+" craigslist pages.")

  # convert to footprint format
  s = '<?xml version="1.0" ?>'
  s += '<FootprintFeed schemaVersion="0.1">'
  s += '<FeedInfo>'
  s += '<feedID>1</feedID>'
  s += '<providerID>105</providerID>'
  s += '<providerName>craigslist.org</providerName>'
  s += '<providerURL>http://www.craigslist.org/</providerURL>'
  s += '<createdDateTime>%s</createdDateTime>' % xmlh.current_ts()
  s += '</FeedInfo>'
  numorgs = numopps = 0

  # no "organization" in craigslist postings -- emit a single placeholder
  # org 0 that every opportunity points at
  s += '<Organizations>'
  s += '<Organization>'
  s += '<organizationID>0</organizationID>'
  s += '<nationalEIN></nationalEIN>'
  s += '<name></name>'
  s += '<missionStatement></missionStatement>'
  s += '<description></description>'
  s += '<location>'
  s += xmlh.output_val("city", "")
  s += xmlh.output_val("region", "")
  s += xmlh.output_val("postalCode", "")
  s += '</location>'
  s += '<organizationURL></organizationURL>'
  s += '<donateURL></donateURL>'
  s += '<logoURL></logoURL>'
  s += '<detailURL></detailURL>'
  s += '</Organization>'
  numorgs += 1
  s += '</Organizations>'

  # counters for the three reasons a page can be rejected
  skipped_listings = {}
  skipped_listings["body"] = skipped_listings["title"] = \
      skipped_listings["not-ok"] = 0
  s += '<VolunteerOpportunities>'
  for i, url in enumerate(crawl_craigslist.pages):
    page = crawl_craigslist.pages[url]
    # only pages that explicitly permit redistribution are converted
    ok = extract(page, "it's OK to distribute this "+
                 "charitable volunteerism opportunity")
    if ok == "":
      skipped_listings["not-ok"] += 1
      continue
    title = extract(page, "<title>(.+?)</title>")
    if title == "":
      skipped_listings["title"] += 1
      continue
    body = extract(page, '<div id="userbody">(.+?)<')
    if len(body) < 25:
      skipped_listings["body"] += 1
      continue
    item_id = extract(url, "/vol/(.+?)[.]html$")
    locstr = extract(page, "Location: (.+?)<")
    datestr = extract(page, "Date: (.+?)<")
    ts = dateutil.parser.parse(datestr)
    datetimestr = ts.strftime("%Y-%m-%dT%H:%M:%S")
    datestr = ts.strftime("%Y-%m-%d")
    # NOTE(review): "i>maxrecs" emits maxrecs+1 records and counts skipped
    # pages against the limit (i indexes all pages); confirm intent
    if (maxrecs>0 and i>maxrecs):
      break
    xmlh.print_rps_progress("opps", progress, i, maxrecs)
    if progress and i > 0 and i % 250 == 0:
      msg = "skipped " + str(skipped_listings["title"]+skipped_listings["body"])
      msg += " listings ("+str(skipped_listings["title"]) + " for no-title and "
      msg += str(skipped_listings["body"]) + " for short body and "
      msg += str(skipped_listings["not-ok"]) + " for no-redistrib)"
      xmlh.print_progress(msg)

    s += '<VolunteerOpportunity>'
    s += '<volunteerOpportunityID>%s</volunteerOpportunityID>' % (item_id)
    s += '<sponsoringOrganizationIDs><sponsoringOrganizationID>0</sponsoringOrganizationID></sponsoringOrganizationIDs>'
    s += '<volunteerHubOrganizationIDs><volunteerHubOrganizationID>0</volunteerHubOrganizationID></volunteerHubOrganizationIDs>'
    s += '<title>%s</title>' % (title)
    s += '<detailURL>%s</detailURL>' % (url)
    # avoid CDATA in body...
    esc_body = xml.sax.saxutils.escape(body)
    esc_body100 = xml.sax.saxutils.escape(body[0:100])
    s += '<description>%s</description>' % (esc_body)
    s += '<abstract>%s</abstract>' % (esc_body100 + "...")
    s += '<lastUpdated>%s</lastUpdated>' % (datetimestr)
    # TODO: expires
    # TODO: synthesize location from metro...
    s += '<locations><location>'
    s += '<name>%s</name>' % (xml.sax.saxutils.escape(locstr))
    # look up the metro's lat/long from the sub-domain; what about the
    # few that do geocode?
    lat, lng = "", ""
    try:
      domain, unused = url.split("vol/")
      lat, lng = CL_LATLONGS[domain].split(",")
    except:
      # deliberately best-effort: unknown metros just get empty lat/long
      # ignore for now
      #print url
      #continue
      pass
    s += '<latitude>%s</latitude>' % (lat)
    s += '<longitude>%s</longitude>' % (lng)
    s += '</location></locations>'
    #s += '<locations><location>'
    #s += '<city>%s</city>' % (
    #s += '<region>%s</region>' % (
    #s += '</location></locations>'
    s += '<dateTimeDurations><dateTimeDuration>'
    s += '<openEnded>No</openEnded>'
    s += '<startDate>%s</startDate>' % (datestr)
    # TODO: endDate = startDate + N=14 days?
    # TODO: timezone???
    #s += '<endDate>%s</endDate>' % (
    s += '</dateTimeDuration></dateTimeDurations>'
    # TODO: categories???
    #s += '<categoryTags>'
    s += '</VolunteerOpportunity>'
    numopps += 1
  s += '</VolunteerOpportunities>'
  s += '</FootprintFeed>'

  #s = re.sub(r'><([^/])', r'>\n<\1', s)
  return s, numorgs, numopps
| Python |
#!/usr/bin/python
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#http://usaservice.org/page/event/search_results?orderby=day&state=CA&country=US&event_type%5b%5d=&limit=1000&radius_unit=miles&format=commons_rss&wrap=no
from xml.dom import minidom
import sys
import os
import urllib
import re
import thread
import time
from datetime import datetime
import socket
# cap every socket operation so a hung server can't stall a crawler thread
DEFAULT_TIMEOUT = 30
socket.setdefaulttimeout(DEFAULT_TIMEOUT)
# US state/territory codes, Canadian provinces, plus "na" for stateless listings
STATES = ['AA','AE','AK','AL','AP','AR','AS','AZ','CA','CO','CT','DC','DE','FL','FM','GA','GU','HI','IA','ID','IL','IN','KS','KY','LA','MA','MD','ME','MH','MI','MN','MO','MP','MS','MT','NC','ND','NE','NH','NJ','NM','NV','NY','OH','OK','OR','PA','PR','PW','RI','SC','SD','TN','TX','UT','VA','VI','VT','WA','WI','WV','WY','AB','BC','MB','NB','NL','NT','NS','NU','ON','PE','QC','SK','YT','na']
# all crawler threads append their results to this one file
OUTPUT_FN = "usaservice.txt"
# serializes appends to OUTPUT_FN across crawler threads
file_lock = thread.allocate_lock()
# number of crawler threads currently in flight (writes guarded by crawlers_lock)
crawlers = 0
crawlers_lock = thread.allocate_lock()
def get_url(state):
  """Build the usaservice.org commons-rss search URL for one state code."""
  query_tail = ("&country=US&event_type%5b%5d=&limit=1000"
                "&radius_unit=miles&format=commons_rss&wrap=no")
  return ("http://usaservice.org/page/event/search_results?orderby=day&state="
          + state + query_tail)
def crawl_state(state, ignore):
  """Thread body: fetch one state's RSS feed and append its items to OUTPUT_FN.

  state: two-letter state/province code (see STATES).
  ignore: unused; present only because start_new_thread needs an args tuple.
  Maintains the global in-flight counter `crawlers` around the work.
  """
  global crawlers, crawlers_lock, OUTPUT_FN, file_lock
  crawlers_lock.acquire()
  crawlers = crawlers + 1
  crawlers_lock.release()
  # crude throttle: this thread already counted itself, then busy-waits until
  # the population drops.  NOTE(review): the read is unlocked -- racy but benign
  while crawlers > 10:
    time.sleep(1)
  try:
    url = get_url(state)
    fh = urllib.urlopen(url)
    rss = fh.read()
    fh.close()
    items = re.findall(r'<item>.+?</item>', rss, re.DOTALL)
    if len(items) > 0:
      print datetime.now(), "found", len(items), "items for state", state
      outstr = ""
      for item in items:
        # flatten all newline styles so each <item> lands on one output line
        item = re.sub(r'(?:\r?\n|\r)',' ', item)
        # drop a known spam listing
        if re.search(r'Find Money For Next 12 Months', item):
          continue
        outstr += item + "\n"
      file_lock.acquire()
      outfh = open(OUTPUT_FN, "a")
      outfh.write(outstr)
      outfh.close()
      file_lock.release()
  except:
    # NOTE(review): bare except silently discards all fetch/parse/IO errors
    # for the whole state, and can leave file_lock held if the write raised
    # after acquire -- confirm this best-effort behavior is intended
    pass
  crawlers_lock.acquire()
  crawlers = crawlers - 1
  crawlers_lock.release()
from optparse import OptionParser
if __name__ == "__main__":
  # start from a clean output file; ignore "file does not exist"
  try:
    os.unlink(OUTPUT_FN)
  except:
    pass
  # one crawler thread per state; second tuple element is an unused filler arg
  for state in STATES:
    thread.start_new_thread(crawl_state, (state, "foo"))
  # give them a chance to start
  # NOTE(review): if no thread has incremented `crawlers` within 1s the wait
  # loop below can exit before any crawling happened -- confirm acceptable
  time.sleep(1)
  while (crawlers > 0):
    print datetime.now(), "waiting for", crawlers, "crawlers to finish."
    time.sleep(1)
  sys.exit(0)
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
parser for Hands On Network
"""
import xml_helpers as xmlh
import re
from datetime import datetime
# registry filled by register_org(): numeric org id -> <Organization> XML snippet
ORGS = {}
# dedup index: organization name -> numeric org id
ORGIDS = {}
# highest org id handed out so far (ids start at 1)
MAX_ORGID = 0
def register_org(orgname, orgstr):
  """register the organization info, for lookup later.

  orgname: display name; also the dedup key.
  orgstr: unused -- kept for caller compatibility (it is rebuilt below).
  Returns the numeric organization id assigned to orgname.
  """
  global MAX_ORGID
  if orgname in ORGIDS:
    return ORGIDS[orgname]
  MAX_ORGID = MAX_ORGID + 1
  orgstr = '<Organization>'
  # bug fix: the id embedded in the XML must match the id returned to callers.
  # Previously this used len(ORGIDS), which lags MAX_ORGID by one at this
  # point, so <sponsoringOrganizationID> references emitted by parse() never
  # matched any <organizationID>.
  orgstr += '<organizationID>%d</organizationID>' % (MAX_ORGID)
  orgstr += '<nationalEIN></nationalEIN>'
  orgstr += '<name>%s</name>' % (orgname)
  orgstr += '<missionStatement></missionStatement>'
  orgstr += '<description></description>'
  orgstr += '<location>'
  orgstr += xmlh.output_val("city", "")
  orgstr += xmlh.output_val("region", "")
  orgstr += xmlh.output_val("postalCode", "")
  orgstr += '</location>'
  orgstr += '<organizationURL></organizationURL>'
  orgstr += '<donateURL></donateURL>'
  orgstr += '<logoURL></logoURL>'
  orgstr += '<detailURL></detailURL>'
  orgstr += '</Organization>'
  ORGS[MAX_ORGID] = orgstr
  ORGIDS[orgname] = MAX_ORGID
  return MAX_ORGID
# pylint: disable-msg=R0915
def parse(instr, maxrecs, progress):
  """return FPXML given americorps data

  instr: raw americorps/networkforgood XML feed as a string.
  maxrecs: stop after roughly this many opportunities (0 = unlimited;
           NOTE(review): `i > maxrecs` lets maxrecs+1 records through).
  progress: if True, print periodic progress lines.
  Returns (fpxml_string, numorgs, numopps).
  NOTE(review): the OpportunityDate error path returns bare None, which
  breaks callers that unpack a 3-tuple -- confirm intended.
  """
  # TODO: progress
  known_elnames = [
    'Abstract', 'Categories', 'Category', 'CategoryID', 'Country', 'DateListed',
    'Description', 'DetailURL', 'Duration', 'DurationQuantity', 'DurationUnit',
    'EndDate', 'KeyWords', 'LocalID', 'Location', 'LocationClassification',
    'LocationClassificationID', 'LocationClassifications', 'Locations',
    'LogoURL', 'Name', 'OpportunityDate', 'OpportunityDates', 'OpportunityType',
    'OpportunityTypeID', 'SponsoringOrganization', 'SponsoringOrganizations',
    'StartDate', 'StateOrProvince', 'Title', 'VolunteerOpportunity',
    'ZipOrPostalCode' ]
  numorgs = numopps = 0
  # rewrite <db:tag>-style namespaced tags to <db_tag> for the simple parser
  instr = re.sub(r'<(/?db):', r'<\1_', instr)
  opps = re.findall(r'<VolunteerOpportunity>.+?</VolunteerOpportunity>',
                    instr, re.DOTALL)
  volopps = ""
  for i, oppstr in enumerate(opps):
    if (maxrecs > 0 and i > maxrecs):
      break
    xmlh.print_rps_progress("opps", progress, i, maxrecs)
    item = xmlh.simple_parser(oppstr, known_elnames, progress=False)
    # SponsoringOrganization/Name -- fortunately, no conflicts
    # but there's no data except the name
    orgname = xmlh.get_tag_val(item, "Name")
    orgid = register_org(orgname, orgname)
    # logoURL -- sigh, this is for the opportunity not the org
    volopps += '<VolunteerOpportunity>'
    volopps += xmlh.output_val('volunteerOpportunityID', str(i))
    volopps += xmlh.output_val('sponsoringOrganizationID', str(orgid))
    volopps += xmlh.output_node('volunteerHubOrganizationID', item, "LocalID")
    volopps += xmlh.output_node('title', item, "Title")
    volopps += xmlh.output_node('abstract', item, "Abstract")
    volopps += xmlh.output_node('description', item, "Description")
    volopps += xmlh.output_node('detailURL', item, "DetailURL")
    # -8888 is the pipeline's "count unknown" sentinel
    volopps += xmlh.output_val('volunteersNeeded', "-8888")
    oppdates = item.getElementsByTagName("OpportunityDate")
    if (oppdates.length != 1):
      print datetime.now(), \
          "parse_americorps.py: only 1 OpportunityDate supported."
      return None
    oppdate = oppdates[0]
    volopps += '<dateTimeDurations><dateTimeDuration>'
    volopps += xmlh.output_val('openEnded', 'No')
    # e.g. DurationQuantity=3 + DurationUnit=M -> "P3M" (ISO-8601-ish)
    volopps += xmlh.output_val('duration', 'P%s%s' %
                               (xmlh.get_tag_val(oppdate, "DurationQuantity"),
                                xmlh.get_tag_val(oppdate, "DurationUnit")))
    volopps += xmlh.output_val('commitmentHoursPerWeek', '0')
    volopps += xmlh.output_node('startDate', oppdate, "StartDate")
    volopps += xmlh.output_node('endDate', oppdate, "EndDate")
    volopps += '</dateTimeDuration></dateTimeDurations>'
    volopps += '<locations>'
    opplocs = item.getElementsByTagName("Location")
    for opploc in opplocs:
      volopps += '<location>'
      volopps += xmlh.output_node('region', opploc, "StateOrProvince")
      volopps += xmlh.output_node('country', opploc, "Country")
      volopps += xmlh.output_node('postalCode', opploc, "ZipOrPostalCode")
      volopps += '</location>'
    volopps += '</locations>'
    volopps += '<categoryTags/>'
    volopps += '</VolunteerOpportunity>'
    numopps += 1
  # convert to footprint format
  outstr = '<?xml version="1.0" ?>'
  outstr += '<FootprintFeed schemaVersion="0.1">'
  outstr += '<FeedInfo>'
  # TODO: assign provider IDs?
  outstr += xmlh.output_val('providerID', '106')
  outstr += xmlh.output_val('providerName', 'networkforgood')
  outstr += xmlh.output_val('feedID', 'americorps')
  outstr += xmlh.output_val('createdDateTime', xmlh.current_ts())
  outstr += xmlh.output_val('providerURL', 'http://www.networkforgood.org/')
  outstr += xmlh.output_val('description', 'Americorps')
  # TODO: capture ts -- use now?!
  outstr += '</FeedInfo>'
  # hardcoded: Organization
  outstr += '<Organizations>'
  for key in ORGS:
    outstr += ORGS[key]
    numorgs += 1
  outstr += '</Organizations>'
  outstr += '<VolunteerOpportunities>'
  outstr += volopps
  outstr += '</VolunteerOpportunities>'
  outstr += '</FootprintFeed>'
  #outstr = re.sub(r'><([^/])', r'>\n<\1', outstr)
  return outstr, numorgs, numopps
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
parser for Hands On Network
"""
# <VolunteerOpportunity>
# <LocalID>7702:76159:578625</LocalID>
# <AffiliateID>7702</AffiliateID>
# <OrgLocalID>578625</OrgLocalID>
# <Categories>
# <Category><CategoryID>5</CategoryID></Category>
# <Category><CategoryID>6</CategoryID></Category>
# </Categories>
# <DateListed>2008-07-08</DateListed>
# <OpportunityType><OpportunityTypeID>1</OpportunityTypeID></OpportunityType>
# <Title>HHSB Arts & Crafts (FX)</Title>
# <DetailURL>http://www.HandsOnMiami.org/projects/viewProject.php?..</DetailURL>
# <Description>Join HOM at the Hebrew Home of South Beach </Description>
# <LogoURL>http://www.HandsOnMiami.org/uploaded_files/....gif</LogoURL>
# <LocationClassifications><LocationClassification><LocationClassificationID>1
# </LocationClassificationID></LocationClassification></LocationClassifications>
# <Locations>
# <Location>
# <Address1>Hebrew Home of South Beach</Address1>
# <Address2>320 Collins Avenue</Address2>
# <City>Miami Beach</City>
# <StateOrProvince>FL</StateOrProvince>
# <ZipOrPostalCode>33139</ZipOrPostalCode>
# <Country>USA</Country>
# </Location>
# </Locations>
# <OpportunityDates>
# <OpportunityDate>
# <StartDate>2008-08-09</StartDate>
# <EndDate>2008-08-09</EndDate>
# <StartTime>10:00:00</StartTime>
# <EndTime>11:30:00</EndTime>
# </OpportunityDate>
# <OpportunityDate>
# <StartDate>2008-08-23</StartDate>
# <EndDate>2008-08-23</EndDate>
# <StartTime>10:00:00</StartTime>
# <EndTime>11:30:00</EndTime>
# </OpportunityDate>
# </OpportunityDates>
#
# <SponsoringOrganizations>
# <SponsoringOrganization>
# <Name>Hebrew Home of South Beach</Name>
# <Description>Hebrew Home of South Beach; Residential... </Description>
# <Country>USA</Country>
# <Phone>305-672-6464</Phone>
# <Extension>220</Extension>
# </SponsoringOrganization>
# </SponsoringOrganizations>
# </VolunteerOpportunity>
import xml_helpers as xmlh
import re
from datetime import datetime
# pylint: disable-msg=R0915
def parse(instr, maxrecs, progress):
  """return FPXML given handsonnetwork data

  instr: raw handsonnetwork XML feed as a string.
  maxrecs: stop after roughly this many opportunities (0 = unlimited).
  progress: if True, print periodic progress lines.
  Returns (fpxml_string, numorgs, numopps); returns bare None when an
  opportunity has other than exactly one <Location> (NOTE(review): callers
  unpacking a 3-tuple will break on that path).
  """
  if progress:
    print datetime.now(), "parse_handsonnetwork.Parse: starting parse..."
  known_elnames = [
    'Address1', 'Address2', 'AffiliateID', 'Categories', 'Category', 'City',
    'Country', 'DateListed', 'Description', 'DetailURL', 'EndDate', 'EndTime',
    'Extension', 'LocalID', 'Location', 'LocationClassifications',
    'Locations', 'LogoURL', 'Name', 'OpportunityDate', 'OpportunityDates',
    'OpportunityType', 'OrgLocalID', 'Phone', 'SponsoringOrganization',
    'SponsoringOrganizations', 'StartDate', 'StartTime', 'StateOrProvince',
    'Title', 'VolunteerOpportunity', 'ZipOrPostalCode'
    ]
  # convert to footprint format
  outstr = '<?xml version="1.0" ?>'
  outstr += '<FootprintFeed schemaVersion="0.1">'
  outstr += '<FeedInfo>'
  # TODO: assign provider IDs?
  outstr += '<providerID>102</providerID>'
  outstr += '<providerName>handsonnetwork.org</providerName>'
  outstr += '<feedID>1</feedID>'
  # TODO: get/create real feed date
  outstr += '<createdDateTime>%s</createdDateTime>' % xmlh.current_ts()
  outstr += '<providerURL>http://www.handsonnetwork.org/</providerURL>'
  outstr += '<description></description>'
  # TODO: capture ts -- use now?!
  outstr += '</FeedInfo>'
  numorgs = numopps = 0
  # hardcoded: Organization
  outstr += '<Organizations>'
  # sponsor name+description -> 1-based org id, for opportunity references
  sponsor_ids = {}
  sponsorstrs = re.findall(
    r'<SponsoringOrganization>.+?</SponsoringOrganization>', instr, re.DOTALL)
  for i, orgstr in enumerate(sponsorstrs):
    if progress and i > 0 and i % 250 == 0:
      print str(datetime.now())+": ", i, " orgs processed."
    org = xmlh.simple_parser(orgstr, known_elnames, False)
    #sponsors = xmldoc.getElementsByTagName("SponsoringOrganization")
    #for i,org in enumerate(sponsors):
    outstr += '<Organization>'
    name = xmlh.get_tag_val(org, "Name")
    desc = xmlh.get_tag_val(org, "Description")
    outstr += '<organizationID>%d</organizationID>' % (i+1)
    outstr += '<nationalEIN></nationalEIN>'
    outstr += '<name>%s</name>' % (xmlh.get_tag_val(org, "Name"))
    outstr += '<missionStatement></missionStatement>'
    outstr += '<description>%s</description>' % \
        (xmlh.get_tag_val(org, "Description"))
    # unmapped: Email
    # unmapped: Phone
    # unmapped: Extension
    outstr += '<location>'
    #outstr += '<city>%s</city>' % (xmlh.get_tag_val(org, "City"))
    #outstr += '<region>%s</region>' % (xmlh.get_tag_val(org, "State"))
    #outstr += '<postalCode>%s</postalCode>' % \
    #  (xmlh.get_tag_val(org, "PostalCode"))
    outstr += '<country>%s</country>' % (xmlh.get_tag_val(org, "Country"))
    outstr += '</location>'
    outstr += '<organizationURL>%s</organizationURL>' % \
        (xmlh.get_tag_val(org, "URL"))
    outstr += '<donateURL></donateURL>'
    outstr += '<logoURL></logoURL>'
    outstr += '<detailURL></detailURL>'
    outstr += '</Organization>'
    numorgs += 1
    # NOTE(review): repeated sponsors are not deduped -- each occurrence
    # emits another <Organization>; the last occurrence wins the id mapping
    sponsor_ids[name+desc] = i+1
  outstr += '</Organizations>'
  outstr += '<VolunteerOpportunities>'
  #items = xmldoc.getElementsByTagName("VolunteerOpportunity")
  #if (maxrecs > items.length):
  #  maxrecs = items.length
  #for item in items[0:maxrecs-1]:
  if progress:
    print datetime.now(), "finding VolunteerOpportunities..."
  opps = re.findall(r'<VolunteerOpportunity>.+?</VolunteerOpportunity>',
                    instr, re.DOTALL)
  for i, oppstr in enumerate(opps):
    if (maxrecs > 0 and i > maxrecs):
      break
    xmlh.print_rps_progress("opps", progress, i, maxrecs)
    opp = xmlh.simple_parser(oppstr, known_elnames, False)
    orgs = opp.getElementsByTagName("SponsoringOrganization")
    name = xmlh.get_tag_val(orgs[0], "Name")
    desc = xmlh.get_tag_val(orgs[0], "Description")
    sponsor_id = sponsor_ids[name+desc]
    oppdates = opp.getElementsByTagName("OpportunityDate")
    # NOTE(review): getElementsByTagName never returns None, and `.count` is
    # a bound list method (truthy), so this condition is always False and the
    # else branch always runs -- verify against upstream history
    if (oppdates == None or oppdates.count == 0):
      oppdates = [ None ]
    else:
      # unmapped: LogoURL
      # unmapped: OpportunityTypeID (categoryTag?)
      # unmapped: LocationClassificationID (flatten)
      # NOTE(review): the three plain `=` assignments below overwrite
      # datestr_pre instead of appending (`+=`), so only the <title> node
      # survives into the output -- looks like a bug; confirm intent
      datestr_pre = xmlh.output_val('volunteerOpportunityID', opp, "LocalID")
      datestr_pre = xmlh.output_plural('sponsoringOpportunityID', sponsor_id)
      # unmapped: OrgLocalID
      datestr_pre = xmlh.output_plural_node('volunteerHubOrganizationID',
                                            opp, "AffiliateID")
      datestr_pre = xmlh.output_node('title', opp, "Title")
      datestr_pre += '<abstract></abstract>'
      # -8888 is the pipeline's "count unknown" sentinel
      datestr_pre += '<volunteersNeeded>-8888</volunteersNeeded>'
      locations = opp.getElementsByTagName("Location")
      if (locations.length != 1):
        print datetime.now(), "parse_handsonnetwork: only 1 location supported."
        return None
      loc = locations[0]
      datestr_post = '<locations><location>'
      # yuck, uses address1 for venue name... sometimes...
      #no way to detect: presence of numbers?
      datestr_post += xmlh.output_node('streetAddress1', loc, "Address1")
      datestr_post += xmlh.output_node('streetAddress2', loc, "Address2")
      datestr_post += xmlh.output_node('city', loc, "City")
      datestr_post += xmlh.output_node('region', loc, "State")
      datestr_post += xmlh.output_node('country', loc, "Country")
      datestr_post += xmlh.output_node('postalCode', loc, "ZipOrPostalCode")
      # no equivalent: latitude, longitude
      datestr_post += '</location></locations>'
      datestr_post += xmlh.output_node('detailURL', opp, "DetailURL")
      datestr_post += xmlh.output_node('description', opp, "Description")
      datestr_post += xmlh.output_val('lastUpdated', opp,
          '%sT00:00:00' % (xmlh.get_tag_val(opp, "DateListed")))
    oppcount = 0
    datetimedur = ''
    for oppdate in oppdates:
      oppcount = oppcount + 1
      if progress:
        if numopps % 250 == 0:
          print datetime.now(), ": ", numopps, " records generated."
      datetimedur += '<dateTimeDuration>'
      if oppdate == None:
        datetimedur += '<openEnded>Yes</openEnded>'
      else:
        datetimedur += '<openEnded>No</openEnded>'
        # hardcoded: commitmentHoursPerWeek
        datetimedur += '<commitmentHoursPerWeek>0</commitmentHoursPerWeek>'
        # TODO: timezone
        datetimedur += xmlh.output_node("startDate", oppdate, "StartDate")
        datetimedur += xmlh.output_node("endDate", oppdate, "EndDate")
        datetimedur += xmlh.output_node("startTime", oppdate, "StartTime")
        datetimedur += xmlh.output_node("endTime", oppdate, "EndTime")
      datetimedur += '</dateTimeDuration>'
    # NOTE(review): dead code -- oppdates always holds at least one entry
    # (possibly [None]) so oppcount is never 0 here
    if oppcount == 0: # insert an open ended datetimeduration
      datetimedur = '<dateTimeDuration><openEnded>'
      datetimedur += 'Yes</openEnded></dateTimeDuration>'
    outstr += '<VolunteerOpportunity>'
    outstr += datestr_pre
    outstr += '<dateTimeDurations>'
    outstr += datetimedur
    outstr += '</dateTimeDurations>'
    outstr += datestr_post
    outstr += '</VolunteerOpportunity>'
    numopps += 1
  outstr += '</VolunteerOpportunities>'
  outstr += '</FootprintFeed>'
  #outstr = re.sub(r'><([^/])', r'>\n<\1', outstr)
  return outstr, numorgs, numopps
| Python |
#!/usr/bin/python
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
main() for the crawling/parsing/loading pipeline
"""
#from xml.dom.ext import PrettyPrint
import gzip
import hashlib
import urllib
import re
from datetime import datetime
import parse_footprint
import parse_gspreadsheet
import parse_usaservice
import parse_handsonnetwork
import parse_idealist
import parse_craigslist
import parse_americorps
import parse_userpostings
import parse_servenet
import parse_volunteermatch
import subprocess
import sys
import time
import xml_helpers as xmlh
from optparse import OptionParser
import dateutil
import dateutil.tz
import dateutil.parser
# output record formatting for the TSV emitters
FIELDSEP = "\t"
RECORDSEP = "\n"
MAX_ABSTRACT_LEN = 300
DEBUG = False
PROGRESS = False
PRINTHEAD = False
ABRIDGED = False
OUTPUTFMT = "fpxml"
# set a nice long timeout
import socket
socket.setdefaulttimeout(600.0)
# pick a latlng that'll never match real queries
UNKNOWN_LAT = UNKNOWN_LNG = "-10"
UNKNOWN_LATLNG = UNKNOWN_LAT + "," + UNKNOWN_LNG
# pick a latlng that'll never match real queries
LOCATIONLESS_LAT = LOCATIONLESS_LNG = "0"
LOCATIONLESS_LATLNG = LOCATIONLESS_LAT + "," + LOCATIONLESS_LNG
HEADER_ALREADY_OUTPUT = False
#BASE_PUB_URL = "http://change.gov/"
BASE_PUB_URL = "http://adamsah.net/"
# subset of FIELDTYPES needed by the search frontend; keys must exist there
SEARCHFIELDS = {
  # required
  "description":"builtin",
  "event_date_range":"builtin",
  "link":"builtin",
  "location":"builtin",
  "title":"builtin",
  # needed for search restricts
  "latitude":"float",
  "longitude":"float",
  # needed for basic search results
  "id":"builtin",
  "detailURL":"URL",
  "abstract":"string",
  "location_string":"string",
  "feed_providerName":"string",
  }
# field name -> Base type suffix used by output_field() when emitting headers
FIELDTYPES = {
  "title":"builtin",
  "description":"builtin",
  "link":"builtin",
  "event_type":"builtin",
  "quantity":"builtin",
  "image_link":"builtin",
  "event_date_range":"builtin",
  "id":"builtin",
  "location":"builtin",
  "paid":"boolean",
  "openended":"boolean",
  "volunteersSlots":"integer",
  "volunteersFilled":"integer",
  "volunteersNeeded":"integer",
  "minimumAge":"integer",
  "latitude":"float",
  "longitude":"float",
  "providerURL":"URL",
  "detailURL":"URL",
  "org_organizationURL":"URL",
  "org_logoURL":"URL",
  "org_providerURL":"URL",
  "feed_providerURL":"URL",
  "lastUpdated":"dateTime",
  "expires":"dateTime",
  "feed_createdDateTime":"dateTime",
  # note: type "location" isn"t safe because the Base geocoder can fail,
  # causing the record to be rejected
  "duration":"string",
  "abstract":"string",
  "sexRestrictedTo":"string",
  "skills":"string",
  "contactName":"string",
  "contactPhone":"string",
  "contactEmail":"string",
  "language":"string",
  "org_name":"string",
  "org_missionStatement":"string",
  "org_description":"string",
  "org_phone":"string",
  "org_fax":"string",
  "org_email":"string",
  "categories":"string",
  "audiences":"string",
  "commitmentHoursPerWeek":"string",
  "employer":"string",
  "feed_providerName":"string",
  "feed_description":"string",
  "providerID":"string",
  "feed_providerID":"string",
  "feedID":"string",
  "opportunityID":"string",
  "organizationID":"string",
  # bug fix: was "strng", which would have emitted a bogus type suffix
  "sponsoringOrganizationID":"string",
  "volunteerHubOrganizationID":"string",
  "org_nationalEIN":"string",
  "org_guidestarID":"string",
  "venue_name":"string",
  "location_string":"string",
  "orgLocation":"string",
  "hidden_details":"string",
  }
def print_progress(msg, filename="", progress=None):
  """Forward a progress message to xmlh.print_progress.

  The module flag PROGRESS is the effective default; it cannot be the
  literal default argument because it may be reassigned after import.
  """
  effective = PROGRESS if progress is None else progress
  xmlh.print_progress(msg, filename, progress=effective)
def print_status(msg, filename="", progress=None):
  """Forward a status line (used for stats collection) to xmlh.print_status.

  Falls back to the module flag PROGRESS when no explicit value is given.
  """
  effective = PROGRESS if progress is None else progress
  xmlh.print_status(msg, filename, progress=effective)
# Google Base uses ISO8601... in PST -- I kid you not:
# http://base.google.com/support/bin/answer.py?
# answer=78170&hl=en#Events%20and%20Activities
# and worse, you have to change an env var in python...
def convert_dt_to_gbase(datestr, timestr, timezone):
  """converts dates like YYYY-MM-DD, times like HH:MM:SS and
  timezones like America/New_York, into Google Base format
  (ISO8601 rendered in PST8PDT local time, per Base's event schema).

  Returns a "%Y-%m-%dT%H:%M:%S" string.
  """
  try:
    tzinfo = dateutil.tz.tzstr(timezone)
  except:
    # unparseable timezone string: fall back to UTC
    tzinfo = dateutil.tz.tzutc()
  try:
    # NOTE: `timestr` is rebound here from a string to a datetime
    timestr = dateutil.parser.parse(datestr + " " + timestr)
  except:
    # NOTE(review): on parse failure `timestr` is still a str, so the
    # .replace(tzinfo=...) below raises TypeError -- confirm intended fallback
    print "error parsing datetime: "+datestr+" "+timestr
  timestr = timestr.replace(tzinfo=tzinfo)
  pst = dateutil.tz.tzstr("PST8PDT")
  timestr = timestr.astimezone(pst)
  # dateutil can parse 2-digit years as < 1900; nudge those forward a century
  if timestr.year < 1900:
    timestr = timestr.replace(year=timestr.year+1900)
  res = timestr.strftime("%Y-%m-%dT%H:%M:%S")
  res = re.sub(r'Z$', '', res)
  return res
# FPXML fields whose repeated child elements get flattened to one CSV string
CSV_REPEATED_FIELDS = ['categories', 'audiences']
# opportunity-level FPXML fields copied verbatim into the output record
DIRECT_MAP_FIELDS = [
  'opportunityID', 'organizationID', 'volunteersSlots', 'volunteersFilled',
  'volunteersNeeded', 'minimumAge', 'sexRestrictedTo', 'skills', 'contactName',
  'contactPhone', 'contactEmail', 'providerURL', 'language', 'lastUpdated',
  'expires', 'detailURL']
# organization-level fields copied into the output with an "org_" prefix
ORGANIZATION_FIELDS = [
  'nationalEIN', 'guidestarID', 'name', 'missionStatement', 'description',
  'phone', 'fax', 'email', 'organizationURL', 'logoURL', 'providerURL']
def flattener_value(node):
  """Return the text of *node*'s first child with commas stripped,
  or "" when the node has no children (empty element)."""
  child = node.firstChild
  if child is None:
    return ""
  return child.data.replace(",", "")
def flatten_to_csv(domnode):
  """Join the non-empty text values of a DOM node's children with commas."""
  # pylint: disable-msg=W0141
  values = (flattener_value(child) for child in domnode.childNodes)
  return ",".join(v for v in values if v != "")
def output_field(name, value):
"""print a field value, handling long strings, header lines and
custom datatypes."""
#global PRINTHEAD, DEBUG
if PRINTHEAD:
if name not in FIELDTYPES:
print datetime.now(), "no type for field: " + name + FIELDTYPES[name]
sys.exit(1)
elif FIELDTYPES[name] == "builtin":
return name
elif OUTPUTFMT == "basetsv":
return "c:"+name+":"+FIELDTYPES[name]
else:
return name+":"+FIELDTYPES[name]
if OUTPUTFMT == "basetsv":
# grr: Base tries to treat commas in custom fields as being lists ?!
# http://groups.google.com/group/base-help-basics/browse_thread/thread/
# c4f51447191a6741
# TODO: note that this may cause fields to expand beyond their maxlen
# (e.g. abstract)
value = re.sub(r',', ';;', value)
if DEBUG:
if (len(value) > 70):
value = value[0:67] + "... (" + str(len(value)) + " bytes)"
return name.rjust(22) + " : " + value
if (FIELDTYPES[name] == "dateTime"):
return convert_dt_to_gbase(value, "", "UTC")
return value
def get_addr_field(node, field):
  """Return <field>'s value under *node*, with a trailing space when non-empty
  (so concatenated address pieces stay separated)."""
  val = xmlh.get_tag_val(node, field)
  return val + " " if val != "" else val
def city_loc_fields(node):
  """Synthesize a city-region-postal-country string.

  Avoids commas so the result is safe for CSV and good enough for geocoding.
  """
  parts = [get_addr_field(node, tag)
           for tag in ("city", "region", "postalCode", "country")]
  return "".join(parts)
def compute_loc_field(node):
  """Concatenate the three street-address lines of *node*."""
  tags = ("streetAddress1", "streetAddress2", "streetAddress3")
  return "".join(get_addr_field(node, tag) for tag in tags)
def compute_city_field(node):
  """Full address: street lines followed by city/region/postal/country."""
  return compute_loc_field(node) + city_loc_fields(node)
def lookup_loc_fields(node):
  """try a multitude of field combinations to get a geocode.

  Returns (fullloc, latlng, loc): the full synthesized address, a "lat,lng"
  string ("0,0" when nothing geocodes), and the address variant that
  finally geocoded.
  """
  fullloc = loc = compute_city_field(node)
  # prefer an explicit latitude/longitude from the feed, if present
  latlng = xmlh.get_tag_val(node, "latitude") + ","
  latlng += xmlh.get_tag_val(node, "longitude")
  if latlng == ",":
    latlng = geocode(loc)
  if latlng == "":
    # sometimes address1 contains un-geocodable descriptive language,
    # e.g. venue name, "around the corner from ..." etc.
    loc = get_addr_field(node, "streetAddress2")
    loc += get_addr_field(node, "streetAddress3")
    loc += city_loc_fields(node)
    latlng = geocode(loc)
  if latlng == "":
    # rarely, addr1 & addr are both descriptive
    loc = get_addr_field(node, "streetAddress3")
    loc += city_loc_fields(node)
    latlng = geocode(loc)
  if latlng == "":
    # missing or bogus address lines
    # NOTE(review): no geocode() call here, so this candidate is never tried
    # and control always falls through to the next fallback -- likely a bug
    loc = city_loc_fields(node)
  if latlng == "":
    # missing or bogus city name
    loc = get_addr_field(node, "postalCode")
    loc += get_addr_field(node, "country")
    latlng = geocode(loc)
  if latlng == "":
    # missing or bogus postalcode
    loc = get_addr_field(node, "city")
    loc += get_addr_field(node, "region")
    loc += get_addr_field(node, "country")
    latlng = geocode(loc)
  if latlng == "":
    # NOTE(review): appends region/country onto the previous candidate,
    # duplicating them (loc already ends with region+country) -- confirm intent
    loc += get_addr_field(node, "region")
    loc += get_addr_field(node, "country")
    latlng = geocode(loc)
  # TODO: get more sophisticated about reverse geocoding
  # entries missing some fields
  if latlng == "":
    latlng = "0,0"
  else:
    if fullloc == "":
      fullloc = reverse_geocode(latlng)
    if loc == "":
      loc = reverse_geocode(latlng)
  if DEBUG:
    print datetime.now(), "geocode: " + loc + "=" + latlng
  return (fullloc, latlng, loc)
def output_loc_field(node, mapped_name):
  """Emit *node*'s street + city location as field *mapped_name*."""
  full_location = compute_loc_field(node) + city_loc_fields(node)
  return output_field(mapped_name, full_location)
def output_tag_value(node, fieldname):
  """Emit the value of <fieldname> under *node* as the field of the same name."""
  value = xmlh.get_tag_val(node, fieldname)
  return output_field(fieldname, value)
def output_tag_value_renamed(node, xmlname, newname):
  """Emit the value of <xmlname> under *node*, but under the field *newname*."""
  value = xmlh.get_tag_val(node, xmlname)
  return output_field(newname, value)
def compute_stable_id(opp, org, locstr, openended, duration,
hrs_per_week, startend):
"""core algorithm for computing an opportunity's unique id."""
if DEBUG:
print "opp=" + str(opp) # shuts up pylint
eid = xmlh.get_tag_val(org, "nationalEIN")
if (eid == ""):
# support informal "organizations" that lack EINs
eid = xmlh.get_tag_val(org, "organizationURL")
# TODO: if two providers have same listing, good odds the
# locations will be slightly different...
loc = locstr
# TODO: if two providers have same listing, the time info
# is unlikely to be exactly the same, incl. missing fields
timestr = openended + duration + hrs_per_week + startend
return hashlib.md5(eid + loc + timestr).hexdigest()
def get_abstract(opp):
  """Return a shortened, formatting-free abstract, falling back to the
  description when the opportunity has no abstract of its own."""
  text = xmlh.get_tag_val(opp, "abstract")
  if text == "":
    text = xmlh.get_tag_val(opp, "description")
  # literal \b and \n escape sequences become single spaces
  text = re.sub(r'(\\[bn])+', ' ', text)
  # drop XML-escaped entities like &amp; and &#39;
  text = re.sub(r'&([a-z]+|#[0-9]+);', '', text)
  return text[:MAX_ABSTRACT_LEN]
def get_direct_mapped_fields(opp, org):
  """map a field directly from FPXML to Google Base.

  opp: VolunteerOpportunity DOM node; org: its Organization DOM node.
  Returns a FIELDSEP-joined run of output_field() results; in ABRIDGED
  mode only the abstract is emitted.
  """
  if ABRIDGED:
    outstr = output_field("abstract", get_abstract(opp))
    return outstr
  outstr = ""
  # normalize "paid" to a y/n flag: anything not starting with y/Y means no
  paid = xmlh.get_tag_val(opp, "paid")
  if (paid == "" or paid.lower()[0] != "y"):
    paid = "n"
  else:
    paid = "y"
  outstr += output_field("paid", paid)
  for field in DIRECT_MAP_FIELDS:
    outstr += FIELDSEP + output_tag_value(opp, field)
  for field in ORGANIZATION_FIELDS:
    outstr += FIELDSEP + output_field("org_"+field,
                                      xmlh.get_tag_val(org, field))
  for field in CSV_REPEATED_FIELDS:
    outstr += FIELDSEP
    fieldval = opp.getElementsByTagName(field)
    val = ""
    if (fieldval.length > 0):
      val = flatten_to_csv(fieldval[0])
    outstr += output_field(field, val)
  # abstract
  outstr += FIELDSEP
  outstr += output_field("abstract", get_abstract(opp))
  # orgLocation
  outstr += FIELDSEP
  fieldval = opp.getElementsByTagName("orgLocation")
  if (fieldval.length > 0):
    outstr += output_loc_field(fieldval[0], "orgLocation")
  else:
    outstr += output_field("orgLocation", "")
  # hidden_details
  outstr += FIELDSEP
  fieldval = opp.getElementsByTagName("hiddenDetails")
  if (fieldval.length > 0):
    # bug fix: the arguments to output_field were swapped (the DOM node was
    # passed as the field name); emit the tag's text under "hidden_details"
    outstr += output_field("hidden_details",
                           xmlh.get_tag_val(opp, "hiddenDetails"))
  else:
    outstr += output_field("hidden_details", "some hidden text. asdfghjkl.")
  return outstr
def get_base_other_fields(opp, org):
  """These are fields that exist in other Base schemas-- for the sake of
  possible syndication, we try to make ourselves look like other Base
  feeds. Since we're talking about a small overlap, these fields are
  populated *as well as* direct mapping of the footprint XML fields."""
  employer = output_field("employer", xmlh.get_tag_val(org, "name"))
  if ABRIDGED:
    return employer
  pieces = [
    output_field("quantity", xmlh.get_tag_val(opp, "volunteersNeeded")),
    employer,
    output_field("image_link", xmlh.get_tag_val(org, "logoURL")),
  ]
  # don't map expiration_date -- Base has strict limits (e.g. 2 weeks)
  return FIELDSEP.join(pieces)
def get_event_reqd_fields(opp):
  """Fields required by Google Base, note that they aren't necessarily
  used by the FP app."""
  required = [output_tag_value(opp, "title"),
              output_tag_value(opp, "description"),
              output_field("link", BASE_PUB_URL)]
  return FIELDSEP.join(required)
def get_feed_fields(feedinfo):
  """Fields from the <Feed> portion of FPXML."""
  if ABRIDGED:
    # abridged output only carries the provider name
    return output_tag_value_renamed(feedinfo,
                                    "providerName", "feed_providerName")
  outstr = output_tag_value(feedinfo, "feedID")
  # each remaining feed-level tag is emitted under a "feed_" prefix
  for tag in ("providerID", "providerName", "providerURL",
              "description", "createdDateTime"):
    outstr += FIELDSEP + output_tag_value_renamed(feedinfo, tag, "feed_" + tag)
  return outstr
# geocoding globals: GEOCODE_CACHE maps query -> "lat,lng" and
# RGEOCODE_CACHE maps truncated "lat,lng" -> query; both are lazily
# loaded from GEOCODE_CACHE_FN on first use (None means not loaded yet).
GEOCODE_DEBUG = False
GEOCODE_CACHE = None
RGEOCODE_CACHE = None
GEOCODE_CACHE_FN = "geocode_cache.txt"
def approx_latlng(latlng):
  """Truncate each coordinate in a "lat,lng" string to two decimal places
  (roughly 1km), used as the reverse-geocode cache key."""
  truncated = re.sub(r'([.]\d\d)\d+', r'\1', latlng)
  return truncated
def cleanup_geocode_query(query):
  """Normalize a geocoder query: turn literal escape sequences (backslash
  t/n/r/f/v) into spaces, collapse whitespace runs, lowercase and strip."""
  no_escapes = re.sub(r'\\[tnrfv]', r' ', query)
  collapsed = re.sub(r'\s\s+', r' ', no_escapes)
  return collapsed.lower().strip()
def load_geocode_cache():
  """Populate GEOCODE_CACHE (query -> "lat,lng") and RGEOCODE_CACHE
  (truncated "lat,lng" -> query) from the pipe-delimited cache file.

  Raises IOError if GEOCODE_CACHE_FN does not exist.
  """
  global GEOCODE_CACHE, RGEOCODE_CACHE
  GEOCODE_CACHE = {}
  RGEOCODE_CACHE = {}
  geocode_fh = open(GEOCODE_CACHE_FN, "r")
  try:
    for line in geocode_fh:
      # each cache line is "query|lat,lng"
      # NOTE(review): a line with more than one "|" would raise ValueError
      # on the unpack below-- confirm the cache file never contains one.
      if "|" in line:
        key, val = line.split("|")
        key = cleanup_geocode_query(key)
        latlng = val.strip()
        GEOCODE_CACHE[key] = latlng
        # reverse cache is keyed on truncated coords to boost hit rate
        RGEOCODE_CACHE[approx_latlng(latlng)] = key
        if GEOCODE_DEBUG and len(GEOCODE_CACHE) % 2000 == 0:
          print "read", len(GEOCODE_CACHE), "geocode cache entries."
  finally:
    geocode_fh.close()
def geocode_call(query, retries, parsefunc):
# geocode with google maps, and cache responses
params = urllib.urlencode(
{'q':query, 'output':'csv', 'oe':'utf8', 'sensor':'false',
'key':'ABQIAAAAxq97AW0x5_CNgn6-nLxSrxQuOQhskTx7t90ovP5xOuY' + \
'_YrlyqBQajVan2ia99rD9JgAcFrdQnTD4JQ'})
if GEOCODE_DEBUG:
print datetime.now(), "(reverse)geocoding '" + query + "'..."
maps_fh = urllib.urlopen("http://maps.google.com/maps/geo?%s" % params)
res = maps_fh.readline()
maps_fh.close()
if GEOCODE_DEBUG:
print datetime.now(), "response: "+res
if "," not in res:
# fail and also don't cache
return ""
respcode, zoom, val = parsefunc(res)
zoom = zoom # shutup pylint
if respcode == 500 or respcode == 620:
if GEOCODE_DEBUG:
print datetime.now(), "geocoder quota exceeded-- sleeping..."
time.sleep(1)
return geocode_call(query, retries - 1, parsefunc)
# these results get cached
geocode_fh = open(GEOCODE_CACHE_FN, "a")
if re.match(r'[0-9.+-]+,[0-9.+-]+', val):
# gecoding
cacheline = query + "|" + val
GEOCODE_CACHE[query] = val
RGEOCODE_CACHE[approx_latlng(val)] = query
else:
# reverse geocoding
cacheline = val + "|" + query
GEOCODE_CACHE[val] = query
RGEOCODE_CACHE[approx_latlng(query)] = val
if GEOCODE_DEBUG:
print datetime.now(), "storing cacheline: "+cacheline
else:
print_progress("storing cacheline: "+cacheline)
geocode_fh.write(cacheline + "\n")
geocode_fh.close()
return val
def reverse_geocode(latlng, retries=4):
  """convert a "lat,lng" string to a city-granularity location string,
  consulting the lazily-loaded reverse cache first."""
  global RGEOCODE_CACHE
  latlng = cleanup_geocode_query(latlng)
  latlng = re.sub(r'\s', '', latlng)
  if RGEOCODE_CACHE == None:
    load_geocode_cache()
  # cache lookups use coords truncated to 2 decimals-- close enough for
  # city granularity and boosts the hit rate
  if approx_latlng(latlng) in RGEOCODE_CACHE:
    return RGEOCODE_CACHE[approx_latlng(latlng)]
  def parsefunc(response):
    """parse a maps-API CSV line into (respcode, zoom, location string)."""
    # 200,8,"1475 Broadway, New York, NY 10036, USA"
    match = re.search(r'(\d+),(\d+),"(.+)"', response)
    if match:
      respcode = int(match.group(1))
      zoom = int(match.group(2))
      loc = match.group(3)
      # TODO: total hack to extract the city granularity
      # (keeps only the last three comma-separated components)
      loc = re.sub(r'^.+,([^,]+,[^,]+,[^,]+)$', r'\1', loc).strip()
      return respcode, zoom, loc
    if GEOCODE_DEBUG:
      print datetime.now(), "unparseable response: "+response[0:80]
    # 999 is a local sentinel, not a real maps-API response code
    return 999, 0, ""
  return geocode_call(latlng, retries, parsefunc)
def geocode(addr, retries=4):
  """convert a string addr to a "lat,long" string"""
  global GEOCODE_CACHE
  addr = cleanup_geocode_query(addr)
  # ignore leading/trailing punctuation
  addr = re.sub(r'^[^0-9a-z]+', r'', addr)
  addr = re.sub(r'[^0-9a-z]+$', r'', addr)
  if GEOCODE_CACHE == None:
    load_geocode_cache()
  if addr in GEOCODE_CACHE:
    return GEOCODE_CACHE[addr]
  def parsefunc(locstr):
    """parse a maps-API CSV line into (respcode, zoom, "lat,lng")."""
    match = re.search(r'(\d+),(\d+),([0-9.+-]+,[0-9.+-]+)', locstr)
    if match:
      respcode = int(match.group(1))
      zoom = int(match.group(2))
      latlng = match.group(3)
      return respcode, zoom, latlng
    if GEOCODE_DEBUG:
      print datetime.now(), "unparseable response: "+locstr[0:80]
    # 999 is a local sentinel, not a real maps-API response code
    return 999, 0, UNKNOWN_LATLNG
  return geocode_call(addr, retries, parsefunc)
def output_opportunity(opp, feedinfo, known_orgs, totrecs):
  """main function for outputting a complete opportunity.

  Emits one record per (time, location) combination of the opportunity.

  Args:
    opp: <VolunteerOpportunity> DOM node.
    feedinfo: <FeedInfo> DOM node.
    known_orgs: dict of organizationID -> <Organization> node.
    totrecs: running record counter.
  Returns:
    (updated totrecs, output string-- "" when the opportunity is skipped).
  """
  outstr = ""
  opp_id = xmlh.get_tag_val(opp, "volunteerOpportunityID")
  if (opp_id == ""):
    print_progress("no opportunityID")
    return totrecs, ""
  org_id = xmlh.get_tag_val(opp, "sponsoringOrganizationID")
  if (org_id not in known_orgs):
    print_progress("unknown sponsoringOrganizationID: " +\
                   org_id + ". skipping opportunity " + opp_id)
    return totrecs, ""
  org = known_orgs[org_id]
  opp_locations = opp.getElementsByTagName("location")
  opp_times = opp.getElementsByTagName("dateTimeDuration")
  repeated_fields = get_repeated_fields(feedinfo, opp, org)
  if len(opp_times) == 0:
    opp_times = [ None ]
  for opptime in opp_times:
    if opptime == None:
      # no time given: open-ended with an epoch-ish placeholder start
      startend = convert_dt_to_gbase("1971-01-01", "00:00:00-00:00", "UTC")
      openended = "Yes"
    else:
      # event_date_range
      # e.g. 2006-12-20T23:00:00/2006-12-21T08:30:00, in PST (GMT-8)
      start_date = xmlh.get_tag_val(opptime, "startDate")
      start_time = xmlh.get_tag_val(opptime, "startTime")
      end_date = xmlh.get_tag_val(opptime, "endDate")
      end_time = xmlh.get_tag_val(opptime, "endTime")
      openended = xmlh.get_tag_val(opptime, "openEnded")
      if (start_date == ""):
        start_date = "1971-01-01"
        start_time = "00:00:00-00:00"
      startend = convert_dt_to_gbase(start_date, start_time, "UTC")
      # only append an end when it's actually after the start
      # (lexicographic compare of ISO-ish date+time strings)
      if (end_date != "" and end_date + end_time > start_date + start_time):
        startend += "/"
        startend += convert_dt_to_gbase(end_date, end_time, "UTC")
    # NOTE(review): opptime may be None here-- presumably get_tag_val
    # tolerates a None node and returns ""; confirm.
    duration = xmlh.get_tag_val(opptime, "duration")
    hrs_per_week = xmlh.get_tag_val(opptime, "commitmentHoursPerWeek")
    time_fields = get_time_fields(openended, duration, hrs_per_week, startend)
    if len(opp_locations) == 0:
      opp_locations = [ None ]
    for opploc in opp_locations:
      totrecs = totrecs + 1
      if PROGRESS and totrecs % 250 == 0:
        print_progress(str(totrecs)+" records generated.")
      if opploc == None:
        locstr, latlng, geocoded_loc = ("", "", "")
        loc_fields = get_loc_fields("0.0", "0.0", "0.0", "", "")
      else:
        locstr, latlng, geocoded_loc = lookup_loc_fields(opploc)
        lat = lng = "0.0"
        if latlng != "":
          lat, lng = latlng.split(",")
        # NOTE(review): the +1000.0 offset on lat/lng looks deliberate
        # (downstream encoding for Base?)-- confirm before changing.
        loc_fields = get_loc_fields("", str(float(lat)+1000.0),
                                    str(float(lng)+1000.0), geocoded_loc,
                                    xmlh.get_tag_val(opploc, "name"))
      # stable ID depends on location+time so each record gets its own ID
      opp_id = compute_stable_id(opp, org, locstr, openended, duration,
                                 hrs_per_week, startend)
      outstr += output_field("id", opp_id)
      outstr += repeated_fields
      outstr += time_fields
      outstr += loc_fields
      outstr += RECORDSEP
  return totrecs, outstr
def get_time_fields(openended, duration, hrs_per_week,
                    event_date_range):
  """output time-related fields, e.g. for multiple times per event."""
  if ABRIDGED:
    # abridged output only carries the date range
    return FIELDSEP + output_field("event_date_range", event_date_range)
  named_values = [("openended", openended),
                  ("duration", duration),
                  ("commitmentHoursPerWeek", hrs_per_week),
                  ("event_date_range", event_date_range)]
  time_fields = ""
  for name, value in named_values:
    time_fields += FIELDSEP + output_field(name, value)
  return time_fields
def get_loc_fields(location, latitude, longitude, location_string,
                   venue_name):
  """output location-related fields, e.g. for multiple locations per event."""
  named_values = [("location", location),
                  ("latitude", latitude),
                  ("longitude", longitude),
                  ("location_string", location_string)]
  if not ABRIDGED:
    # the full (non-abridged) output additionally carries the venue name
    named_values.append(("venue_name", venue_name))
  loc_fields = ""
  for name, value in named_values:
    loc_fields += FIELDSEP + output_field(name, value)
  return loc_fields
def get_repeated_fields(feedinfo, opp, org):
  """output all fields that are repeated for each time and location."""
  sections = (get_feed_fields(feedinfo),
              get_event_reqd_fields(opp),
              get_base_other_fields(opp, org),
              get_direct_mapped_fields(opp, org))
  repeated_fields = ""
  for section in sections:
    repeated_fields += FIELDSEP + section
  return repeated_fields
def output_header(feedinfo, opp, org):
  """fake opportunity printer, which prints the header line instead.

  Temporarily flips the module-global PRINTHEAD flag so the output_*
  helpers emit column names rather than values, then restores it.
  Returns "" on every call after the first.
  """
  global PRINTHEAD, HEADER_ALREADY_OUTPUT
  # no matter what, only print the header once!
  if HEADER_ALREADY_OUTPUT:
    return ""
  HEADER_ALREADY_OUTPUT = True
  PRINTHEAD = True
  outstr = output_field("id", "")
  # same field order as output_opportunity, so columns line up
  repeated_fields = get_repeated_fields(feedinfo, opp, org)
  time_fields = get_time_fields("", "", "", "")
  loc_fields = get_loc_fields("", "", "", "", "")
  PRINTHEAD = False
  return outstr + repeated_fields + time_fields + loc_fields + RECORDSEP
def convert_to_footprint_xml(instr, do_fastparse, maxrecs, progress):
  """macro for parsing an FPXML string to XML then format it.

  Args:
    instr: FPXML document as a string.
    do_fastparse: use the regexp-based fast parser instead of a full DOM.
    maxrecs: stop after this many records (<= 0 means unlimited).
    progress: print periodic progress messages.
  Returns:
    pretty-printed FPXML string.
  """
  # (a pulldom-based variant was removed here: it was long-dead commented
  # code and blew up RAM even with pulldom.)
  if do_fastparse:
    # parse_fast also returns org/opp counts, unused by this macro
    res, _numorgs, _numopps = parse_footprint.parse_fast(
      instr, maxrecs, progress)
    return res
  # slow parse
  xmldoc = parse_footprint.parse(instr, maxrecs, progress)
  # TODO: maxrecs
  return xmlh.prettyxml(xmldoc)
def convert_to_gbase_events_type(instr, origname, fastparse, maxrecs, progress):
  """non-trivial logic for converting FPXML to google base formatting.

  Args:
    instr: FPXML input string.
    origname: original input filename (unused here; kept for interface).
    fastparse: chunk the input with regexps instead of a full DOM parse.
    maxrecs: stop after roughly this many opportunities (<= 0: unlimited).
    progress: print periodic progress messages.
  Returns:
    (output string, number of organizations seen, number of opps output).
  """
  # todo: maxrecs
  outstr = ""
  print_progress("convert_to_gbase_events_type...", "", progress)
  example_org = None
  known_orgs = {}
  if fastparse:
    # element-name whitelist for the lightweight parser
    known_elnames = [
      'FeedInfo', 'FootprintFeed', 'Organization', 'Organizations',
      'VolunteerOpportunities', 'VolunteerOpportunity', 'abstract',
      'audienceTag', 'audienceTags', 'categoryTag', 'categoryTags',
      'city', 'commitmentHoursPerWeek', 'contactEmail', 'contactName',
      'contactPhone', 'country', 'createdDateTime', 'dateTimeDuration',
      'dateTimeDurationType', 'dateTimeDurations', 'description',
      'detailURL', 'directions', 'donateURL', 'duration', 'email',
      'endDate', 'endTime', 'expires', 'fax', 'feedID', 'guidestarID',
      'iCalRecurrence', 'language', 'latitude', 'lastUpdated', 'location',
      'locationType', 'locations', 'logoURL', 'longitude', 'minimumAge',
      'missionStatement', 'name', 'nationalEIN', 'openEnded',
      'organizationID', 'organizationURL', 'paid', 'phone', 'postalCode',
      'providerID', 'providerName', 'providerURL', 'region', 'schemaVersion',
      'sexRestrictedEnum', 'sexRestrictedTo', 'skills',
      'sponsoringOrganizationID', 'startDate', 'startTime', 'streetAddress1',
      'streetAddress2', 'streetAddress3', 'title', 'tzOlsonPath', 'virtual',
      'volunteerHubOrganizationID', 'volunteerOpportunityID',
      'volunteersFilled', 'volunteersSlots', 'volunteersNeeded', 'yesNoEnum'
      ]
    numopps = 0
    # NOTE(review): if the input carries no <FeedInfo> chunk, feedinfo is
    # never bound and the opportunity loop below raises NameError--
    # confirm all inputs include FeedInfo.
    feedchunks = re.findall(
      re.compile('<FeedInfo>.+?</FeedInfo>', re.DOTALL), instr)
    for feedchunk in feedchunks:
      print_progress("found FeedInfo.", progress=progress)
      feedinfo = xmlh.simple_parser(feedchunk, known_elnames, False)
    orgchunks = re.findall(
      re.compile('<Organization>.+?</Organization>', re.DOTALL), instr)
    for orgchunk in orgchunks:
      if progress and len(known_orgs) % 250 == 0:
        print_progress(str(len(known_orgs))+" organizations seen.")
      org = xmlh.simple_parser(orgchunk, known_elnames, False)
      org_id = xmlh.get_tag_val(org, "organizationID")
      if (org_id != ""):
        known_orgs[org_id] = org
      # first org seen is used for header generation
      if example_org == None:
        example_org = org
    oppchunks = re.findall(
      re.compile('<VolunteerOpportunity>.+?</VolunteerOpportunity>',
                 re.DOTALL), instr)
    for oppchunk in oppchunks:
      opp = xmlh.simple_parser(oppchunk, None, False)
      if not HEADER_ALREADY_OUTPUT:
        outstr = output_header(feedinfo, opp, example_org)
      numopps, spiece = output_opportunity(opp, feedinfo, known_orgs, numopps)
      outstr += spiece
      if (maxrecs > 0 and numopps > maxrecs):
        break
    # (two commented-out alternate implementations-- an order-preserving
    # single-sweep chunker and a pulldom version-- were removed here.)
  else:
    # not fastparse
    footprint_xml = parse_footprint.parse(instr, maxrecs, progress)
    feedinfos = footprint_xml.getElementsByTagName("FeedInfo")
    if (feedinfos.length != 1):
      print datetime.now(), "bad FeedInfo: should only be one section"
      # TODO: throw error
      sys.exit(1)
    feedinfo = feedinfos[0]
    organizations = footprint_xml.getElementsByTagName("Organization")
    for org in organizations:
      org_id = xmlh.get_tag_val(org, "organizationID")
      if (org_id != ""):
        known_orgs[org_id] = org
    opportunities = footprint_xml.getElementsByTagName("VolunteerOpportunity")
    numopps = 0
    for opp in opportunities:
      if numopps == 0:
        outstr += output_header(feedinfo, opp, organizations[0])
      numopps, spiece = output_opportunity(opp, feedinfo, known_orgs, numopps)
      outstr += spiece
  return outstr, len(known_orgs), numopps
def guess_shortname(filename):
  """from the input filename, guess which feed this is.

  Returns the feed's short name, or "" when nothing matches.
  Patterns are tried in order; the first match wins.
  """
  feed_patterns = [
    ("usa-?service", "usaservice"),
    (r'meetup', "meetup"),
    (r'barackobama[.]com', "mybarackobama"),
    (r'united.*way', "unitedway"),
    ("habitat", "habitat"),
    ("americansolutions", "americansolutions"),
    ("spreadsheets[.]google[.]com", "gspreadsheet"),
    ("(handson|hot.footprint)", "handsonnetwork"),
    ("(volunteer[.]gov)", "volunteergov"),
    ("(whichoneis.com|beextra[.]org)", "extraordinaries"),
    ("idealist", "idealist"),
    ("(userpostings|/export/Posting)", "footprint_userpostings"),
    ("craigslist", "craigslist"),
    ("americorps", "americorps"),
    ("servenet", "servenet"),
    ("volunteermatch", "volunteermatch"),
  ]
  for pattern, shortname in feed_patterns:
    if re.search(pattern, filename):
      return shortname
  return ""
def ftp_to_base(filename, ftpinfo, instr):
  """ftp the string to base, guessing the feed name from the orig filename.

  Args:
    filename: original input filename (only used to guess the feed name).
    ftpinfo: "user:password" credentials for uploads.google.com.
    instr: full payload to upload.
  """
  # imported lazily so the module loads without these available
  ftplib = __import__('ftplib')
  stringio = __import__('StringIO')
  dest_fn = guess_shortname(filename)
  if dest_fn == "":
    dest_fn = "footprint1.txt"
  else:
    dest_fn = dest_fn + "1.gz"
  if re.search(r'[.]gz$', dest_fn):
    # known feeds upload compressed: write a local .gz then stream it
    print_progress("compressing data from "+str(len(instr))+" bytes", filename)
    gzip_fh = gzip.open(dest_fn, 'wb', 9)
    gzip_fh.write(instr)
    gzip_fh.close()
    data_fh = open(dest_fn, 'rb')
  else:
    data_fh = stringio.StringIO(instr)
  host = 'uploads.google.com'
  (user, passwd) = ftpinfo.split(":")
  print_progress("connecting to " + host + " as user " + user + "...", filename)
  ftp = ftplib.FTP(host)
  welcomestr = re.sub(r'\n', '\\n', ftp.getwelcome())
  print_progress("FTP server says: "+welcomestr, filename)
  ftp.login(user, passwd)
  print_progress("uploading filename "+dest_fn, filename)
  success = False
  # NOTE(review): this loop retries forever-- it only exits on success,
  # so the "giving up." branch below is unreachable; consider a retry cap.
  while not success:
    try:
      ftp.storbinary("STOR " + dest_fn, data_fh, 8192)
      success = True
    except:
      # probably ftplib.error_perm: 553: Permission denied on server. (Overwrite)
      # NOTE(review): bare except also swallows KeyboardInterrupt--
      # narrowing to ftplib.all_errors would be safer.
      print_progress("upload failed-- sleeping and retrying...")
      time.sleep(1)
  if success:
    print_progress("done uploading.")
  else:
    print_progress("giving up.")
  ftp.quit()
  data_fh.close()
def guess_parse_func(inputfmt, filename):
  """from the filename and the --inputfmt,guess the input type and parse func

  Returns (format name, parse function). Checks run in order: an explicit
  inputfmt match wins, otherwise the filename is pattern-matched; exits the
  process when nothing matches.
  """
  if inputfmt == "fpxml" or re.search(r'fpxml', filename):
    return "fpxml", parse_footprint.parse
  if (inputfmt == "gspreadsheet" or
      (inputfmt == None and
       re.search(r'spreadsheets[.]google[.]com', filename))):
    return "gspreadsheet", parse_gspreadsheet.parse
  if (inputfmt == "usaservice" or inputfmt == "usasvc" or
      (inputfmt == None and re.search(r'usa-?service', filename))):
    return "usaservice", parse_usaservice.parse
  if (inputfmt == "craigslist" or inputfmt == "cl" or
      (inputfmt == None and re.search(r'craigslist', filename))):
    return "craigslist", parse_craigslist.parse
  if (inputfmt == "americorps" or
      (inputfmt == None and re.search(r'americorps', filename))):
    return "americorps", parse_americorps.parse
  if (inputfmt == "servenet" or
      (inputfmt == None and re.search(r'servenet', filename))):
    return "servenet", parse_servenet.parse
  if (inputfmt == "handson" or inputfmt == "handsonnetwork"):
    return "handsonnetwork", parse_handsonnetwork.parse
  # the feeds below all ship FPXML directly
  if (inputfmt == None and re.search(r'united.*way', filename)):
    return "fpxml", parse_footprint.parse
  if (inputfmt == None and re.search(r'barackobama[.]com', filename)):
    return "fpxml", parse_footprint.parse
  if (inputfmt == None and re.search(r'(handson|hot.footprint)', filename)):
    # now using FPXML
    #parsefunc = parse_handsonnetwork.ParseFPXML
    return "fpxml", parse_footprint.parse
  if (inputfmt == None and re.search(r'habitat', filename)):
    def parse_habitat(instr, maxrecs, progress):
      """wrapper that repairs habitat's bad escaping before parsing."""
      # fixup bad escaping
      newstr = re.sub(r'&code=', '&amp;code=', instr)
      return parse_footprint.parse_fast(newstr, maxrecs, progress)
    return "badfpxml", parse_habitat
  if (inputfmt == None and re.search(r'volunteer[.]gov', filename)):
    return "fpxml", parse_footprint.parse
  if (inputfmt == None and re.search(r'americansolutions', filename)):
    return "fpxml", parse_footprint.parse
  if (inputfmt == None and re.search(r'(whichoneis[.]com|beextra[.]org)',
                                     filename)):
    return "fpxml", parse_footprint.parse
  if inputfmt == "idealist":
    return "idealist", parse_idealist.parse
  if (inputfmt == None and re.search(r'idealist', filename)):
    return "fpxml", parse_footprint.parse
  if (inputfmt == None and re.search(r'meetup', filename)):
    return "fpxml", parse_footprint.parse
  if (inputfmt == "fp_userpostings" or
      (inputfmt == None and re.search(r'(userpostings|/export/Posting)',
                                      filename))):
    return "fp_userpostings", parse_userpostings.parse
  if (inputfmt == "volunteermatch" or inputfmt == "vm" or
      (inputfmt == None and re.search(r'volunteermatch', filename))):
    return "volunteermatch", parse_volunteermatch.parse
  print datetime.now(), "unknown input format-- try --inputfmt"
  sys.exit(1)
def clean_input_string(instr):
  """run various cleanups for low-level encoding issues."""
  def report(msg):
    """macro: progress message plus the current byte count."""
    print_progress(msg+": "+str(len(instr))+" bytes.")
  report("read file")
  # ordered (pattern, replacement, progress message) cleanup passes
  substitutions = [
    (r'\r\n?', "\n", "filtered DOS newlines"),
    (r'(?:\t|&#9;)', " ", "filtered tabs"),
    (r'\xc2?[\x93\x94\222]', "'", "filtered iso8859-1 single quotes"),
    (r'\xc2?[\223\224]', '"', "filtered iso8859-1 double quotes"),
    (r'\xc2?[\225\226\227]', "-", "filtered iso8859-1 dashes"),
  ]
  for pattern, replacement, message in substitutions:
    instr = re.sub(pattern, replacement, instr)
    report(message)
  instr = xmlh.clean_string(instr)
  report("filtered nonprintables")
  instr = re.sub(r'&[a-z]+;', '', instr)
  report("filtered weird X/HTML escapes")
  return instr
def parse_options():
  """parse cmdline options

  Mutates the module globals declared below from the parsed flags and
  returns (options, args). Exits with usage help when no input is given.
  """
  global DEBUG, PROGRESS, GEOCODE_DEBUG, FIELDSEP, RECORDSEP, ABRIDGED
  global OUTPUTFMT
  parser = OptionParser("usage: %prog [options] sample_data.xml ...")
  parser.set_defaults(geocode_debug=False)
  parser.set_defaults(debug=False)
  parser.set_defaults(abridged=False)
  parser.set_defaults(progress=False)
  parser.set_defaults(debug_input=False)
  parser.set_defaults(outputfmt="basetsv")
  parser.set_defaults(output="")
  parser.set_defaults(test=False)
  parser.set_defaults(clean=True)
  parser.set_defaults(maxrecs=-1)
  parser.add_option("-d", "--dbg", action="store_true", dest="debug")
  parser.add_option("--abridged", action="store_true", dest="abridged")
  parser.add_option("--noabridged", action="store_false", dest="abridged")
  parser.add_option("--clean", action="store_true", dest="clean")
  parser.add_option("--noclean", action="store_false", dest="clean")
  parser.add_option("--inputfmt", action="store", dest="inputfmt")
  parser.add_option("--test", action="store_true", dest="test")
  parser.add_option("--dbginput", action="store_true", dest="debug_input")
  parser.add_option("--progress", action="store_true", dest="progress")
  parser.add_option("--outputfmt", action="store", dest="outputfmt")
  parser.add_option("--output", action="store", dest="output")
  parser.add_option("-g", "--geodbg", action="store_true", dest="geocode_debug")
  parser.add_option("--ftpinfo", dest="ftpinfo")
  parser.add_option("--fs", "--fieldsep", action="store", dest="fs")
  parser.add_option("--rs", "--recordsep", action="store", dest="rs")
  parser.add_option("-n", "--maxrecords", action="store", dest="maxrecs")
  (options, args) = parser.parse_args(sys.argv[1:])
  if (len(args) == 0):
    parser.print_help()
    sys.exit(0)
  if options.fs != None:
    FIELDSEP = options.fs
  if options.rs != None:
    RECORDSEP = options.rs
  if (options.debug):
    # --dbg implies geocode debug + progress, and newline field separators
    DEBUG = True
    GEOCODE_DEBUG = True
    PROGRESS = True
    FIELDSEP = "\n"
  if (options.abridged):
    ABRIDGED = True
  if (options.geocode_debug):
    GEOCODE_DEBUG = True
  if options.test:
    options.progress = True
  if (options.progress):
    PROGRESS = True
  # NOTE(review): outputfmt defaults to "basetsv" (always truthy), so this
  # branch can never fire-- confirm the intended ftpinfo behavior.
  if options.ftpinfo and not options.outputfmt:
    options.outputfmt = "basetsv"
  OUTPUTFMT = options.outputfmt
  return options, args
def open_input_filename(filename):
  """handle different file/URL opening methods.

  Returns a readable file-like object: a urllib handle for http(s) URLs
  (transparently gunzipping .gz URLs through a temp file), a gzip handle
  for local .gz files, stdin for "-", else a plain binary file handle.
  """
  if re.search(r'^https?://', filename):
    print_progress("starting download of "+filename)
    outfh = urllib.urlopen(filename)
    if (re.search(r'[.]gz$', filename)):
      # is there a way to fetch and unzip an URL in one shot?
      print_progress("ah, gzip format.")
      content = outfh.read()
      outfh.close()
      print_progress("download done.")
      # bugfix: md5() was computed over *no* data, so every run used the
      # identical temp filename (md5 of "")-- hash the URL instead so
      # concurrent runs on different feeds don't clobber each other.
      tmp_fn = "/tmp/tmp-"+hashlib.md5(filename.encode("utf-8")).hexdigest()
      tmpfh = open(tmp_fn, "wb+")
      tmpfh.write(content)
      tmpfh.close()
      outfh = gzip.open(tmp_fn, 'rb')
    return outfh
  elif re.search(r'[.]gz$', filename):
    return gzip.open(filename, 'rb')
  elif filename == "-":
    return sys.stdin
  return open(filename, 'rb')
def test_parse(footprint_xmlstr, maxrecs):
  """run the data through and then re-parse the output.

  Round-trips FPXML through the converter twice: a stable fixed point
  (identical md5 of both passes) means parser and generator agree;
  otherwise the two outputs are written to /tmp and diffed.
  """
  print datetime.now(), "testing input: generating Footprint XML..."
  fpxml = convert_to_footprint_xml(footprint_xmlstr, True, int(maxrecs), True)
  # free some RAM
  del footprint_xmlstr
  print datetime.now(), "testing input: parsing and regenerating FPXML..."
  fpxml2 = convert_to_footprint_xml(fpxml, True, int(maxrecs), True)
  print datetime.now(), "testing input: comparing outputs..."
  hash1 = hashlib.md5(fpxml).hexdigest()
  hash2 = hashlib.md5(fpxml2).hexdigest()
  fn1 = "/tmp/pydiff-"+hash1
  fn2 = "/tmp/pydiff-"+hash2
  if hash1 == hash2:
    print datetime.now(), "success: getting head...\n"
    outfh = open(fn1, "w+")
    outfh.write(fpxml)
    outfh.close()
    subprocess.call(['head', fn1])
  else:
    print datetime.now(), "errors-- hash1=" + hash1 + " hash2=" + \
        hash2 + " running diff", fn1, fn2
    outfh = open(fn1, "w+")
    outfh.write(fpxml)
    outfh.close()
    outfh = open(fn2, "w+")
    outfh.write(fpxml2)
    outfh.close()
    subprocess.call(['diff', fn1, fn2])
    # grr-- difflib performance sucks
    #for line in difflib.unified_diff(fpxml, fpxml2, \
    #  fromfile='(first output)', tofile='(second output)'):
    #print line
def process_file(filename, options, providerName="", providerID="",
providerURL=""):
shortname = guess_shortname(filename)
inputfmt, parsefunc = guess_parse_func(options.inputfmt, filename)
infh = open_input_filename(filename)
print_progress("reading data...")
# don't put this inside open_input_filename() because it could be large
instr = infh.read()
print_progress("done reading data.")
# remove bad encodings etc.
if options.clean:
instr = clean_input_string(instr)
# split nasty XML inputs, to help isolate problems
if options.debug_input:
instr = re.sub(r'><', r'>\n<', instr)
print_progress("inputfmt: "+inputfmt)
print_progress("outputfmt: "+options.outputfmt)
print_status("input data: "+str(len(instr))+" bytes", shortname)
if inputfmt == "fpxml":
footprint_xmlstr = instr
else:
print_progress("parsing "+inputfmt+"...")
assert parsefunc != parse_footprint.parse
footprint_xmlstr, numorgs, numopps = \
parsefunc(instr, int(options.maxrecs), PROGRESS)
if (providerID != "" and
footprint_xmlstr.find('<providerID></providerID>')):
footprint_xmlstr = re.sub(
'<providerID></providerID>',
'<providerID>%s</providerID>' % providerID, footprint_xmlstr)
if (providerName != "" and
footprint_xmlstr.find('<providerName></providerName>')):
footprint_xmlstr = re.sub(
'<providerName></providerName>',
'<providerName>%s</providerName>' % providerName, footprint_xmlstr)
if (providerURL != "" and
footprint_xmlstr.find('<providerURL></providerURL>')):
footprint_xmlstr = re.sub(
'<providerURL></providerURL>',
'<providerURL>%s</providerURL>' % providerURL, footprint_xmlstr)
if options.test:
# free some RAM
del instr
test_parse(footprint_xmlstr, options.maxrecs)
sys.exit(0)
fastparse = not options.debug_input
if OUTPUTFMT == "fpxml":
# TODO: pretty printing option
print convert_to_footprint_xml(footprint_xmlstr, fastparse,
int(options.maxrecs), PROGRESS)
sys.exit(0)
if OUTPUTFMT != "basetsv":
print >> sys.stderr, datetime.now(), \
"--outputfmt not implemented: try 'basetsv','fpbasetsv' or 'fpxml'"
sys.exit(1)
outstr, numorgs, numopps = convert_to_gbase_events_type(
footprint_xmlstr, shortname, fastparse, int(options.maxrecs), PROGRESS)
return len(footprint_xmlstr), numorgs, numopps, outstr
def main():
"""main function for cmdline execution."""
start_time = datetime.now()
options, args = parse_options()
filename = args[0]
if re.search("spreadsheets[.]google[.]com", filename):
if OUTPUTFMT == "fpxml":
pgs.parser_error("FPXML format not supported for "+
"spreadsheet-of-spreadsheets")
sys.exit(1)
match = re.search(r'key=([^& ]+)', filename)
url = "http://spreadsheets.google.com/feeds/cells/" + match.group(1)
url += "/1/public/basic"
# to avoid hitting 80 columns
pgs = parse_gspreadsheet
data = {}
updated = {}
if PROGRESS:
print "processing spreadsheet", url
maxrow, maxcol = pgs.read_gspreadsheet(url, data, updated, PROGRESS)
header_row, header_startcol = pgs.find_header_row(data, 'provider name')
# check to see if there's a header-description row
header_desc = pgs.cellval(data, header_row+1, header_startcol)
if not header_desc:
pgs.parser_error("blank row not allowed below header row")
sys.exit(1)
header_desc = header_desc.lower()
data_startrow = header_row + 1
if header_desc.find("example") >= 0:
data_startrow += 1
bytes = numorgs = numopps = 0
outstr = ""
for row in range(data_startrow, int(maxrow)+1):
providerName = pgs.cellval(data, row, header_startcol)
providerID = pgs.cellval(data, row, header_startcol+1)
providerURL = pgs.cellval(data, row, header_startcol+2)
if providerName == "" or providerID == "" or providerURL == "":
continue
match = re.search(r'key=([^& ]+)', providerURL)
providerURL = "http://spreadsheets.google.com/feeds/cells/"
providerURL += match.group(1)
providerURL += "/1/public/basic"
if PROGRESS:
print "processing spreadsheet", providerURL, "name="+providerName
providerBytes, providerNumorgs, providerNumopps, tmpstr = process_file(
providerURL, options, providerName, providerID, providerURL)
bytes += providerBytes
numorgs += providerNumorgs
numopps += providerNumopps
outstr += tmpstr
else:
bytes, numorgs, numopps, outstr = process_file(filename, options)
#only need this if Base quoted fields it enabled
#outstr = re.sub(r'"', r'"', outstr)
if (options.ftpinfo):
ftp_to_base(filename, options.ftpinfo, outstr)
elif options.output == "":
print outstr,
else:
outfh = open(options.output, "w")
outfh.write(outstr)
outfh.close()
elapsed = datetime.now() - start_time
# NOTE: if you change this, you also need to update datahub/load_gbase.py
# and frontend/views.py to avoid breaking the dashboard-- other status
# messages don't matter.
shortname = guess_shortname(filename)
xmlh.print_status("done parsing: output " + str(numorgs) + " organizations" +
" and " + str(numopps) + " opportunities" +
" (" + str(bytes) + " bytes): " +
str(int(elapsed.seconds/60)) + " minutes.",
shortname, PROGRESS)
# script entry point
if __name__ == "__main__":
  main()
| Python |
#!/usr/bin/env python
#
"""
script for loading into googlebase.
Usage: load_gbase.py username password
"""
import sys
import re
import logging
import subprocess
from datetime import datetime
import footprint_lib
USERNAME = ""
PASSWORD = ""
LOGPATH = "/home/footprint/public_html/datahub/dashboard/"
LOG_FN = LOGPATH + "load_gbase.log"
DETAILED_LOG_FN = LOGPATH + "load_gbase_detail.log"
# this file needs to be copied over to frontend/autocomplete/
POPULAR_WORDS_FN = "popular_words.txt"
FIELD_STATS_FN = "field_stats.txt"
STOPWORDS = set([
'a', 'about', 'above', 'across', 'after', 'afterwards', 'again', 'against',
'all', 'almost', 'alone', 'along', 'already', 'also', 'although', 'always',
'am', 'among', 'amongst', 'amoungst', 'amount', 'an', 'and', 'another', 'any',
'anyhow', 'anyone', 'anything', 'anyway', 'anywhere', 'are', 'around', 'as',
'at', 'back', 'be', 'became', 'because', 'become', 'becomes', 'becoming',
'been', 'before', 'beforehand', 'behind', 'being', 'below', 'beside',
'besides', 'between', 'beyond', 'bill', 'both', 'bottom', 'but', 'by', 'call',
'can', 'cannot', 'cant', 'co', 'computer', 'con', 'could', 'couldnt', 'cry',
'de', 'describe', 'detail', 'do', 'done', 'down', 'due', 'during', 'each',
'eg', 'eight', 'either', 'eleven', 'else', 'elsewhere', 'empty', 'enough',
'etc', 'even', 'ever', 'every', 'everyone', 'everything', 'everywhere',
'except', 'few', 'fifteen', 'fify', 'fill', 'find', 'fire', 'first', 'five',
'for', 'former', 'formerly', 'forty', 'found', 'four', 'from', 'front','full',
'further', 'get', 'give', 'go', 'had', 'has', 'hasnt', 'have', 'he', 'hence',
'her', 'here', 'hereafter', 'hereby', 'herein', 'hereupon', 'hers', 'herself',
'him', 'himself', 'his', 'how', 'however', 'hundred', 'i', 'ie', 'if', 'in',
'inc', 'indeed', 'interest', 'into', 'is', 'it', 'its', 'itself', 'keep',
'last', 'latter', 'latterly', 'least', 'less', 'ltd', 'made', 'many', 'may',
'me', 'meanwhile', 'might', 'mill', 'mine', 'more', 'moreover', 'most',
'mostly', 'move', 'much', 'must', 'my', 'myself', 'name', 'namely', 'neither',
'never', 'nevertheless', 'next', 'nine', 'no', 'nobody', 'none', 'noone',
'nor', 'not', 'nothing', 'now', 'nowhere', 'of', 'off', 'often', 'on', 'once',
'one', 'only', 'onto', 'or', 'other', 'others', 'otherwise', 'our', 'ours',
'ourselves', 'out', 'over', 'own', 'part', 'per', 'perhaps', 'please', 'put',
'rather', 're', 'same', 'see', 'seem', 'seemed', 'seeming', 'seems',
'serious', 'several', 'she', 'should', 'show', 'side', 'since', 'sincere',
'six', 'sixty', 'so', 'some', 'somehow', 'someone', 'something', 'sometime',
'sometimes', 'somewhere', 'still', 'such', 'system', 'take', 'ten', 'than',
'that', 'the', 'their', 'them', 'themselves', 'then', 'thence', 'there',
'thereafter', 'thereby', 'therefore', 'therein', 'thereupon', 'these', 'they',
'thick', 'thin', 'third', 'this', 'those', 'though', 'three', 'through',
'throughout', 'thru', 'thus', 'to', 'together', 'too', 'top', 'toward',
'towards', 'twelve', 'twenty', 'two', 'un', 'under', 'until', 'up', 'upon',
'us', 'very', 'via', 'was', 'we', 'well', 'were', 'what', 'whatever', 'when',
'whence', 'whenever', 'where', 'whereafter', 'whereas', 'whereby', 'wherein',
'whereupon', 'wherever', 'whether', 'which', 'while', 'whither', 'who',
'whoever', 'whole', 'whom', 'whose', 'why', 'will', 'with', 'within',
'without', 'would', 'yet', 'you', 'your', 'yours', 'yourself', 'yourselves',
# custom stopwords for footprint
'url', 'amp', 'quot', 'help', 'http', 'search', 'nbsp', 'need', 'cache',
'vol', 'housingall', 'wantedall', 'personalsall', 'net', 'org', 'www',
'gov', 'yes', 'no', '999',
])
def print_progress(msg):
"""print progress message-- shutup pylint"""
print str(datetime.now())+": "+msg
KNOWN_WORDS = {}
def process_popular_words(content):
  """Accumulate word frequencies from content into the KNOWN_WORDS histogram."""
  # TODO: handle phrases (via whitelist, then later do something smart).
  print_progress("cleaning content: %d bytes" % len(content))
  cleaner_regexp = re.compile('<[^>]*>', re.DOTALL)
  cleaned_content = cleaner_regexp.sub('', content).lower()
  print_progress("splitting words, %d bytes" % len(cleaned_content))
  words = re.split(r'[^a-zA-Z0-9]+', cleaned_content)
  print_progress("loading words")
  for word in words:
    # skip common english words and very short tokens
    if word in STOPWORDS or len(word) <= 2:
      continue
    KNOWN_WORDS[word] = KNOWN_WORDS.get(word, 0) + 1
  print_progress("cleaning rare words from %d words" % len(KNOWN_WORDS))
  # prune singletons to keep RAM usage down
  for word in list(KNOWN_WORDS.keys()):
    if KNOWN_WORDS[word] < 2:
      del KNOWN_WORDS[word]
  print_progress("done: word dict size %d words" % len(KNOWN_WORDS))
def print_word_stats():
  """Write the popular-words file, keeping only frequent words."""
  print_progress("final cleanse: keeping only words appearing 10 times")
  for word in list(KNOWN_WORDS.keys()):
    if KNOWN_WORDS[word] < 10:
      del KNOWN_WORDS[word]
  # most frequent first; stable for ties, same as the old cmp-based sort
  sorted_words = sorted(KNOWN_WORDS.iteritems(),
                        key=lambda pair: pair[1], reverse=True)
  print_progress("writing "+POPULAR_WORDS_FN+"...")
  popfh = open(POPULAR_WORDS_FN, "w")
  for word, freq in sorted_words:
    popfh.write("%d\t%s\n" % (freq, word))
  popfh.close()
  print_progress("done writing "+POPULAR_WORDS_FN)
FIELD_VALUES = None
FIELD_NAMES = None
NUM_RECORDS_TOTAL = 0
def process_field_stats(content):
  """Update the per-field value histograms from one TSV blob."""
  global FIELD_NAMES, FIELD_VALUES, NUM_RECORDS_TOTAL
  for lineno, line in enumerate(content.splitlines()):
    fields = line.split("\t")
    if lineno == 0:
      # first line is the TSV header; remember it the first time we see one
      if FIELD_NAMES is None:
        FIELD_NAMES = fields
        FIELD_VALUES = [{} for _ in fields]
      continue
    NUM_RECORDS_TOTAL += 1
    for i, val in enumerate(fields):
      histogram = FIELD_VALUES[i]
      # cap stored values to keep memory bounded
      val = val[:300]
      histogram[val] = histogram.get(val, 0) + 1
def print_field_stats():
  """Write the field-value statistics file."""
  print_progress("writing "+FIELD_STATS_FN+"...")
  outfh = open(FIELD_STATS_FN, "w")
  outfh.write("number of records: "+str(NUM_RECORDS_TOTAL)+"\n")
  for i, fieldname in enumerate(FIELD_NAMES):
    outfh.write("field "+fieldname+":\n")
    # most frequent values first; stop at 1000 values or freq < 10
    ranked = sorted(FIELD_VALUES[i].iteritems(),
                    key=lambda pair: pair[1], reverse=True)
    for val, freq in ranked[:1000]:
      if freq < 10:
        break
      outfh.write(" %5d %s\n" % (freq, val))
  outfh.close()
  print_progress("done writing "+FIELD_STATS_FN)
def append_log(outstr):
  """Append outstr to the detailed log, and its STATUS/ERROR lines to the short log."""
  detailfh = open(DETAILED_LOG_FN, "a")
  detailfh.write(outstr)
  detailfh.close()
  # the short log keeps only the lines the dashboard cares about
  interesting = re.compile(r'(STATUS|ERROR)')
  shortfh = open(LOG_FN, "a")
  for line in outstr.split('\n'):
    if interesting.search(line):
      shortfh.write(line+"\n")
  shortfh.close()
def error_exit(msg):
  """Print an error message to stderr and exit with status 1."""
  sys.stderr.write(str(msg) + "\n")
  sys.exit(1)
# Use a shell for subcommands on Windows to get a PATH search.
USE_SHELL = sys.platform.startswith("win")
def run_shell_with_retcode(command, print_output=False,
                           universal_newlines=True):
  """Executes a command and returns the output from stdout and the return code.

  Args:
    command: Command to execute (argument list; string only works on Windows).
    print_output: If True, the output is printed to stdout.
      If False, both stdout and stderr are ignored.
    universal_newlines: Use universal_newlines flag (default: True).

  Returns:
    Tuple (stdout output, stderr output, return code).
  """
  logging.info("Running %s", command)
  # USE_SHELL is True only on Windows, where a shell is needed for PATH search.
  proc = subprocess.Popen(command, stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE,
                          shell=USE_SHELL,
                          universal_newlines=universal_newlines)
  if print_output:
    # echo stdout line-by-line while also collecting it
    output_array = []
    while True:
      line = proc.stdout.readline()
      if not line:
        break
      print line.strip("\n")
      output_array.append(line)
    output = "".join(output_array)
  else:
    output = proc.stdout.read()
  proc.wait()
  # NOTE(review): stderr is drained only after stdout is fully read; a child
  # that fills the stderr pipe buffer first could deadlock this -- confirm
  # whether subprocess communicate() should be used instead.
  errout = proc.stderr.read()
  if print_output and errout:
    print >> sys.stderr, errout
  proc.stdout.close()
  proc.stderr.close()
  # both streams go to the dashboard logs
  append_log(output)
  append_log(errout)
  return output, errout, proc.returncode
def run_shell(command, silent_ok=False, universal_newlines=True,
              print_output=False):
  """Run a shell command, returning (stdout, stderr, returncode)."""
  out, err, code = run_shell_with_retcode(command, print_output,
                                          universal_newlines)
  # non-zero exit is tolerated here; callers inspect the returncode themselves
  #if code and code != 0:
  #  error_exit("Got error status from %s:\n%s\n%s" % (command, out, err))
  if not (silent_ok or out):
    error_exit("No output from %s" % command)
  return out, err, code
def load_gbase(name, url):
  """Fetch one provider feed via footprint_lib, collect stats, FTP to Base.

  Args:
    name: short provider name, used for filenames and log tags.
    url: feed URL (or local filename) passed to footprint_lib.py.
  """
  print_progress("loading "+name+" from "+url)
  # run as a subprocess so we can ignore failures and keep going.
  # later, we'll run these concurrently, but for now we're RAM-limited.
  # ignore retcode
  tsv_filename = "out-"+name+".tsv"
  stdout, stderr, retcode = run_shell(["./footprint_lib.py", "--progress",
                                       #"--ftpinfo", USERNAME+":"+PASSWORD,
                                       "--output", tsv_filename, url],
                                      silent_ok=True, print_output=False)
  print stdout,
  # surface the child's stderr and exit status, tagged with the provider name
  if stderr and stderr != "":
    print name+":STDERR: ", re.sub(r'\n', '\n'+name+':STDERR: ', stderr)
  if retcode and retcode != 0:
    print name+":RETCODE: "+str(retcode)
  infh = open(tsv_filename, "r")
  tsv_data = infh.read()
  infh.close()
  # accumulate dashboard statistics from the generated TSV
  process_field_stats(tsv_data)
  process_popular_words(tsv_data)
  print_progress("ftp'ing to base")
  footprint_lib.ftp_to_base(name, USERNAME+":"+PASSWORD, tsv_data)
  print_progress("load_gbase: done.")
def test_loaders():
  """For testing: load each provider from local disk as much as possible."""
  # same providers as loaders(), but pointed at local files/cached copies
  load_gbase("servenet", "servenet.xml")
  load_gbase("unitedway", "unitedway.xml")
  load_gbase("americansolutions", "americansolutions.xml")
  load_gbase("meetup", "meetup.xml")
  load_gbase("extraordinaries", "beextra-extraordinaries.xml")
  load_gbase("idealist", "idealist.xml")
  load_gbase("gspreadsheets",
             "https://spreadsheets.google.com/ccc?key=rOZvK6aIY7HgjO-hSFKrqMw")
  load_gbase("craigslist", "craigslist-cache.txt")
  load_gbase("americorps", "americorps-xml_ac_recruitopps.xml.gz")
  load_gbase("volunteer.gov", "volunteergov.xml")
  load_gbase("handson", "hot.footprint.xml.gz")
def loaders():
  """Run every production provider load, in sequence, in one place for easier testing."""
  load_gbase("mybarackobama",
             "http://my.barackobama.com/page/event/search_results?"+
             "format=footprint")
  load_gbase("servenet",
             "http://servenet.org/test/temp/SERVEnetOpportunities001.xml")
  load_gbase("unitedway",
             "http://volunteer.united-e-way.org/"+
             "uwnyc/util/voml/uwnyc-footprint-pull.aspx")
  load_gbase("habitat", "http://www.habitat.org/cd/gv/schedule_to_xml.aspx")
  load_gbase("americansolutions",
             "http://www.americansolutions.com/footprint/footprint.xml")
  load_gbase("meetup", "http://api.meetup.com/footprint?"+
             "key=2c24625a70343bb68451e337e714b22")
  # old custom feed
  #load_gbase("idealist", "http://feeds.idealist.org/xml/feeds/"+
  #           "Idealist-VolunteerOpportunity-VOLUNTEER_OPPORTUNITY_TYPE."+
  #           "en.open.atom.gz")
  load_gbase("extraordinaries", "http://app.beextra.org/opps/list/format/xml")
  load_gbase("idealist", "http://feeds.idealist.org/xml/"+
             "footprint-volunteer-opportunities.xml")
  load_gbase("gspreadsheets",
             "https://spreadsheets.google.com/ccc?key=rOZvK6aIY7HgjO-hSFKrqMw")
  # note: craiglist crawler is run async to this
  load_gbase("craigslist", "craigslist-cache.txt")
  load_gbase("americorps",
             "http://www.americorps.gov/xmlfeed/xml_ac_recruitopps.xml.gz")
  load_gbase("volunteer.gov", "http://www.volunteer.gov/footprint.xml")
  load_gbase("handson",
             "http://archive.handsonnetwork.org/feeds/hot.footprint.xml.gz")
def main():
  """Parse gbase credentials from argv, run all loaders, then dump stats."""
  global USERNAME, PASSWORD
  if len(sys.argv) < 3:
    print "Usage:", sys.argv[0], "<gbase username> <password>"
    sys.exit(1)
  USERNAME = sys.argv[1]
  PASSWORD = sys.argv[2]
  # swap these two lines to run against local test files instead
  #test_loaders()
  loaders()
  # write popular_words.txt and field_stats.txt for the frontend/dashboard
  print_word_stats()
  print_field_stats()
if __name__ == "__main__":
main()
| Python |
#!/usr/bin/python
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#http://usaservice.org/page/event/search_results?orderby=day&state=CA&country=US&event_type%5b%5d=&limit=1000&radius_unit=miles&format=commons_rss&wrap=no
from xml.dom import minidom
import sys
import os
import urllib
import re
import thread
import time
from datetime import datetime
import socket
DEFAULT_TIMEOUT = 30
socket.setdefaulttimeout(DEFAULT_TIMEOUT)
STATES = ['AA','AE','AK','AL','AP','AR','AS','AZ','CA','CO','CT','DC','DE','FL','FM','GA','GU','HI','IA','ID','IL','IN','KS','KY','LA','MA','MD','ME','MH','MI','MN','MO','MP','MS','MT','NC','ND','NE','NH','NJ','NM','NV','NY','OH','OK','OR','PA','PR','PW','RI','SC','SD','TN','TX','UT','VA','VI','VT','WA','WI','WV','WY','AB','BC','MB','NB','NL','NT','NS','NU','ON','PE','QC','SK','YT','na']
OUTPUT_FN = "usaservice.txt"
file_lock = thread.allocate_lock()
crawlers = 0
crawlers_lock = thread.allocate_lock()
def get_url(state):
  """Build the usaservice.org commons_rss search URL for the given state code."""
  query = ("&country=US&event_type%5b%5d=&limit=1000"
           "&radius_unit=miles&format=commons_rss&wrap=no")
  return ("http://usaservice.org/page/event/search_results?orderby=day&state="
          + state + query)
def crawl_state(state, ignore):
  """Thread body: fetch the RSS feed for one state and append items to OUTPUT_FN.

  `ignore` is unused; it exists because thread.start_new_thread requires an
  argument tuple.  Self-throttles to roughly 10 concurrent crawlers.
  """
  global crawlers, crawlers_lock, OUTPUT_FN, file_lock
  crawlers_lock.acquire()
  crawlers = crawlers + 1
  crawlers_lock.release()
  # crude throttle; the unlocked read is racy but close enough for this script
  while crawlers > 10:
    time.sleep(1)
  try:
    url = get_url(state)
    fh = urllib.urlopen(url)
    rss = fh.read()
    fh.close()
    items = re.findall(r'<item>.+?</item>', rss, re.DOTALL)
    if len(items) > 0:
      print datetime.now(), "found", len(items), "items for state", state
    outstr = ""
    for item in items:
      # collapse newlines so each item occupies exactly one output line
      item = re.sub(r'(?:\r?\n|\r)',' ', item)
      # skip a known junk listing
      if re.search(r'Find Money For Next 12 Months', item):
        continue
      outstr += item + "\n"
    file_lock.acquire()
    outfh = open(OUTPUT_FN, "a")
    outfh.write(outstr)
    outfh.close()
    file_lock.release()
  except:
    # best-effort: a failed state is silently skipped
    pass
  crawlers_lock.acquire()
  crawlers = crawlers - 1
  crawlers_lock.release()
from optparse import OptionParser
if __name__ == "__main__":
  # start fresh -- the crawler threads append to OUTPUT_FN
  try:
    os.unlink(OUTPUT_FN)
  except:
    # the file may not exist yet; that's fine
    pass
  # spawn one crawler thread per state/province code
  for state in STATES:
    thread.start_new_thread(crawl_state, (state, "foo"))
  # give them a chance to start
  time.sleep(1)
  # main thread polls the shared counter until all crawlers are done
  while (crawlers > 0):
    print datetime.now(), "waiting for", crawlers, "crawlers to finish."
    time.sleep(1)
  sys.exit(0)
| Python |
#!/usr/bin/python
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: remove silly dependency on dapper.net-- thought I'd need
# it for the full scrape, but ended up not going that way.
"""crawler for craigslist until they provide a real feed."""
from xml.dom import minidom
import sys
import os
import urllib
import re
import thread
import time
import datetime
import socket
DEFAULT_TIMEOUT = 10
socket.setdefaulttimeout(DEFAULT_TIMEOUT)
METROS_FN = "craigslist-metros.txt"
CACHE_FN = "craigslist-cache.txt"
pages = {}
page_lock = thread.allocate_lock()
crawlers = 0
crawlers_lock = thread.allocate_lock()
cachefile_lock = thread.allocate_lock()
# set to a lower number if you have problems
MAX_CRAWLERS = 40
def read_metros():
  """Load the metro url->name map from METROS_FN into the global `metros`.

  Each line of METROS_FN is "<url>|<name>" as written by crawl_metros();
  the trailing newline is left on the name, matching the original behavior.
  """
  global metros
  metros = {}
  fh = open(METROS_FN, "r")
  try:
    for line in fh:
      url, name = line.split("|")
      metros[url] = name
  finally:
    # BUG FIX: the file handle was previously never closed
    fh.close()
def crawl_metros():
  """Scrape craigslist's metro list (via dapper.net) and write METROS_FN."""
  # example of the dapper.net output being parsed:
  #<geo dataType="RawString" fieldName="geo" href="http://waterloo.craigslist.org/" originalElement="a" type="field">waterloo / cedar falls</geo>
  print "getting toplevel geos..."
  fh = urllib.urlopen("http://www.dapper.net/RunDapp?dappName=craigslistmetros&v=1&applyToUrl=http%3A%2F%2Fgeo.craigslist.org%2Fiso%2Fus")
  geostr = fh.read()
  fh.close()
  dom = minidom.parseString(geostr)
  nodes = dom.getElementsByTagName("geo")
  outfh = open(METROS_FN, "w+")
  domains = []  # NOTE(review): never used below -- looks vestigial
  for node in nodes:
    domain = node.getAttribute("href")
    #print "finding submetros within", domain
    fh1 = urllib.urlopen(domain)
    domain_homepage = fh1.read()
    fh1.close()
    # example of the craigslist homepage markup being parsed:
    #<td align="center" colspan="5" id="topban">
    #<div>
    #<h2>new york city</h2> <sup><a href="http://en.wikipedia.org/wiki/New_York_City">w</a></sup>
    #<span class="for"><a href="/mnh/" title="manhattan">mnh</a> <a href="/brk/" title="brooklyn">brk</a> <a href="/que/" title="queens">que</a> <a href="/brx/" title="bronx">brx</a> <a href="/stn/" title="staten island">stn</a> <a href="/jsy/" title="new jersey">jsy</a> <a href="/lgi/" title="long island">lgi</a> <a href="/wch/" title="westchester">wch</a> <a href="/fct/" title="fairfield">fct</a> </span>
    #</div>
    #</td>
    topbanstrs = re.findall(r'<td align="center" colspan="5" id="topban">.+?</td>', domain_homepage, re.DOTALL)
    for topbanstr in topbanstrs:
      links = re.findall(r'<a href="/(.+?)".+?title="(.+?)".+?</a>', topbanstr, re.DOTALL)
      if len(links) > 0:
        # metro with sub-areas: one output line per sub-area
        for link in links:
          print domain+link[0], ":", link[1]
          outfh.write(domain+link[0]+"|"+link[1]+"\n")
      else:
        # single-area metro: fall back to the <h2> city name
        names = re.findall(r'<h2>(.+?)</h2>', domain_homepage, re.DOTALL)
        print domain, ":", names[0]
        outfh.write(domain+"|"+names[0]+"\n")
  outfh.close()
def crawl(url, ignore):
global crawlers, crawlers_lock, pages, page_lock, MAX_CRAWLERS
if url in pages:
return
while crawlers > MAX_CRAWLERS:
time.sleep(1)
# we don't care if several wake at once
crawlers_lock.acquire()
crawlers = crawlers + 1
crawlers_lock.release()
#proxied_url = "http://suprfetch.appspot.com/?url="+urllib.quote(url+"?for_google_and_craigslist.org_project_footprint_please_dont_block")
proxied_url = "http://suprfetch.appspot.com/?url="+urllib.quote(url)
page = ""
attempts = 0
while attempts < 3 and page == "":
try:
fh = urllib.urlopen(proxied_url)
page = fh.read()
fh.close()
except:
page = "" # in case close() threw exception
attempts = attempts + 1
print "open failed, retry after", attempts, "attempts (url="+url+")"
time.sleep(1)
if re.search(r'This IP has been automatically blocked', page, re.DOTALL):
print "uh oh: craiglist is blocking us (IP blocking). exiting..."
sys.exit(1)
if (re.search(r'sorry.google.com/sorry/', page) or
re.search(r'to automated requests from a computer virus or spyware', page, re.DOTALL)):
print "uh oh: google is blocking us (DOS detector). exiting..."
sys.exit(1)
if re.search(r'<TITLE>302 Moved</TITLE>"',page, re.DOTALL):
newlocstr = re.findall(r'The document has moved <A HREF="(.+?)"',page)
print "being redirected to",newlocstr[0]
crawl(newlocstr[0], "foo")
return
if attempts >= 3:
print "crawl failed after 3 attempts:",url
return
page_lock.acquire()
pages[url] = page
page_lock.release()
cached_page = re.sub(r'(?:\r?\n|\r)',' ',page)
cachefile_lock.acquire()
outfh = open(CACHE_FN, "a")
outfh.write(url+"-Q-"+cached_page+"\n")
outfh.close()
cachefile_lock.release()
crawlers_lock.acquire()
crawlers = crawlers - 1
crawlers_lock.release()
def wait_for_page(url):
  """Block until `url` appears in the shared `pages` dict, then return its page."""
  while True:
    page_lock.acquire()
    result = pages.get(url, "")
    page_lock.release()
    if result != "":
      return result
    # not crawled yet; poll again shortly
    time.sleep(2)
def sync_fetch(url):
crawl(url, "")
if url not in pages:
print "sync_fetch, failed to crawl url",url
sys.exit(1)
return pages[url]
progstart = time.time()
def secs_since_progstart():
  """Return the wall-clock seconds elapsed since the program started."""
  # progstart is set once at module load; read-only here
  now = time.time()
  return now - progstart
def crawl_metro_page(url, unused):
  """Crawl one metro listing page: fetch each listing, then follow pagination.

  `unused` exists because thread.start_new_thread requires an argument tuple.
  """
  global crawlers, crawlers_lock, pages, page_lock
  listingpage = sync_fetch(url)
  # individual listing links look like <p><a href="/xyz/vol/NNN.html">
  listingurls = re.findall(r'<p><a href="/(.+?)">', listingpage)
  # reduce e.g. http://sfbay.craigslist.org/vol/ to http://sfbay.craigslist.org/
  base = re.sub(r'.org/.+', '.org/', url)
  for listing_url in listingurls:
    #print "found",base+listing_url,"in",url
    crawl(base+listing_url, "")
  path = re.sub(r'[^/]+$', '', url)
  # follow "next page" links (index100.html etc.) on fresh threads
  nextpages = re.findall(r'<a href="(index[0-9]+[.]html)"', listingpage)
  for nextpage_url in nextpages:
    #print "found",path+nextpage_url,"in",url
    thread.start_new_thread(crawl_metro_page, (path+nextpage_url, ""))
def parse_cache_file(s, listings_only=False, printerrors=True):
global pages
for i,line in enumerate(s.splitlines()):
#print line[0:100]
res = re.findall(r'^(.+?)-Q-(.+)', line)
try:
url,page = res[0][0], res[0][1]
if (not listings_only or re.search(r'html$', url)):
pages[url] = page
except:
if printerrors:
print "error parsing cache file on line",i+1
print line
def load_cache():
  """Preload the global `pages` dict from CACHE_FN; silently skip if absent."""
  global CACHE_FN
  try:
    fh = open(CACHE_FN, "r")
    instr = fh.read()
    print "closing cache file", CACHE_FN
    fh.close()
    print "parsing cache data", len(instr), "bytes"
    # False: load all pages, not just listing pages
    parse_cache_file(instr, False)
    print "loaded", len(pages), "pages."
  except:
    # ignore errors if file doesn't exist
    pass
def print_status():
  """Main-thread monitor loop: print crawl-rate stats until progress stalls."""
  global pages, num_cached_pages, crawlers
  samesame = 0
  last_crawled_pages = 0
  while True:
    # pages preloaded from cache don't count toward the crawl rate
    crawled_pages = len(pages) - num_cached_pages
    pages_per_sec = int(crawled_pages/secs_since_progstart())
    msg = str(secs_since_progstart())+": main thread: "
    msg += "waiting for " + str(crawlers) + " crawlers.\n"
    msg += str(crawled_pages) + " pages crawled so far"
    msg += "(" + str(pages_per_sec) + " pages/sec). "
    msg += str(len(pages)) + " total pages."
    print msg
    # give up after 100 checks (~200s) with no new pages
    if last_crawled_pages == crawled_pages:
      samesame += 1
      if samesame >= 100:
        print "done (waited long enough)."
        break
    else:
      # NOTE(review): samesame is never reset here, so the 100 stalled checks
      # accumulate across the whole run rather than being consecutive --
      # confirm that's intended.
      last_crawled_pages = crawled_pages
    time.sleep(2)
from optparse import OptionParser
if __name__ == "__main__":
  parser = OptionParser("usage: %prog [options]...")
  parser.set_defaults(metros=False)
  parser.set_defaults(load_cache=True)
  parser.add_option("--metros", action="store_true", dest="metros")
  parser.add_option("--load_cache", action="store_true", dest="load_cache")
  parser.add_option("--noload_cache", action="store_false", dest="load_cache")
  (options, args) = parser.parse_args(sys.argv[1:])
  if options.metros:
    # re-scrape the metro list before crawling
    crawl_metros()
  read_metros()
  if options.load_cache:
    load_cache()
  else:
    # fresh crawl: discard the previous cache file
    try:
      os.unlink(CACHE_FN)
    except:
      # cache file may not exist; that's fine
      pass
  # pages loaded from cache shouldn't count toward the crawl-rate stats
  num_cached_pages = len(pages)
  outstr = ""
  # one thread per metro's volunteer-listings index page
  for url in metros:
    thread.start_new_thread(crawl_metro_page, (url+"vol/", ""))
  print_status()
  sys.exit(0)
| Python |
#!/usr/bin/python
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
main() for the crawling/parsing/loading pipeline
"""
#from xml.dom.ext import PrettyPrint
import gzip
import hashlib
import urllib
import re
from datetime import datetime
import parse_footprint
import parse_gspreadsheet
import parse_usaservice
import parse_handsonnetwork
import parse_idealist
import parse_craigslist
import parse_americorps
import parse_userpostings
import parse_servenet
import parse_volunteermatch
import subprocess
import sys
import time
import xml_helpers as xmlh
from optparse import OptionParser
import dateutil
import dateutil.tz
import dateutil.parser
FIELDSEP = "\t"
RECORDSEP = "\n"
MAX_ABSTRACT_LEN = 300
DEBUG = False
PROGRESS = False
PRINTHEAD = False
ABRIDGED = False
OUTPUTFMT = "fpxml"
# set a nice long timeout
import socket
socket.setdefaulttimeout(600.0)
# pick a latlng that'll never match real queries
UNKNOWN_LAT = UNKNOWN_LNG = "-10"
UNKNOWN_LATLNG = UNKNOWN_LAT + "," + UNKNOWN_LNG
# pick a latlng that'll never match real queries
LOCATIONLESS_LAT = LOCATIONLESS_LNG = "0"
LOCATIONLESS_LATLNG = LOCATIONLESS_LAT + "," + LOCATIONLESS_LNG
HEADER_ALREADY_OUTPUT = False
#BASE_PUB_URL = "http://change.gov/"
BASE_PUB_URL = "http://adamsah.net/"
SEARCHFIELDS = {
# required
"description":"builtin",
"event_date_range":"builtin",
"link":"builtin",
"location":"builtin",
"title":"builtin",
# needed for search restricts
"latitude":"float",
"longitude":"float",
# needed for basic search results
"id":"builtin",
"detailURL":"URL",
"abstract":"string",
"location_string":"string",
"feed_providerName":"string",
}
FIELDTYPES = {
"title":"builtin",
"description":"builtin",
"link":"builtin",
"event_type":"builtin",
"quantity":"builtin",
"image_link":"builtin",
"event_date_range":"builtin",
"id":"builtin",
"location":"builtin",
"paid":"boolean",
"openended":"boolean",
"volunteersSlots":"integer",
"volunteersFilled":"integer",
"volunteersNeeded":"integer",
"minimumAge":"integer",
"latitude":"float",
"longitude":"float",
"providerURL":"URL",
"detailURL":"URL",
"org_organizationURL":"URL",
"org_logoURL":"URL",
"org_providerURL":"URL",
"feed_providerURL":"URL",
"lastUpdated":"dateTime",
"expires":"dateTime",
"feed_createdDateTime":"dateTime",
# note: type "location" isn"t safe because the Base geocoder can fail,
# causing the record to be rejected
"duration":"string",
"abstract":"string",
"sexRestrictedTo":"string",
"skills":"string",
"contactName":"string",
"contactPhone":"string",
"contactEmail":"string",
"language":"string",
"org_name":"string",
"org_missionStatement":"string",
"org_description":"string",
"org_phone":"string",
"org_fax":"string",
"org_email":"string",
"categories":"string",
"audiences":"string",
"commitmentHoursPerWeek":"string",
"employer":"string",
"feed_providerName":"string",
"feed_description":"string",
"providerID":"string",
"feed_providerID":"string",
"feedID":"string",
"opportunityID":"string",
"organizationID":"string",
"sponsoringOrganizationID":"strng",
"volunteerHubOrganizationID":"string",
"org_nationalEIN":"string",
"org_guidestarID":"string",
"venue_name":"string",
"location_string":"string",
"orgLocation":"string",
"hidden_details":"string",
}
def print_progress(msg, filename="", progress=None):
  """Forward a progress message to xmlh, defaulting to the global PROGRESS flag."""
  # PROGRESS can't be used as the default arg value -- it may be mutated
  # after this module is imported
  xmlh.print_progress(msg, filename,
                      progress=PROGRESS if progress is None else progress)
def print_status(msg, filename="", progress=None):
  """Forward a status message (for stats collection) to xmlh, defaulting to PROGRESS."""
  # see print_progress for why PROGRESS isn't the default arg value
  xmlh.print_status(msg, filename,
                    progress=PROGRESS if progress is None else progress)
# Google Base uses ISO8601... in PST -- I kid you not:
# http://base.google.com/support/bin/answer.py?
# answer=78170&hl=en#Events%20and%20Activities
# and worse, you have to change an env var in python...
def convert_dt_to_gbase(datestr, timestr, timezone):
  """converts dates like YYYY-MM-DD, times like HH:MM:SS and
  timezones like America/New_York, into Google Base format
  (ISO8601 rendered in PST/PDT -- see the comment above)."""
  try:
    tzinfo = dateutil.tz.tzstr(timezone)
  except:
    # unparseable timezone string: fall back to UTC
    tzinfo = dateutil.tz.tzutc()
  try:
    # NOTE: `timestr` is reused to hold the parsed datetime object
    timestr = dateutil.parser.parse(datestr + " " + timestr)
  except:
    print "error parsing datetime: "+datestr+" "+timestr
    # NOTE(review): on parse failure, timestr is still a string, so the
    # .replace() call below will raise AttributeError -- confirm callers
    # always pass parseable values, or add a fallback here.
  timestr = timestr.replace(tzinfo=tzinfo)
  pst = dateutil.tz.tzstr("PST8PDT")
  timestr = timestr.astimezone(pst)
  if timestr.year < 1900:
    # strftime can't format pre-1900 years; assume a two-digit-year artifact
    timestr = timestr.replace(year=timestr.year+1900)
  res = timestr.strftime("%Y-%m-%dT%H:%M:%S")
  res = re.sub(r'Z$', '', res)
  return res
CSV_REPEATED_FIELDS = ['categories', 'audiences']
DIRECT_MAP_FIELDS = [
'opportunityID', 'organizationID', 'volunteersSlots', 'volunteersFilled',
'volunteersNeeded', 'minimumAge', 'sexRestrictedTo', 'skills', 'contactName',
'contactPhone', 'contactEmail', 'providerURL', 'language', 'lastUpdated',
'expires', 'detailURL']
ORGANIZATION_FIELDS = [
'nationalEIN', 'guidestarID', 'name', 'missionStatement', 'description',
'phone', 'fax', 'email', 'organizationURL', 'logoURL', 'providerURL']
def flattener_value(node):
  """Return a DOM node's first child's text with commas removed ("" if childless)."""
  child = node.firstChild
  # commas are stripped so the value is safe inside a CSV join
  return "" if child is None else child.data.replace(",", "")
def flatten_to_csv(domnode):
  """Join the text of domnode's children with commas (commas stripped,
  empty values dropped)."""
  values = []
  for child in domnode.childNodes:
    # inline of flattener_value: first child's text, sans commas
    if child.firstChild is not None:
      text = child.firstChild.data.replace(",", "")
      if text != "":
        values.append(text)
  return ",".join(values)
def output_field(name, value):
"""print a field value, handling long strings, header lines and
custom datatypes."""
#global PRINTHEAD, DEBUG
if PRINTHEAD:
if name not in FIELDTYPES:
print datetime.now(), "no type for field: " + name + FIELDTYPES[name]
sys.exit(1)
elif FIELDTYPES[name] == "builtin":
return name
elif OUTPUTFMT == "basetsv":
return "c:"+name+":"+FIELDTYPES[name]
else:
return name+":"+FIELDTYPES[name]
if OUTPUTFMT == "basetsv":
# grr: Base tries to treat commas in custom fields as being lists ?!
# http://groups.google.com/group/base-help-basics/browse_thread/thread/
# c4f51447191a6741
# TODO: note that this may cause fields to expand beyond their maxlen
# (e.g. abstract)
value = re.sub(r',', ';;', value)
if DEBUG:
if (len(value) > 70):
value = value[0:67] + "... (" + str(len(value)) + " bytes)"
return name.rjust(22) + " : " + value
if (FIELDTYPES[name] == "dateTime"):
return convert_dt_to_gbase(value, "", "UTC")
return value
def get_addr_field(node, field):
  """Return the named address field's value, with a trailing space if non-empty."""
  value = xmlh.get_tag_val(node, field)
  return value + " " if value != "" else value
def city_loc_fields(node):
  """Synthesize a city-region-postal-country string.

  Avoids commas so the result works with CSV; good enough for the geocoder.
  """
  return "".join(get_addr_field(node, part)
                 for part in ("city", "region", "postalCode", "country"))
def compute_loc_field(node):
  """Concatenate the three street-address fields."""
  return "".join(get_addr_field(node, part)
                 for part in ("streetAddress1", "streetAddress2",
                              "streetAddress3"))
def compute_city_field(node):
  """Concatenate street address and city/region/postal/country fields."""
  return compute_loc_field(node) + city_loc_fields(node)
def lookup_loc_fields(node):
"""try a multitude of field combinations to get a geocode."""
fullloc = loc = compute_city_field(node)
latlng = xmlh.get_tag_val(node, "latitude") + ","
latlng += xmlh.get_tag_val(node, "longitude")
if latlng == ",":
latlng = geocode(loc)
if latlng == "":
# sometimes address1 contains un-geocodable descriptive language,
# e.g. venue name, "around the corner from ..." etc.
loc = get_addr_field(node, "streetAddress2")
loc += get_addr_field(node, "streetAddress3")
loc += city_loc_fields(node)
latlng = geocode(loc)
if latlng == "":
# rarely, addr1 & addr are both descriptive
loc = get_addr_field(node, "streetAddress3")
loc += city_loc_fields(node)
latlng = geocode(loc)
if latlng == "":
# missing or bogus address lines
loc = city_loc_fields(node)
if latlng == "":
# missing or bogus city name
loc = get_addr_field(node, "postalCode")
loc += get_addr_field(node, "country")
latlng = geocode(loc)
if latlng == "":
# missing or bogus postalcode
loc = get_addr_field(node, "city")
loc += get_addr_field(node, "region")
loc += get_addr_field(node, "country")
latlng = geocode(loc)
if latlng == "":
loc += get_addr_field(node, "region")
loc += get_addr_field(node, "country")
latlng = geocode(loc)
# TODO: get more sophisticated about reverse geocoding
# entries missing some fields
if latlng == "":
latlng = "0,0"
else:
if fullloc == "":
fullloc = reverse_geocode(latlng)
if loc == "":
loc = reverse_geocode(latlng)
if DEBUG:
print datetime.now(), "geocode: " + loc + "=" + latlng
return (fullloc, latlng, loc)
def output_loc_field(node, mapped_name):
  """Shorthand: emit the node's full address via output_field under mapped_name."""
  combined = compute_loc_field(node) + city_loc_fields(node)
  return output_field(mapped_name, combined)
def output_tag_value(node, fieldname):
  """Shorthand: emit the node's tag value via output_field under the same name."""
  tag_value = xmlh.get_tag_val(node, fieldname)
  return output_field(fieldname, tag_value)
def output_tag_value_renamed(node, xmlname, newname):
  """Shorthand: emit the node's xmlname tag value via output_field as newname."""
  tag_value = xmlh.get_tag_val(node, xmlname)
  return output_field(newname, tag_value)
def compute_stable_id(opp, org, locstr, openended, duration,
hrs_per_week, startend):
"""core algorithm for computing an opportunity's unique id."""
if DEBUG:
print "opp=" + str(opp) # shuts up pylint
eid = xmlh.get_tag_val(org, "nationalEIN")
if (eid == ""):
# support informal "organizations" that lack EINs
eid = xmlh.get_tag_val(org, "organizationURL")
# TODO: if two providers have same listing, good odds the
# locations will be slightly different...
loc = locstr
# TODO: if two providers have same listing, the time info
# is unlikely to be exactly the same, incl. missing fields
timestr = openended + duration + hrs_per_week + startend
return hashlib.md5(eid + loc + timestr).hexdigest()
def get_abstract(opp):
  """process abstract-- shorten, strip newlines and formatting."""
  abstract = xmlh.get_tag_val(opp, "abstract")
  if abstract == "":
    # fall back on the full description when no abstract is provided
    abstract = xmlh.get_tag_val(opp, "description")
  abstract = re.sub(r'(\\[bn])+', ' ', abstract)           # strip \n and \b
  abstract = re.sub(r'&([a-z]+|#[0-9]+);', '', abstract)   # strip XML escapes
  return abstract[:MAX_ABSTRACT_LEN]
def get_direct_mapped_fields(opp, org):
  """map a field directly from FPXML to Google Base.

  Args:
    opp: <VolunteerOpportunity> DOM node.
    org: the sponsoring <Organization> DOM node.
  Returns:
    FIELDSEP-joined string of output_field() results (abstract only
    when ABRIDGED).
  """
  if ABRIDGED:
    outstr = output_field("abstract", get_abstract(opp))
    return outstr
  outstr = ""
  # normalize 'paid' to a strict "y"/"n" (default "n")
  paid = xmlh.get_tag_val(opp, "paid")
  if (paid == "" or paid.lower()[0] != "y"):
    paid = "n"
  else:
    paid = "y"
  outstr += output_field("paid", paid)
  for field in DIRECT_MAP_FIELDS:
    outstr += FIELDSEP + output_tag_value(opp, field)
  for field in ORGANIZATION_FIELDS:
    outstr += FIELDSEP + output_field("org_"+field,
                                      xmlh.get_tag_val(org, field))
  for field in CSV_REPEATED_FIELDS:
    outstr += FIELDSEP
    fieldval = opp.getElementsByTagName(field)
    val = ""
    if (fieldval.length > 0):
      val = flatten_to_csv(fieldval[0])
    outstr += output_field(field, val)
  # abstract
  outstr += FIELDSEP
  outstr += output_field("abstract", get_abstract(opp))
  # orgLocation
  outstr += FIELDSEP
  fieldval = opp.getElementsByTagName("orgLocation")
  if (fieldval.length > 0):
    outstr += output_loc_field(fieldval[0], "orgLocation")
  else:
    outstr += output_field("orgLocation", "")
  # hidden_details
  outstr += FIELDSEP
  fieldval = opp.getElementsByTagName("hiddenDetails")
  if (fieldval.length > 0):
    # bugfix: arguments were swapped (the DOM node was passed as the
    # field NAME); emit the tag's text under the hidden_details column.
    outstr += output_field("hidden_details",
                           xmlh.get_tag_val(opp, "hiddenDetails"))
  else:
    outstr += output_field("hidden_details", "some hidden text. asdfghjkl.")
  return outstr
def get_base_other_fields(opp, org):
  """These are fields that exist in other Base schemas-- for the sake of
  possible syndication, we try to make ourselves look like other Base
  feeds. Since we're talking about a small overlap, these fields are
  populated *as well as* direct mapping of the footprint XML fields."""
  if ABRIDGED:
    return output_field("employer", xmlh.get_tag_val(org, "name"))
  pieces = [
    output_field("quantity", xmlh.get_tag_val(opp, "volunteersNeeded")),
    output_field("employer", xmlh.get_tag_val(org, "name")),
    output_field("image_link", xmlh.get_tag_val(org, "logoURL")),
  ]
  # don't map expiration_date -- Base has strict limits (e.g. 2 weeks)
  return FIELDSEP.join(pieces)
def get_event_reqd_fields(opp):
  """Fields required by Google Base, note that they aren't necessarily
  used by the FP app."""
  pieces = [output_tag_value(opp, "title"),
            output_tag_value(opp, "description"),
            output_field("link", BASE_PUB_URL)]
  return FIELDSEP.join(pieces)
def get_feed_fields(feedinfo):
  """Fields from the <Feed> portion of FPXML, emitted as feed_<name>."""
  if ABRIDGED:
    return output_tag_value_renamed(feedinfo,
                                    "providerName", "feed_providerName")
  outstr = output_tag_value(feedinfo, "feedID")
  # each provider-level tag is re-emitted with a "feed_" prefix
  for xmlname in ("providerID", "providerName", "providerURL",
                  "description", "createdDateTime"):
    outstr += FIELDSEP + output_tag_value_renamed(
      feedinfo, xmlname, "feed_" + xmlname)
  return outstr
# verbose logging for the geocoding helpers below
GEOCODE_DEBUG = False
# forward cache: cleaned query string -> "lat,lng" (lazily loaded from disk)
GEOCODE_CACHE = None
# reverse cache: approximate (2-decimal) "lat,lng" -> query string
RGEOCODE_CACHE = None
# on-disk cache file, one "query|latlng" entry per line
GEOCODE_CACHE_FN = "geocode_cache.txt"
def approx_latlng(latlng):
  """truncate each coordinate in a 'lat,lng' string to two decimal places
  (used so nearby points share one reverse-geocode cache entry)."""
  return re.sub(r'([.]\d\d)\d+', r'\1', latlng)
def cleanup_geocode_query(query):
  """normalize a geocode query: expand escaped whitespace sequences,
  collapse whitespace runs, lowercase, and trim."""
  expanded = re.sub(r'\\[tnrfv]', r' ', query)
  collapsed = re.sub(r'\s\s+', r' ', expanded)
  return collapsed.lower().strip()
def load_geocode_cache():
global GEOCODE_CACHE, RGEOCODE_CACHE
GEOCODE_CACHE = {}
RGEOCODE_CACHE = {}
geocode_fh = open(GEOCODE_CACHE_FN, "r")
try:
for line in geocode_fh:
if "|" in line:
key, val = line.split("|")
key = cleanup_geocode_query(key)
latlng = val.strip()
GEOCODE_CACHE[key] = latlng
RGEOCODE_CACHE[approx_latlng(latlng)] = key
if GEOCODE_DEBUG and len(GEOCODE_CACHE) % 2000 == 0:
print "read", len(GEOCODE_CACHE), "geocode cache entries."
finally:
geocode_fh.close()
def geocode_call(query, retries, parsefunc):
# geocode with google maps, and cache responses
params = urllib.urlencode(
{'q':query, 'output':'csv', 'oe':'utf8', 'sensor':'false',
'key':'ABQIAAAAxq97AW0x5_CNgn6-nLxSrxQuOQhskTx7t90ovP5xOuY' + \
'_YrlyqBQajVan2ia99rD9JgAcFrdQnTD4JQ'})
if GEOCODE_DEBUG:
print datetime.now(), "(reverse)geocoding '" + query + "'..."
maps_fh = urllib.urlopen("http://maps.google.com/maps/geo?%s" % params)
res = maps_fh.readline()
maps_fh.close()
if GEOCODE_DEBUG:
print datetime.now(), "response: "+res
if "," not in res:
# fail and also don't cache
return ""
respcode, zoom, val = parsefunc(res)
zoom = zoom # shutup pylint
if respcode == 500 or respcode == 620:
if GEOCODE_DEBUG:
print datetime.now(), "geocoder quota exceeded-- sleeping..."
time.sleep(1)
return geocode_call(query, retries - 1, parsefunc)
# these results get cached
geocode_fh = open(GEOCODE_CACHE_FN, "a")
if re.match(r'[0-9.+-]+,[0-9.+-]+', val):
# gecoding
cacheline = query + "|" + val
GEOCODE_CACHE[query] = val
RGEOCODE_CACHE[approx_latlng(val)] = query
else:
# reverse geocoding
cacheline = val + "|" + query
GEOCODE_CACHE[val] = query
RGEOCODE_CACHE[approx_latlng(query)] = val
if GEOCODE_DEBUG:
print datetime.now(), "storing cacheline: "+cacheline
else:
print_progress("storing cacheline: "+cacheline)
geocode_fh.write(cacheline + "\n")
geocode_fh.close()
return val
def reverse_geocode(latlng, retries=4):
  """convert a "lat,lng" string to a coarse (roughly city-granularity)
  address string, consulting the reverse cache before calling the API."""
  global RGEOCODE_CACHE
  latlng = cleanup_geocode_query(latlng)
  # cache keys have all whitespace removed, e.g. "37.77,-122.41"
  latlng = re.sub(r'\s', '', latlng)
  if RGEOCODE_CACHE == None:
    # lazily load the on-disk cache on first use
    load_geocode_cache()
  # lookups use truncated (2-decimal) coordinates so nearby points hit
  if approx_latlng(latlng) in RGEOCODE_CACHE:
    return RGEOCODE_CACHE[approx_latlng(latlng)]
  def parsefunc(response):
    """extract (respcode, zoom, location) from a CSV geocoder response."""
    # 200,8,"1475 Broadway, New York, NY 10036, USA"
    match = re.search(r'(\d+),(\d+),"(.+)"', response)
    if match:
      respcode = int(match.group(1))
      zoom = int(match.group(2))
      loc = match.group(3)
      # TODO: total hack to extract the city granularity
      # (keeps only the last three comma-separated components)
      loc = re.sub(r'^.+,([^,]+,[^,]+,[^,]+)$', r'\1', loc).strip()
      return respcode, zoom, loc
    if GEOCODE_DEBUG:
      print datetime.now(), "unparseable response: "+response[0:80]
    # 999 is a local sentinel meaning "unparseable", not an API code
    return 999, 0, ""
  return geocode_call(latlng, retries, parsefunc)
def geocode(addr, retries=4):
  """convert a string addr to a "lat,long" string"""
  global GEOCODE_CACHE
  addr = cleanup_geocode_query(addr)
  # ignore leading/trailing punctuation
  addr = re.sub(r'^[^0-9a-z]+', r'', addr)
  addr = re.sub(r'[^0-9a-z]+$', r'', addr)
  if GEOCODE_CACHE == None:
    # lazily load the on-disk cache on first use
    load_geocode_cache()
  if addr in GEOCODE_CACHE:
    return GEOCODE_CACHE[addr]
  def parsefunc(locstr):
    """extract (respcode, zoom, "lat,lng") from a CSV geocoder response."""
    match = re.search(r'(\d+),(\d+),([0-9.+-]+,[0-9.+-]+)', locstr)
    if match:
      respcode = int(match.group(1))
      zoom = int(match.group(2))
      latlng = match.group(3)
      return respcode, zoom, latlng
    if GEOCODE_DEBUG:
      print datetime.now(), "unparseable response: "+locstr[0:80]
    # 999 is a local sentinel meaning "unparseable", not an API code;
    # UNKNOWN_LATLNG is a module-level constant defined elsewhere
    return 999, 0, UNKNOWN_LATLNG
  return geocode_call(addr, retries, parsefunc)
def output_opportunity(opp, feedinfo, known_orgs, totrecs):
  """main function for outputting a complete opportunity.

  Emits one record per (dateTimeDuration, location) pair, so a single
  <VolunteerOpportunity> node can produce many output records.

  Args:
    opp: <VolunteerOpportunity> DOM node.
    feedinfo: <FeedInfo> DOM node.
    known_orgs: dict of organizationID -> <Organization> DOM node.
    totrecs: running total of records generated so far.
  Returns:
    (updated totrecs, output string) -- the string is "" when the
    opportunity is skipped (missing ID or unknown organization).
  """
  outstr = ""
  opp_id = xmlh.get_tag_val(opp, "volunteerOpportunityID")
  if (opp_id == ""):
    print_progress("no opportunityID")
    return totrecs, ""
  org_id = xmlh.get_tag_val(opp, "sponsoringOrganizationID")
  if (org_id not in known_orgs):
    print_progress("unknown sponsoringOrganizationID: " +\
                   org_id + ". skipping opportunity " + opp_id)
    return totrecs, ""
  org = known_orgs[org_id]
  opp_locations = opp.getElementsByTagName("location")
  opp_times = opp.getElementsByTagName("dateTimeDuration")
  # these fields are identical for every (time, location) record
  repeated_fields = get_repeated_fields(feedinfo, opp, org)
  if len(opp_times) == 0:
    # no time info at all: emit one record with a sentinel start date
    opp_times = [ None ]
  for opptime in opp_times:
    if opptime == None:
      startend = convert_dt_to_gbase("1971-01-01", "00:00:00-00:00", "UTC")
      openended = "Yes"
    else:
      # event_date_range
      # e.g. 2006-12-20T23:00:00/2006-12-21T08:30:00, in PST (GMT-8)
      start_date = xmlh.get_tag_val(opptime, "startDate")
      start_time = xmlh.get_tag_val(opptime, "startTime")
      end_date = xmlh.get_tag_val(opptime, "endDate")
      end_time = xmlh.get_tag_val(opptime, "endTime")
      openended = xmlh.get_tag_val(opptime, "openEnded")
      # e.g. 2006-12-20T23:00:00/2006-12-21T08:30:00, in PST (GMT-8)
      if (start_date == ""):
        start_date = "1971-01-01"
        start_time = "00:00:00-00:00"
      startend = convert_dt_to_gbase(start_date, start_time, "UTC")
      # only append an end datetime when it is strictly after the start
      if (end_date != "" and end_date + end_time > start_date + start_time):
        startend += "/"
        startend += convert_dt_to_gbase(end_date, end_time, "UTC")
    # NOTE(review): when opptime is None these calls receive None --
    # presumably xmlh.get_tag_val tolerates that; confirm.
    duration = xmlh.get_tag_val(opptime, "duration")
    hrs_per_week = xmlh.get_tag_val(opptime, "commitmentHoursPerWeek")
    time_fields = get_time_fields(openended, duration, hrs_per_week, startend)
    if len(opp_locations) == 0:
      # no location info: emit one record with zeroed coordinates
      opp_locations = [ None ]
    for opploc in opp_locations:
      totrecs = totrecs + 1
      if PROGRESS and totrecs % 250 == 0:
        print_progress(str(totrecs)+" records generated.")
      if opploc == None:
        locstr, latlng, geocoded_loc = ("", "", "")
        loc_fields = get_loc_fields("0.0", "0.0", "0.0", "", "")
      else:
        locstr, latlng, geocoded_loc = lookup_loc_fields(opploc)
        lat = lng = "0.0"
        if latlng != "":
          lat, lng = latlng.split(",")
        # NOTE(review): coordinates are offset by +1000.0 here -- looks
        # deliberate (obfuscation?); confirm with the downstream consumer
        loc_fields = get_loc_fields("", str(float(lat)+1000.0),
                                    str(float(lng)+1000.0), geocoded_loc,
                                    xmlh.get_tag_val(opploc, "name"))
      #if locstr != geocoded_loc:
      #  #print datetime.now(), "locstr: ", locstr, " geocoded_loc: ", \
      #  #  geocoded_loc
      #  descs = opp.getElementsByTagName("description")
      #  encoded_locstr = escape(locstr)
      #  encoded_locstr = unicode(encoded_locstr,errors="ignore")
      #  encoded_locstr = encoded_locstr.encode('utf-8', "ignore")
      #  descs[0].firstChild.data += ". detailed location information: "
      #  descs[0].firstChild.data += encoded_locstr
      # the stable id depends on org, location and time, so each emitted
      # record gets its own id
      opp_id = compute_stable_id(opp, org, locstr, openended, duration,
                                 hrs_per_week, startend)
      outstr += output_field("id", opp_id)
      outstr += repeated_fields
      outstr += time_fields
      outstr += loc_fields
      outstr += RECORDSEP
  return totrecs, outstr
def get_time_fields(openended, duration, hrs_per_week,
                    event_date_range):
  """output time-related fields, e.g. for multiple times per event."""
  if ABRIDGED:
    return FIELDSEP + output_field("event_date_range", event_date_range)
  parts = [output_field("openended", openended),
           output_field("duration", duration),
           output_field("commitmentHoursPerWeek", hrs_per_week),
           output_field("event_date_range", event_date_range)]
  return "".join(FIELDSEP + part for part in parts)
def get_loc_fields(location, latitude, longitude, location_string,
                   venue_name):
  """output location-related fields, e.g. for multiple locations per event.

  The abridged and full variants share the first four fields; only the
  full feed carries venue_name.
  """
  loc_fields = FIELDSEP + output_field("location", location)
  loc_fields += FIELDSEP + output_field("latitude", latitude)
  loc_fields += FIELDSEP + output_field("longitude", longitude)
  loc_fields += FIELDSEP + output_field("location_string", location_string)
  if not ABRIDGED:
    loc_fields += FIELDSEP + output_field("venue_name", venue_name)
  return loc_fields
def get_repeated_fields(feedinfo, opp, org):
  """output all fields that are repeated for each time and location."""
  pieces = [get_feed_fields(feedinfo),
            get_event_reqd_fields(opp),
            get_base_other_fields(opp, org),
            get_direct_mapped_fields(opp, org)]
  return "".join(FIELDSEP + piece for piece in pieces)
def output_header(feedinfo, opp, org):
  """fake opportunity printer, which prints the header line instead."""
  global PRINTHEAD, HEADER_ALREADY_OUTPUT
  if HEADER_ALREADY_OUTPUT:
    # no matter what, only print the header once!
    return ""
  HEADER_ALREADY_OUTPUT = True
  # while PRINTHEAD is set, the field helpers presumably emit column
  # names rather than values -- that is how the header row is produced
  PRINTHEAD = True
  header = (output_field("id", "")
            + get_repeated_fields(feedinfo, opp, org)
            + get_time_fields("", "", "", "")
            + get_loc_fields("", "", "", "", "")
            + RECORDSEP)
  PRINTHEAD = False
  return header
def convert_to_footprint_xml(instr, do_fastparse, maxrecs, progress):
  """macro for parsing an FPXML string to XML then format it.

  (an earlier pulldom-based streaming implementation was removed here:
  it exploded RAM even with pulldom.)
  """
  if not do_fastparse:
    # slow parse
    # TODO: maxrecs
    xmldoc = parse_footprint.parse(instr, maxrecs, progress)
    return xmlh.prettyxml(xmldoc)
  res, numorgs, numopps = parse_footprint.parse_fast(instr, maxrecs, progress)
  return res
def convert_to_gbase_events_type(instr, origname, fastparse, maxrecs, progress):
  """non-trivial logic for converting FPXML to google base formatting.

  The fastparse path regex-scans the raw XML for <FeedInfo>,
  <Organization> and <VolunteerOpportunity> chunks and parses each chunk
  separately (to keep memory bounded); the slow path DOM-parses the
  entire document.  Returns (output string, org count, opp count).
  """
  # todo: maxrecs
  outstr = ""
  print_progress("convert_to_gbase_events_type...", "", progress)
  example_org = None
  known_orgs = {}
  if fastparse:
    # element names the chunk parser is told to recognize
    known_elnames = [
      'FeedInfo', 'FootprintFeed', 'Organization', 'Organizations',
      'VolunteerOpportunities', 'VolunteerOpportunity', 'abstract',
      'audienceTag', 'audienceTags', 'categoryTag', 'categoryTags',
      'city', 'commitmentHoursPerWeek', 'contactEmail', 'contactName',
      'contactPhone', 'country', 'createdDateTime', 'dateTimeDuration',
      'dateTimeDurationType', 'dateTimeDurations', 'description',
      'detailURL', 'directions', 'donateURL', 'duration', 'email',
      'endDate', 'endTime', 'expires', 'fax', 'feedID', 'guidestarID',
      'iCalRecurrence', 'language', 'latitude', 'lastUpdated', 'location',
      'locationType', 'locations', 'logoURL', 'longitude', 'minimumAge',
      'missionStatement', 'name', 'nationalEIN', 'openEnded',
      'organizationID', 'organizationURL', 'paid', 'phone', 'postalCode',
      'providerID', 'providerName', 'providerURL', 'region', 'schemaVersion',
      'sexRestrictedEnum', 'sexRestrictedTo', 'skills',
      'sponsoringOrganizationID', 'startDate', 'startTime', 'streetAddress1',
      'streetAddress2', 'streetAddress3', 'title', 'tzOlsonPath', 'virtual',
      'volunteerHubOrganizationID', 'volunteerOpportunityID',
      'volunteersFilled', 'volunteersSlots', 'volunteersNeeded', 'yesNoEnum'
      ]
    numopps = 0
    # NOTE(review): if the input has no <FeedInfo> chunk, 'feedinfo' is
    # never assigned and the opportunity loop below raises NameError
    feedchunks = re.findall(
      re.compile('<FeedInfo>.+?</FeedInfo>', re.DOTALL), instr)
    for feedchunk in feedchunks:
      print_progress("found FeedInfo.", progress=progress)
      feedinfo = xmlh.simple_parser(feedchunk, known_elnames, False)
    orgchunks = re.findall(
      re.compile('<Organization>.+?</Organization>', re.DOTALL), instr)
    for orgchunk in orgchunks:
      if progress and len(known_orgs) % 250 == 0:
        print_progress(str(len(known_orgs))+" organizations seen.")
      org = xmlh.simple_parser(orgchunk, known_elnames, False)
      org_id = xmlh.get_tag_val(org, "organizationID")
      if (org_id != ""):
        known_orgs[org_id] = org
      if example_org == None:
        # keep the first org around for header generation
        example_org = org
    oppchunks = re.findall(
      re.compile('<VolunteerOpportunity>.+?</VolunteerOpportunity>',
                 re.DOTALL), instr)
    for oppchunk in oppchunks:
      opp = xmlh.simple_parser(oppchunk, None, False)
      if not HEADER_ALREADY_OUTPUT:
        outstr = output_header(feedinfo, opp, example_org)
      numopps, spiece = output_opportunity(opp, feedinfo, known_orgs, numopps)
      outstr += spiece
      if (maxrecs > 0 and numopps > maxrecs):
        break
    ## note: preserves order, so diff works (vs. one sweep per element type)
    #chunks = re.findall(
    #  re.compile('<(?:Organization|VolunteerOpportunity|FeedInfo)>.+?'+
    #             '</(?:Organization|VolunteerOpportunity|FeedInfo)>',
    #             re.DOTALL), instr)
    #for chunk in chunks:
    #  node = xmlh.simple_parser(chunk, known_elnames, False)
    #  if re.search("<FeedInfo>", chunk):
    #    print_progress("found FeedInfo.", progress=progress)
    #    feedinfo = xmlh.simple_parser(chunk, known_elnames, False)
    #    continue
    #  if re.search("<Organization>", chunk):
    #    if progress and len(known_orgs) % 250 == 0:
    #      print_progress(str(len(known_orgs))+" organizations seen.")
    #    org = xmlh.simple_parser(chunk, known_elnames, False)
    #    org_id = xmlh.get_tag_val(org, "organizationID")
    #    if (org_id != ""):
    #      known_orgs[org_id] = org
    #    if example_org == None:
    #      example_org = org
    #    continue
    #  if re.search("<VolunteerOpportunity>", chunk):
    #    global HEADER_ALREADY_OUTPUT
    #    opp = xmlh.simple_parser(chunk, None, False)
    #    if numopps == 0:
    #      # reinitialize
    #      outstr = output_header(feedinfo, node, example_org)
    #    numopps, spiece = output_opportunity(opp, feedinfo, known_orgs, numopps)
    #    outstr += spiece
    #    if (maxrecs > 0 and numopps > maxrecs):
    #      break
    #numopps = 0
    #nodes = xml.dom.pulldom.parseString(instr)
    #example_org = None
    #for type,node in nodes:
    #  if type == 'START_ELEMENT':
    #    if node.nodeName == 'FeedInfo':
    #      nodes.expandNode(node)
    #      feedinfo = node
    #    elif node.nodeName == 'Organization':
    #      nodes.expandNode(node)
    #      id = xmlh.get_tag_val(node, "organizationID")
    #      if (id != ""):
    #        known_orgs[id] = node
    #      if example_org == None:
    #        example_org = node
    #    elif node.nodeName == 'VolunteerOpportunity':
    #      nodes.expandNode(node)
    #      if numopps == 0:
    #        outstr += output_header(feedinfo, node, example_org)
    #      numopps, spiece = output_opportunity(node, feedinfo,
    #                                           known_orgs, numopps)
    #      outstr += spiece
  else:
    # not fastparse: DOM-parse the whole document at once
    footprint_xml = parse_footprint.parse(instr, maxrecs, progress)
    feedinfos = footprint_xml.getElementsByTagName("FeedInfo")
    if (feedinfos.length != 1):
      print datetime.now(), "bad FeedInfo: should only be one section"
      # TODO: throw error
      sys.exit(1)
    feedinfo = feedinfos[0]
    organizations = footprint_xml.getElementsByTagName("Organization")
    for org in organizations:
      org_id = xmlh.get_tag_val(org, "organizationID")
      if (org_id != ""):
        known_orgs[org_id] = org
    opportunities = footprint_xml.getElementsByTagName("VolunteerOpportunity")
    numopps = 0
    for opp in opportunities:
      if numopps == 0:
        # emit the header row before the first record
        outstr += output_header(feedinfo, opp, organizations[0])
      numopps, spiece = output_opportunity(opp, feedinfo, known_orgs, numopps)
      outstr += spiece
  return outstr, len(known_orgs), numopps
def guess_shortname(filename):
  """from the input filename, guess which feed this is."""
  # ordered (pattern, shortname) table; first match wins, same
  # precedence as the original if-chain
  feed_patterns = [
    ("usa-?service", "usaservice"),
    (r'meetup', "meetup"),
    (r'barackobama[.]com', "mybarackobama"),
    (r'united.*way', "unitedway"),
    ("habitat", "habitat"),
    ("americansolutions", "americansolutions"),
    ("spreadsheets[.]google[.]com", "gspreadsheet"),
    ("(handson|hot.footprint)", "handsonnetwork"),
    ("(volunteer[.]gov)", "volunteergov"),
    ("(whichoneis.com|beextra[.]org)", "extraordinaries"),
    ("idealist", "idealist"),
    ("(userpostings|/export/Posting)", "footprint_userpostings"),
    ("craigslist", "craigslist"),
    ("americorps", "americorps"),
    ("servenet", "servenet"),
    ("volunteermatch", "volunteermatch"),
  ]
  for pattern, shortname in feed_patterns:
    if re.search(pattern, filename):
      return shortname
  return ""
def ftp_to_base(filename, ftpinfo, instr):
  """ftp the string to base, guessing the feed name from the orig filename.

  Args:
    filename: original input filename (used to pick the upload name).
    ftpinfo: "user:password" string for uploads.google.com.
    instr: complete output data to upload (gzipped for recognized feeds).
  """
  ftplib = __import__('ftplib')
  stringio = __import__('StringIO')
  dest_fn = guess_shortname(filename)
  if dest_fn == "":
    dest_fn = "footprint1.txt"
  else:
    dest_fn = dest_fn + "1.gz"
  if re.search(r'[.]gz$', dest_fn):
    print_progress("compressing data from "+str(len(instr))+" bytes", filename)
    gzip_fh = gzip.open(dest_fn, 'wb', 9)
    gzip_fh.write(instr)
    gzip_fh.close()
    data_fh = open(dest_fn, 'rb')
  else:
    data_fh = stringio.StringIO(instr)
  host = 'uploads.google.com'
  (user, passwd) = ftpinfo.split(":")
  print_progress("connecting to " + host + " as user " + user + "...", filename)
  ftp = ftplib.FTP(host)
  welcomestr = re.sub(r'\n', '\\n', ftp.getwelcome())
  print_progress("FTP server says: "+welcomestr, filename)
  ftp.login(user, passwd)
  print_progress("uploading filename "+dest_fn, filename)
  # bugfix: the old 'while not success' loop retried forever, so the
  # "giving up." branch below was unreachable -- bound the retries.
  max_attempts = 10
  success = False
  for attempt in range(max_attempts):
    try:
      ftp.storbinary("STOR " + dest_fn, data_fh, 8192)
      success = True
      break
    except Exception:
      # probably ftplib.error_perm: 553: Permission denied on server. (Overwrite)
      print_progress("upload failed-- sleeping and retrying...")
      time.sleep(1)
  if success:
    print_progress("done uploading.")
  else:
    print_progress("giving up.")
  ftp.quit()
  data_fh.close()
def guess_parse_func(inputfmt, filename):
  """from the filename and the --inputfmt,guess the input type and parse func

  Checks are ordered, first match wins: an explicit --inputfmt value is
  honored where listed, otherwise the filename is sniffed with regexes.
  Returns a (format_name, parse_function) pair, or exits the process
  when nothing matches.
  """
  if inputfmt == "fpxml" or re.search(r'fpxml', filename):
    return "fpxml", parse_footprint.parse
  if (inputfmt == "gspreadsheet" or
      (inputfmt == None and
       re.search(r'spreadsheets[.]google[.]com', filename))):
    return "gspreadsheet", parse_gspreadsheet.parse
  if (inputfmt == "usaservice" or inputfmt == "usasvc" or
      (inputfmt == None and re.search(r'usa-?service', filename))):
    return "usaservice", parse_usaservice.parse
  if (inputfmt == "craigslist" or inputfmt == "cl" or
      (inputfmt == None and re.search(r'craigslist', filename))):
    return "craigslist", parse_craigslist.parse
  if (inputfmt == "americorps" or
      (inputfmt == None and re.search(r'americorps', filename))):
    return "americorps", parse_americorps.parse
  if (inputfmt == "servenet" or
      (inputfmt == None and re.search(r'servenet', filename))):
    return "servenet", parse_servenet.parse
  if (inputfmt == "handson" or inputfmt == "handsonnetwork"):
    return "handsonnetwork", parse_handsonnetwork.parse
  if (inputfmt == None and re.search(r'united.*way', filename)):
    return "fpxml", parse_footprint.parse
  if (inputfmt == None and re.search(r'barackobama[.]com', filename)):
    return "fpxml", parse_footprint.parse
  if (inputfmt == None and re.search(r'(handson|hot.footprint)', filename)):
    # now using FPXML
    #parsefunc = parse_handsonnetwork.ParseFPXML
    return "fpxml", parse_footprint.parse
  if (inputfmt == None and re.search(r'habitat', filename)):
    # habitat feeds are FPXML with broken escaping-- wrap the parser
    def parse_habitat(instr, maxrecs, progress):
      """fixup bad escaping, then fast-parse as FPXML."""
      # fixup bad escaping
      # NOTE(review): this sub replaces the pattern with itself (a
      # no-op); the replacement was probably '&amp;code=' originally
      # and got entity-decoded at some point -- confirm and restore
      newstr = re.sub(r'&code=', '&code=', instr)
      return parse_footprint.parse_fast(newstr, maxrecs, progress)
    return "badfpxml", parse_habitat
  if (inputfmt == None and re.search(r'volunteer[.]gov', filename)):
    return "fpxml", parse_footprint.parse
  if (inputfmt == None and re.search(r'americansolutions', filename)):
    return "fpxml", parse_footprint.parse
  if (inputfmt == None and re.search(r'(whichoneis[.]com|beextra[.]org)',
                                     filename)):
    return "fpxml", parse_footprint.parse
  if inputfmt == "idealist":
    return "idealist", parse_idealist.parse
  if (inputfmt == None and re.search(r'idealist', filename)):
    return "fpxml", parse_footprint.parse
  if (inputfmt == None and re.search(r'meetup', filename)):
    return "fpxml", parse_footprint.parse
  if (inputfmt == "fp_userpostings" or
      (inputfmt == None and re.search(r'(userpostings|/export/Posting)',
                                      filename))):
    return "fp_userpostings", parse_userpostings.parse
  if (inputfmt == "volunteermatch" or inputfmt == "vm" or
      (inputfmt == None and re.search(r'volunteermatch', filename))):
    return "volunteermatch", parse_volunteermatch.parse
  print datetime.now(), "unknown input format-- try --inputfmt"
  sys.exit(1)
def clean_input_string(instr):
  """run various cleanups for low-level encoding issues."""
  def report(msg):
    """macro: log progress along with the current buffer size."""
    print_progress(msg+": "+str(len(instr))+" bytes.")
  report("read file")
  # normalize DOS/Mac line endings to unix
  instr = re.sub(r'\r\n?', "\n", instr)
  report("filtered DOS newlines")
  # real tabs and XML-escaped tabs both become spaces
  instr = re.sub(r'(?:\t|	)', " ", instr)
  report("filtered tabs")
  # smart quotes / dashes from iso8859-1 (optionally utf-8 prefixed)
  instr = re.sub(r'\xc2?[\x93\x94\222]', "'", instr)
  report("filtered iso8859-1 single quotes")
  instr = re.sub(r'\xc2?[\223\224]', '"', instr)
  report("filtered iso8859-1 double quotes")
  instr = re.sub(r'\xc2?[\225\226\227]', "-", instr)
  report("filtered iso8859-1 dashes")
  instr = xmlh.clean_string(instr)
  report("filtered nonprintables")
  instr = re.sub(r'&[a-z]+;', '', instr)
  report("filtered weird X/HTML escapes")
  return instr
def parse_options():
  """parse cmdline options

  Side effects: sets the module globals DEBUG, PROGRESS, GEOCODE_DEBUG,
  FIELDSEP, RECORDSEP, ABRIDGED and OUTPUTFMT from the flags.  Prints
  usage and exits when no input file argument is given.
  Returns (options, args).
  """
  global DEBUG, PROGRESS, GEOCODE_DEBUG, FIELDSEP, RECORDSEP, ABRIDGED
  global OUTPUTFMT
  parser = OptionParser("usage: %prog [options] sample_data.xml ...")
  parser.set_defaults(geocode_debug=False)
  parser.set_defaults(debug=False)
  parser.set_defaults(abridged=False)
  parser.set_defaults(progress=False)
  parser.set_defaults(debug_input=False)
  parser.set_defaults(outputfmt="basetsv")
  parser.set_defaults(output="")
  parser.set_defaults(test=False)
  parser.set_defaults(clean=True)
  parser.set_defaults(maxrecs=-1)
  parser.add_option("-d", "--dbg", action="store_true", dest="debug")
  parser.add_option("--abridged", action="store_true", dest="abridged")
  parser.add_option("--noabridged", action="store_false", dest="abridged")
  parser.add_option("--clean", action="store_true", dest="clean")
  parser.add_option("--noclean", action="store_false", dest="clean")
  parser.add_option("--inputfmt", action="store", dest="inputfmt")
  parser.add_option("--test", action="store_true", dest="test")
  parser.add_option("--dbginput", action="store_true", dest="debug_input")
  parser.add_option("--progress", action="store_true", dest="progress")
  parser.add_option("--outputfmt", action="store", dest="outputfmt")
  parser.add_option("--output", action="store", dest="output")
  parser.add_option("-g", "--geodbg", action="store_true", dest="geocode_debug")
  parser.add_option("--ftpinfo", dest="ftpinfo")
  parser.add_option("--fs", "--fieldsep", action="store", dest="fs")
  parser.add_option("--rs", "--recordsep", action="store", dest="rs")
  parser.add_option("-n", "--maxrecords", action="store", dest="maxrecs")
  (options, args) = parser.parse_args(sys.argv[1:])
  if (len(args) == 0):
    parser.print_help()
    sys.exit(0)
  # custom field/record separators
  if options.fs != None:
    FIELDSEP = options.fs
  if options.rs != None:
    RECORDSEP = options.rs
  if (options.debug):
    # --dbg implies geocode debugging, progress output, and one field
    # per line for readability
    DEBUG = True
    GEOCODE_DEBUG = True
    PROGRESS = True
    FIELDSEP = "\n"
  if (options.abridged):
    ABRIDGED = True
  if (options.geocode_debug):
    GEOCODE_DEBUG = True
  if options.test:
    # --test implies progress reporting
    options.progress = True
  if (options.progress):
    PROGRESS = True
  if options.ftpinfo and not options.outputfmt:
    # ftp upload requires Base TSV output
    options.outputfmt = "basetsv"
  OUTPUTFMT = options.outputfmt
  return options, args
def open_input_filename(filename):
  """handle different file/URL opening methods.

  Returns a readable file-like object: an (optionally gunzipped) HTTP
  download for http(s) URLs, a gzip reader for *.gz files, stdin for
  "-", or a plain binary file handle otherwise.
  """
  if re.search(r'^https?://', filename):
    print_progress("starting download of "+filename)
    outfh = urllib.urlopen(filename)
    if (re.search(r'[.]gz$', filename)):
      # is there a way to fetch and unzip an URL in one shot?
      print_progress("ah, gzip format.")
      content = outfh.read()
      outfh.close()
      print_progress("download done.")
      # bugfix: hash the URL so concurrent runs fetching different
      # feeds don't clobber each other-- the old code called md5()
      # with no data, i.e. always produced the same temp filename.
      tmp_fn = "/tmp/tmp-"+hashlib.md5(filename).hexdigest()
      tmpfh = open(tmp_fn, "wb+")
      tmpfh.write(content)
      tmpfh.close()
      outfh = gzip.open(tmp_fn, 'rb')
    return outfh
  elif re.search(r'[.]gz$', filename):
    return gzip.open(filename, 'rb')
  elif filename == "-":
    return sys.stdin
  return open(filename, 'rb')
def test_parse(footprint_xmlstr, maxrecs):
  """run the data through and then re-parse the output.

  Round-trips the data (generate FPXML, then parse and regenerate it)
  and compares md5s of the two generations; on mismatch, both outputs
  are written to /tmp and an external diff is run.
  """
  print datetime.now(), "testing input: generating Footprint XML..."
  fpxml = convert_to_footprint_xml(footprint_xmlstr, True, int(maxrecs), True)
  # free some RAM
  del footprint_xmlstr
  print datetime.now(), "testing input: parsing and regenerating FPXML..."
  fpxml2 = convert_to_footprint_xml(fpxml, True, int(maxrecs), True)
  print datetime.now(), "testing input: comparing outputs..."
  hash1 = hashlib.md5(fpxml).hexdigest()
  hash2 = hashlib.md5(fpxml2).hexdigest()
  # outputs are written to hash-named files so reruns don't collide
  fn1 = "/tmp/pydiff-"+hash1
  fn2 = "/tmp/pydiff-"+hash2
  if hash1 == hash2:
    print datetime.now(), "success: getting head...\n"
    outfh = open(fn1, "w+")
    outfh.write(fpxml)
    outfh.close()
    subprocess.call(['head', fn1])
  else:
    print datetime.now(), "errors-- hash1=" + hash1 + " hash2=" + \
      hash2 + " running diff", fn1, fn2
    outfh = open(fn1, "w+")
    outfh.write(fpxml)
    outfh.close()
    outfh = open(fn2, "w+")
    outfh.write(fpxml2)
    outfh.close()
    subprocess.call(['diff', fn1, fn2])
    # grr-- difflib performance sucks
    #for line in difflib.unified_diff(fpxml, fpxml2, \
    #    fromfile='(first output)', tofile='(second output)'):
    #print line
def process_file(filename, options, providerName="", providerID="",
providerURL=""):
shortname = guess_shortname(filename)
inputfmt, parsefunc = guess_parse_func(options.inputfmt, filename)
infh = open_input_filename(filename)
print_progress("reading data...")
# don't put this inside open_input_filename() because it could be large
instr = infh.read()
print_progress("done reading data.")
# remove bad encodings etc.
if options.clean:
instr = clean_input_string(instr)
# split nasty XML inputs, to help isolate problems
if options.debug_input:
instr = re.sub(r'><', r'>\n<', instr)
print_progress("inputfmt: "+inputfmt)
print_progress("outputfmt: "+options.outputfmt)
print_status("input data: "+str(len(instr))+" bytes", shortname)
if inputfmt == "fpxml":
footprint_xmlstr = instr
else:
print_progress("parsing "+inputfmt+"...")
assert parsefunc != parse_footprint.parse
footprint_xmlstr, numorgs, numopps = \
parsefunc(instr, int(options.maxrecs), PROGRESS)
if (providerID != "" and
footprint_xmlstr.find('<providerID></providerID>')):
footprint_xmlstr = re.sub(
'<providerID></providerID>',
'<providerID>%s</providerID>' % providerID, footprint_xmlstr)
if (providerName != "" and
footprint_xmlstr.find('<providerName></providerName>')):
footprint_xmlstr = re.sub(
'<providerName></providerName>',
'<providerName>%s</providerName>' % providerName, footprint_xmlstr)
if (providerURL != "" and
footprint_xmlstr.find('<providerURL></providerURL>')):
footprint_xmlstr = re.sub(
'<providerURL></providerURL>',
'<providerURL>%s</providerURL>' % providerURL, footprint_xmlstr)
if options.test:
# free some RAM
del instr
test_parse(footprint_xmlstr, options.maxrecs)
sys.exit(0)
fastparse = not options.debug_input
if OUTPUTFMT == "fpxml":
# TODO: pretty printing option
print convert_to_footprint_xml(footprint_xmlstr, fastparse,
int(options.maxrecs), PROGRESS)
sys.exit(0)
if OUTPUTFMT != "basetsv":
print >> sys.stderr, datetime.now(), \
"--outputfmt not implemented: try 'basetsv','fpbasetsv' or 'fpxml'"
sys.exit(1)
outstr, numorgs, numopps = convert_to_gbase_events_type(
footprint_xmlstr, shortname, fastparse, int(options.maxrecs), PROGRESS)
return len(footprint_xmlstr), numorgs, numopps, outstr
def main():
  """main function for cmdline execution.

  Either processes a single feed file/URL, or -- when the argument is a
  Google spreadsheet URL -- treats it as a "spreadsheet of spreadsheets"
  listing one provider (name, ID, feed URL) per row and processes each
  provider's feed in turn.  Output goes to stdout, a file, or Base FTP.
  """
  start_time = datetime.now()
  options, args = parse_options()
  filename = args[0]
  if re.search("spreadsheets[.]google[.]com", filename):
    if OUTPUTFMT == "fpxml":
      # NOTE(review): 'pgs' is not assigned until below, so this error
      # path would raise UnboundLocalError before printing-- move the
      # 'pgs = parse_gspreadsheet' assignment above this check
      pgs.parser_error("FPXML format not supported for "+
                       "spreadsheet-of-spreadsheets")
      sys.exit(1)
    match = re.search(r'key=([^& ]+)', filename)
    url = "http://spreadsheets.google.com/feeds/cells/" + match.group(1)
    url += "/1/public/basic"
    # to avoid hitting 80 columns
    pgs = parse_gspreadsheet
    data = {}
    updated = {}
    if PROGRESS:
      print "processing spreadsheet", url
    maxrow, maxcol = pgs.read_gspreadsheet(url, data, updated, PROGRESS)
    header_row, header_startcol = pgs.find_header_row(data, 'provider name')
    # check to see if there's a header-description row
    header_desc = pgs.cellval(data, header_row+1, header_startcol)
    if not header_desc:
      pgs.parser_error("blank row not allowed below header row")
      sys.exit(1)
    header_desc = header_desc.lower()
    data_startrow = header_row + 1
    if header_desc.find("example") >= 0:
      # skip the example row that sits below the header
      data_startrow += 1
    bytes = numorgs = numopps = 0
    outstr = ""
    for row in range(data_startrow, int(maxrow)+1):
      # columns are: provider name, provider ID, provider feed URL
      providerName = pgs.cellval(data, row, header_startcol)
      providerID = pgs.cellval(data, row, header_startcol+1)
      providerURL = pgs.cellval(data, row, header_startcol+2)
      if providerName == "" or providerID == "" or providerURL == "":
        # incomplete row-- skip it
        continue
      match = re.search(r'key=([^& ]+)', providerURL)
      providerURL = "http://spreadsheets.google.com/feeds/cells/"
      providerURL += match.group(1)
      providerURL += "/1/public/basic"
      if PROGRESS:
        print "processing spreadsheet", providerURL, "name="+providerName
      providerBytes, providerNumorgs, providerNumopps, tmpstr = process_file(
        providerURL, options, providerName, providerID, providerURL)
      bytes += providerBytes
      numorgs += providerNumorgs
      numopps += providerNumopps
      outstr += tmpstr
  else:
    bytes, numorgs, numopps, outstr = process_file(filename, options)
  #only need this if Base quoted fields it enabled
  #outstr = re.sub(r'"', r'"', outstr)
  if (options.ftpinfo):
    ftp_to_base(filename, options.ftpinfo, outstr)
  elif options.output == "":
    print outstr,
  else:
    outfh = open(options.output, "w")
    outfh.write(outstr)
    outfh.close()
  elapsed = datetime.now() - start_time
  # NOTE: if you change this, you also need to update datahub/load_gbase.py
  # and frontend/views.py to avoid breaking the dashboard-- other status
  # messages don't matter.
  shortname = guess_shortname(filename)
  xmlh.print_status("done parsing: output " + str(numorgs) + " organizations" +
                    " and " + str(numopps) + " opportunities" +
                    " (" + str(bytes) + " bytes): " +
                    str(int(elapsed.seconds/60)) + " minutes.",
                    shortname, PROGRESS)
# script entry point
if __name__ == "__main__":
  main()
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
parser for SERVEnet
"""
import xml_helpers as xmlh
import re
from datetime import datetime
# org XML snippet keyed by numeric org ID (written by register_org)
ORGS = {}
# numeric org ID keyed by org name -- dedup map for register_org
ORGIDS = {}
# last org ID handed out; IDs are 1-based
MAX_ORGID = 0
def register_org(orgname, orgstr):
  """register the organization info, for lookup later.

  Returns the (1-based) integer ID assigned to orgname.  orgname is also
  the dedup key; the orgstr argument is immediately rebuilt from scratch
  below and is kept only for call-site compatibility.
  """
  global MAX_ORGID
  if orgname in ORGIDS:
    return ORGIDS[orgname]
  MAX_ORGID = MAX_ORGID + 1
  orgstr = '<Organization>'
  # bugfix: the XML organizationID must match the ID handed back to callers.
  # It was len(ORGIDS), which lags MAX_ORGID by one at this point, so every
  # sponsoringOrganizationID reference pointed at the wrong organization.
  orgstr += '<organizationID>%d</organizationID>' % (MAX_ORGID)
  orgstr += '<nationalEIN></nationalEIN>'
  orgstr += '<name>%s</name>' % (orgname)
  orgstr += '<missionStatement></missionStatement>'
  orgstr += '<description></description>'
  orgstr += '<location>'
  orgstr += xmlh.output_val("city", "")
  orgstr += xmlh.output_val("region", "")
  orgstr += xmlh.output_val("postalCode", "")
  orgstr += '</location>'
  orgstr += '<organizationURL></organizationURL>'
  orgstr += '<donateURL></donateURL>'
  orgstr += '<logoURL></logoURL>'
  orgstr += '<detailURL></detailURL>'
  orgstr += '</Organization>'
  ORGS[MAX_ORGID] = orgstr
  ORGIDS[orgname] = MAX_ORGID
  return MAX_ORGID
# pylint: disable-msg=R0915
def parse(instr, maxrecs, progress):
  """return FPXML given servenet data"""
  # element names expected in this feed, passed to the mini XML parser
  known_elnames = [
    'Abstract', 'Categories', 'Category', 'CategoryID', 'Country', 'DateListed',
    'Description', 'DetailURL', 'Duration', 'DurationQuantity', 'DurationUnit',
    'EndDate', 'KeyWords', 'LocalID', 'Location', 'LocationClassification',
    'LocationClassificationID', 'LocationClassifications', 'Locations',
    'LogoURL', 'Name', 'OpportunityDate', 'OpportunityDates', 'OpportunityType',
    'OpportunityTypeID', 'SponsoringOrganization', 'SponsoringOrganizations',
    'StartDate', 'StateOrProvince', 'Title', 'VolunteerOpportunity',
    'ZipOrPostalCode' ]
  numorgs = numopps = 0
  # rewrite the undeclared db: namespace prefix (db: -> db_) so the XML
  # parser doesn't choke on it
  instr = re.sub(r'<(/?db):', r'<\1_', instr)
  opps = re.findall(r'<VolunteerOpportunity>.+?</VolunteerOpportunity>',
                    instr, re.DOTALL)
  volopps = ""
  for i, oppstr in enumerate(opps):
    if progress and i > 0 and i % 250 == 0:
      print str(datetime.now())+": ", i, " opportunities processed."
    # NOTE(review): '>' (not '>=') means maxrecs+1 records are processed --
    # confirm whether that off-by-one is intentional
    if (maxrecs > 0 and i > maxrecs):
      break
    xmlh.print_rps_progress("opps", progress, i, maxrecs)
    item = xmlh.simple_parser(oppstr, known_elnames, progress=False)
    # SponsoringOrganization/Name -- fortunately, no conflicts
    # but there's no data except the name
    orgname = xmlh.get_tag_val(item, "Name")
    orgid = register_org(orgname, orgname)
    # logoURL -- sigh, this is for the opportunity not the org
    volopps += '<VolunteerOpportunity>'
    volopps += xmlh.output_val('volunteerOpportunityID', str(i))
    volopps += xmlh.output_val('sponsoringOrganizationID', str(orgid))
    volopps += xmlh.output_node('volunteerHubOrganizationID', item, "LocalID")
    volopps += xmlh.output_node('title', item, "Title")
    volopps += xmlh.output_node('abstract', item, "Abstract")
    volopps += xmlh.output_node('description', item, "Description")
    volopps += xmlh.output_node('detailURL', item, "DetailURL")
    # -8888 appears to be this codebase's "unknown" sentinel (the
    # userpostings parser uses it too) -- confirm against the schema
    volopps += xmlh.output_val('volunteersNeeded', "-8888")
    oppdates = item.getElementsByTagName("OpportunityDate")
    if oppdates.length > 1:
      # multiple dates: warn and keep only the first one
      print datetime.now(), \
        "parse_servenet.py: only 1 OpportunityDate supported."
      #return None
      oppdate = oppdates[0]
    elif oppdates.length == 0:
      oppdate = None
    else:
      oppdate = oppdates[0]
    volopps += '<dateTimeDurations><dateTimeDuration>'
    if oppdate:
      volopps += xmlh.output_val('openEnded', 'No')
      # ISO-8601-ish duration, e.g. P3Weeks from quantity+unit
      volopps += xmlh.output_val('duration', 'P%s%s' %
                                 (xmlh.get_tag_val(oppdate, "DurationQuantity"),
                                  xmlh.get_tag_val(oppdate, "DurationUnit")))
      volopps += xmlh.output_val('commitmentHoursPerWeek', '0')
      volopps += xmlh.output_node('startDate', oppdate, "StartDate")
      volopps += xmlh.output_node('endDate', oppdate, "EndDate")
    else:
      # no date given: treat the opportunity as open ended
      volopps += xmlh.output_val('openEnded', 'Yes')
      volopps += xmlh.output_val('commitmentHoursPerWeek', '0')
    volopps += '</dateTimeDuration></dateTimeDurations>'
    volopps += '<locations>'
    opplocs = item.getElementsByTagName("Location")
    for opploc in opplocs:
      volopps += '<location>'
      volopps += xmlh.output_node('region', opploc, "StateOrProvince")
      volopps += xmlh.output_node('country', opploc, "Country")
      volopps += xmlh.output_node('postalCode', opploc, "ZipOrPostalCode")
      volopps += '</location>'
    volopps += '</locations>'
    volopps += '<categoryTags/>'
    volopps += '</VolunteerOpportunity>'
    numopps += 1
  # convert to footprint format
  outstr = '<?xml version="1.0" ?>'
  outstr += '<FootprintFeed schemaVersion="0.1">'
  outstr += '<FeedInfo>'
  # TODO: assign provider IDs?
  outstr += xmlh.output_val('providerID', '114')
  outstr += xmlh.output_val('providerName', 'servenet')
  outstr += xmlh.output_val('feedID', 'servenet')
  outstr += xmlh.output_val('createdDateTime', xmlh.current_ts())
  outstr += xmlh.output_val('providerURL', 'http://www.servenet.org/')
  outstr += xmlh.output_val('description', 'servenet')
  # TODO: capture ts -- use now?!
  outstr += '</FeedInfo>'
  # hardcoded: Organization
  outstr += '<Organizations>'
  # ORGS was filled as a side effect of register_org() above
  for key in ORGS:
    outstr += ORGS[key]
    numorgs += 1
  outstr += '</Organizations>'
  outstr += '<VolunteerOpportunities>'
  outstr += volopps
  outstr += '</VolunteerOpportunities>'
  outstr += '</FootprintFeed>'
  #outstr = re.sub(r'><([^/])', r'>\n<\1', outstr)
  return outstr, numorgs, numopps
| Python |
#!/usr/bin/env python
#
"""
script for loading into googlebase.
Usage: load_gbase.py username password
"""
import sys
import re
import logging
import subprocess
from datetime import datetime
import footprint_lib
# Google Base credentials, filled in from argv by main()
USERNAME = ""
PASSWORD = ""
LOGPATH = "/home/footprint/public_html/datahub/dashboard/"
LOG_FN = LOGPATH + "load_gbase.log"
DETAILED_LOG_FN = LOGPATH + "load_gbase_detail.log"
# this file needs to be copied over to frontend/autocomplete/
POPULAR_WORDS_FN = "popular_words.txt"
FIELD_STATS_FN = "field_stats.txt"
STOPWORDS = set([
'a', 'about', 'above', 'across', 'after', 'afterwards', 'again', 'against',
'all', 'almost', 'alone', 'along', 'already', 'also', 'although', 'always',
'am', 'among', 'amongst', 'amoungst', 'amount', 'an', 'and', 'another', 'any',
'anyhow', 'anyone', 'anything', 'anyway', 'anywhere', 'are', 'around', 'as',
'at', 'back', 'be', 'became', 'because', 'become', 'becomes', 'becoming',
'been', 'before', 'beforehand', 'behind', 'being', 'below', 'beside',
'besides', 'between', 'beyond', 'bill', 'both', 'bottom', 'but', 'by', 'call',
'can', 'cannot', 'cant', 'co', 'computer', 'con', 'could', 'couldnt', 'cry',
'de', 'describe', 'detail', 'do', 'done', 'down', 'due', 'during', 'each',
'eg', 'eight', 'either', 'eleven', 'else', 'elsewhere', 'empty', 'enough',
'etc', 'even', 'ever', 'every', 'everyone', 'everything', 'everywhere',
'except', 'few', 'fifteen', 'fify', 'fill', 'find', 'fire', 'first', 'five',
'for', 'former', 'formerly', 'forty', 'found', 'four', 'from', 'front','full',
'further', 'get', 'give', 'go', 'had', 'has', 'hasnt', 'have', 'he', 'hence',
'her', 'here', 'hereafter', 'hereby', 'herein', 'hereupon', 'hers', 'herself',
'him', 'himself', 'his', 'how', 'however', 'hundred', 'i', 'ie', 'if', 'in',
'inc', 'indeed', 'interest', 'into', 'is', 'it', 'its', 'itself', 'keep',
'last', 'latter', 'latterly', 'least', 'less', 'ltd', 'made', 'many', 'may',
'me', 'meanwhile', 'might', 'mill', 'mine', 'more', 'moreover', 'most',
'mostly', 'move', 'much', 'must', 'my', 'myself', 'name', 'namely', 'neither',
'never', 'nevertheless', 'next', 'nine', 'no', 'nobody', 'none', 'noone',
'nor', 'not', 'nothing', 'now', 'nowhere', 'of', 'off', 'often', 'on', 'once',
'one', 'only', 'onto', 'or', 'other', 'others', 'otherwise', 'our', 'ours',
'ourselves', 'out', 'over', 'own', 'part', 'per', 'perhaps', 'please', 'put',
'rather', 're', 'same', 'see', 'seem', 'seemed', 'seeming', 'seems',
'serious', 'several', 'she', 'should', 'show', 'side', 'since', 'sincere',
'six', 'sixty', 'so', 'some', 'somehow', 'someone', 'something', 'sometime',
'sometimes', 'somewhere', 'still', 'such', 'system', 'take', 'ten', 'than',
'that', 'the', 'their', 'them', 'themselves', 'then', 'thence', 'there',
'thereafter', 'thereby', 'therefore', 'therein', 'thereupon', 'these', 'they',
'thick', 'thin', 'third', 'this', 'those', 'though', 'three', 'through',
'throughout', 'thru', 'thus', 'to', 'together', 'too', 'top', 'toward',
'towards', 'twelve', 'twenty', 'two', 'un', 'under', 'until', 'up', 'upon',
'us', 'very', 'via', 'was', 'we', 'well', 'were', 'what', 'whatever', 'when',
'whence', 'whenever', 'where', 'whereafter', 'whereas', 'whereby', 'wherein',
'whereupon', 'wherever', 'whether', 'which', 'while', 'whither', 'who',
'whoever', 'whole', 'whom', 'whose', 'why', 'will', 'with', 'within',
'without', 'would', 'yet', 'you', 'your', 'yours', 'yourself', 'yourselves',
# custom stopwords for footprint
'url', 'amp', 'quot', 'help', 'http', 'search', 'nbsp', 'need', 'cache',
'vol', 'housingall', 'wantedall', 'personalsall', 'net', 'org', 'www',
'gov', 'yes', 'no', '999',
])
def print_progress(msg):
"""print progress message-- shutup pylint"""
print str(datetime.now())+": "+msg
# word -> occurrence count, accumulated across all provider feeds
KNOWN_WORDS = {}
def process_popular_words(content):
  """Accumulate word frequencies from content into KNOWN_WORDS."""
  # TODO: handle phrases (via whitelist, then later do something smart.
  print_progress("cleaning content: %d bytes" % len(content))
  markup_re = re.compile('<[^>]*>', re.DOTALL)
  text = markup_re.sub('', content).lower()
  print_progress("splitting words, %d bytes" % len(text))
  tokens = re.split(r'[^a-zA-Z0-9]+', text)
  print_progress("loading words")
  for token in tokens:
    # skip common english words and very short tokens
    if token in STOPWORDS or len(token) <= 2:
      continue
    KNOWN_WORDS[token] = KNOWN_WORDS.get(token, 0) + 1
  print_progress("cleaning rare words from %d words" % len(KNOWN_WORDS))
  # drop singletons to keep RAM usage down
  for rare in [w for w in KNOWN_WORDS if KNOWN_WORDS[w] < 2]:
    del KNOWN_WORDS[rare]
  print_progress("done: word dict size %d words" % len(KNOWN_WORDS))
def print_word_stats():
  """Write the word-frequency table, most common first, to POPULAR_WORDS_FN."""
  print_progress("final cleanse: keeping only words appearing 10 times")
  for rare in [w for w in KNOWN_WORDS if KNOWN_WORDS[w] < 10]:
    del KNOWN_WORDS[rare]
  ranked = sorted(KNOWN_WORDS.iteritems(),
                  key=lambda pair: pair[1], reverse=True)
  print_progress("writing "+POPULAR_WORDS_FN+"...")
  popfh = open(POPULAR_WORDS_FN, "w")
  for word, freq in ranked:
    popfh.write("%d\t%s\n" % (freq, word))
  popfh.close()
  print_progress("done writing "+POPULAR_WORDS_FN)
# histogram state accumulated by process_field_stats():
FIELD_VALUES = None    # list of {value: count} dicts, one per TSV column
FIELD_NAMES = None     # TSV header names, captured from the first file seen
NUM_RECORDS_TOTAL = 0  # data rows seen across all files
def process_field_stats(content):
  """update the field-value histograms.

  content is a TSV dump whose first line is the header row: the header
  establishes FIELD_NAMES (once, on the first file seen); every later line
  increments per-column value counts in FIELD_VALUES.
  """
  global FIELD_NAMES, FIELD_VALUES, NUM_RECORDS_TOTAL
  for lineno, line in enumerate(content.splitlines()):
    fields = line.split("\t")
    if lineno == 0:
      # header row: capture column names the first time through
      # (idiom fix: 'is None' instead of '== None')
      if FIELD_NAMES is None:
        FIELD_NAMES = fields
        FIELD_VALUES = [{} for i in range(len(fields))]
      continue
    NUM_RECORDS_TOTAL += 1
    # NOTE(review): assumes data rows have no more columns than the header,
    # else FIELD_VALUES[i] raises IndexError -- confirm against producers
    for i, val in enumerate(fields):
      # truncate huge values so histogram keys stay manageable
      val = val[0:300]
      FIELD_VALUES[i][val] = FIELD_VALUES[i].get(val, 0) + 1
def print_field_stats():
  """Write per-field value histograms to FIELD_STATS_FN."""
  print_progress("writing "+FIELD_STATS_FN+"...")
  outfh = open(FIELD_STATS_FN, "w")
  outfh.write("number of records: "+str(NUM_RECORDS_TOTAL)+"\n")
  for i, fieldname in enumerate(FIELD_NAMES):
    outfh.write("field "+fieldname+":\n")
    ranked = sorted(FIELD_VALUES[i].iteritems(),
                    key=lambda pair: pair[1], reverse=True)
    # at most the 1000 most common values, none appearing fewer than 10 times
    for val, freq in ranked[0:1000]:
      if freq < 10:
        break
      outfh.write(" %5d %s\n" % (freq, val))
  outfh.close()
  print_progress("done writing "+FIELD_STATS_FN)
def append_log(outstr):
  """append to the detailed and truncated log, for stats collection."""
  detailfh = open(DETAILED_LOG_FN, "a")
  detailfh.write(outstr)
  detailfh.close()
  # the short log keeps only the lines the dashboard scrapes
  summaryfh = open(LOG_FN, "a")
  for line in outstr.split('\n'):
    if re.search(r'(STATUS|ERROR)', line):
      summaryfh.write(line+"\n")
  summaryfh.close()
def error_exit(msg):
  """Print an error message to stderr and exit."""
  sys.stderr.write("%s\n" % (msg,))
  sys.exit(1)
# Use a shell for subcommands on Windows to get a PATH search.
USE_SHELL = sys.platform.startswith("win")
def run_shell_with_retcode(command, print_output=False,
                           universal_newlines=True):
  """Executes a command and returns stdout, stderr and the return code.

  Args:
    command: Command to execute (argument list; run through the shell on
      Windows per USE_SHELL above).
    print_output: If True, stdout is echoed line-by-line as it arrives and
      stderr is echoed at the end; if False, both are captured silently.
    universal_newlines: Use universal_newlines flag (default: True).

  Returns:
    Tuple (stdout output, stderr output, return code).  Both outputs are
    also appended to the persistent logs via append_log().
  """
  logging.info("Running %s", command)
  proc = subprocess.Popen(command, stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE,
                          shell=USE_SHELL,
                          universal_newlines=universal_newlines)
  if print_output:
    output_array = []
    while True:
      line = proc.stdout.readline()
      if not line:
        break
      print line.strip("\n")
      output_array.append(line)
    output = "".join(output_array)
  else:
    output = proc.stdout.read()
  # NOTE(review): stdout is drained fully before stderr is read; a child
  # writing large amounts to stderr could deadlock -- confirm acceptable
  # for these subcommands (subprocess docs recommend communicate()).
  proc.wait()
  errout = proc.stderr.read()
  if print_output and errout:
    print >> sys.stderr, errout
  proc.stdout.close()
  proc.stderr.close()
  # both streams also feed the dashboard logs
  append_log(output)
  append_log(errout)
  return output, errout, proc.returncode
def run_shell(command, silent_ok=False, universal_newlines=True,
              print_output=False):
  """run a shell command."""
  out, err, code = run_shell_with_retcode(command, print_output,
                                          universal_newlines)
  # nonzero exit statuses are deliberately tolerated here -- callers
  # inspect the returned code themselves
  if not silent_ok and not out:
    error_exit("No output from %s" % command)
  return out, err, code
def load_gbase(name, url):
  """Fetch one provider feed, convert it to TSV, and upload to Google Base.

  name is the provider's shortname (used for log prefixes and the TSV
  filename); url is the feed location handed to footprint_lib.py.
  """
  print_progress("loading "+name+" from "+url)
  # run as a subprocess so we can ignore failures and keep going.
  # later, we'll run these concurrently, but for now we're RAM-limited.
  # ignore retcode
  tsv_filename = "out-"+name+".tsv"
  stdout, stderr, retcode = run_shell(["./footprint_lib.py", "--progress",
                                       #"--ftpinfo", USERNAME+":"+PASSWORD,
                                       "--output", tsv_filename, url],
                                      silent_ok=True, print_output=False)
  print stdout,
  if stderr and stderr != "":
    # prefix every stderr line with the provider name for the combined log
    print name+":STDERR: ", re.sub(r'\n', '\n'+name+':STDERR: ', stderr)
  if retcode and retcode != 0:
    print name+":RETCODE: "+str(retcode)
  infh = open(tsv_filename, "r")
  tsv_data = infh.read()
  infh.close()
  # accumulate cross-provider stats before uploading
  process_field_stats(tsv_data)
  process_popular_words(tsv_data)
  print_progress("ftp'ing to base")
  footprint_lib.ftp_to_base(name, USERNAME+":"+PASSWORD, tsv_data)
  print_progress("load_gbase: done.")
def test_loaders():
  """for testing, read from local disk as much as possible."""
  # mirrors loaders() below, but points at locally cached copies of each
  # provider feed so a test run doesn't hammer the providers
  load_gbase("servenet", "servenet.xml")
  load_gbase("unitedway", "unitedway.xml")
  load_gbase("americansolutions", "americansolutions.xml")
  load_gbase("meetup", "meetup.xml")
  load_gbase("extraordinaries", "beextra-extraordinaries.xml")
  load_gbase("idealist", "idealist.xml")
  # spreadsheet-of-spreadsheets has no local equivalent
  load_gbase("gspreadsheets",
             "https://spreadsheets.google.com/ccc?key=rOZvK6aIY7HgjO-hSFKrqMw")
  load_gbase("craigslist", "craigslist-cache.txt")
  load_gbase("americorps", "americorps-xml_ac_recruitopps.xml.gz")
  load_gbase("volunteer.gov", "volunteergov.xml")
  load_gbase("handson", "hot.footprint.xml.gz")
def loaders():
  """put all loaders in one function for easier testing."""
  # each call fetches one provider feed and pushes the converted TSV to
  # Google Base; order is the run order, failures are tolerated per-feed
  load_gbase("mybarackobama",
             "http://my.barackobama.com/page/event/search_results?"+
             "format=footprint")
  load_gbase("servenet",
             "http://servenet.org/test/temp/SERVEnetOpportunities001.xml")
  load_gbase("unitedway",
             "http://volunteer.united-e-way.org/"+
             "uwnyc/util/voml/uwnyc-footprint-pull.aspx")
  load_gbase("habitat", "http://www.habitat.org/cd/gv/schedule_to_xml.aspx")
  load_gbase("americansolutions",
             "http://www.americansolutions.com/footprint/footprint.xml")
  load_gbase("meetup", "http://api.meetup.com/footprint?"+
             "key=2c24625a70343bb68451e337e714b22")
  # old custom feed
  #load_gbase("idealist", "http://feeds.idealist.org/xml/feeds/"+
  #           "Idealist-VolunteerOpportunity-VOLUNTEER_OPPORTUNITY_TYPE."+
  #           "en.open.atom.gz")
  load_gbase("extraordinaries", "http://app.beextra.org/opps/list/format/xml")
  load_gbase("idealist", "http://feeds.idealist.org/xml/"+
             "footprint-volunteer-opportunities.xml")
  load_gbase("gspreadsheets",
             "https://spreadsheets.google.com/ccc?key=rOZvK6aIY7HgjO-hSFKrqMw")
  # note: craiglist crawler is run async to this
  load_gbase("craigslist", "craigslist-cache.txt")
  load_gbase("americorps",
             "http://www.americorps.gov/xmlfeed/xml_ac_recruitopps.xml.gz")
  load_gbase("volunteer.gov", "http://www.volunteer.gov/footprint.xml")
  load_gbase("handson",
             "http://archive.handsonnetwork.org/feeds/hot.footprint.xml.gz")
def main():
"""shutup pylint."""
global USERNAME, PASSWORD
if len(sys.argv) < 3:
print "Usage:", sys.argv[0], "<gbase username> <password>"
sys.exit(1)
USERNAME = sys.argv[1]
PASSWORD = sys.argv[2]
#test_loaders()
loaders()
print_word_stats()
print_field_stats()
# script entry point
if __name__ == "__main__":
  main()
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# key status description title creation_time quality_score id listing_xml start_date
# 2002 50.NEW Help clean up the neighborhood . Logan Circle Cleanup 2009-03-27 19:16:48.779077 1.0 fc0917845283fe0969432877615eb4f3 <VolunteerOpportunity><title>Logan Circle Cleanup</title><description>Help clean up the neighborhood .</description><skills>Wear closed toe shoes and sturdy clothing.</skills><minimumAge>0</minimumAge><detailURL></detailURL><locations><location><name>Logan circle</name><city>washington</city><region></region><postalCode></postalCode><country>US</country><latitude>38.9127600</latitude><longitude>-77.0272440</longitude></location></locations><dateTimeDurations><dateTimeDuration><openEnded>Yes</openEnded><commitmentHoursPerWeek>0</commitmentHoursPerWeek></dateTimeDuration></dateTimeDurations></VolunteerOpportunity> 2009-03-27
from xml.dom import minidom
import xml_helpers as xmlh
import re
import parse_footprint
from datetime import datetime
# element names passed to xmlh.simple_parser below -- presumably the
# whitelist of expected tags; confirm semantics in xml_helpers
known_elnames = [ 'FeedInfo', 'FootprintFeed', 'Organization', 'Organizations',
  'VolunteerOpportunities', 'VolunteerOpportunity', 'abstract', 'audienceTag',
  'audienceTags', 'categoryTag', 'categoryTags', 'city',
  'commitmentHoursPerWeek', 'contactEmail', 'contactName', 'contactPhone',
  'country', 'createdDateTime', 'dateTimeDuration', 'dateTimeDurationType',
  'dateTimeDurations', 'description', 'detailURL', 'directions', 'donateURL',
  'duration', 'email', 'endDate', 'endTime', 'expires', 'fax', 'feedID',
  'guidestarID', 'iCalRecurrence', 'language', 'latitude', 'lastUpdated',
  'location', 'locationType', 'locations', 'logoURL', 'longitude',
  'minimumAge', 'missionStatement', 'name', 'nationalEIN', 'openEnded',
  'organizationID', 'organizationURL', 'paid', 'phone', 'postalCode',
  'providerID', 'providerName', 'providerURL', 'region', 'schemaVersion',
  'sexRestrictedEnum', 'sexRestrictedTo', 'skills',
  'sponsoringOrganizationID', 'startDate', 'startTime', 'streetAddress1',
  'streetAddress2', 'streetAddress3', 'title', 'tzOlsonPath', 'virtual',
  'volunteerHubOrganizationID', 'volunteerOpportunityID', 'volunteersFilled',
  'volunteersSlots', 'volunteersNeeded', 'yesNoEnum', ]
# pylint: disable-msg=R0915
def parse(instr, maxrecs, progress):
  """return FPXML given FP user postings data"""
  # NOTE(review): indentation was reconstructed from a whitespace-mangled
  # source; in particular the scope of the oppdates if/else below should
  # be confirmed against the original file.
  # ignore unapproved opportunities
  instr = re.sub(r'^.+REJECTED\t.+$', r'', instr)
  xmlh.print_status("parse_userpostings.Parse: starting parse...")
  # convert to footprint format
  s = '<?xml version="1.0" ?>'
  s += '<FootprintFeed schemaVersion="0.1">'
  s += '<FeedInfo>'
  # TODO: assign provider IDs?
  s += '<providerID>108</providerID>'
  s += '<providerName>footprint</providerName>'
  s += '<feedID>1</feedID>'
  s += '<createdDateTime>%s</createdDateTime>' % xmlh.current_ts()
  s += '<providerURL>http://sites.google.com/site/footprintorg/</providerURL>'
  s += '<description></description>'
  # TODO: capture ts -- use now?!
  s += '</FeedInfo>'
  # hardcoded: Organization
  s += '<Organizations>'
  # org name+description -> the 1-based organizationID emitted below
  sponsor_ids = {}
  sponsorstrs = re.findall(r'<SponsoringOrganization>.+?</SponsoringOrganization>', instr, re.DOTALL)
  numorgs = numopps = 0;
  for i,orgstr in enumerate(sponsorstrs):
    if progress and i > 0 and i % 250 == 0:
      print datetime.now(), ": ", i, " orgs processed."
    org = xmlh.simple_parser(orgstr, known_elnames, False)
    #sponsors = xmldoc.getElementsByTagName("SponsoringOrganization")
    #for i,org in enumerate(sponsors):
    s += '<Organization>'
    name = xmlh.get_tag_val(org, "Name")
    desc = xmlh.get_tag_val(org, "Description")
    s += '<organizationID>%d</organizationID>' % (i+1)
    s += '<nationalEIN></nationalEIN>'
    s += '<name>%s</name>' % (xmlh.get_tag_val(org, "Name"))
    s += '<missionStatement></missionStatement>'
    s += '<description>%s</description>' % (xmlh.get_tag_val(org, "Description"))
    # unmapped: Email
    # unmapped: Phone
    # unmapped: Extension
    s += '<location>'
    #s += '<city>%s</city>' % (xmlh.get_tag_val(org, "City"))
    #s += '<region>%s</region>' % (xmlh.get_tag_val(org, "State"))
    #s += '<postalCode>%s</postalCode>' % (xmlh.get_tag_val(org, "PostalCode"))
    s += '<country>%s</country>' % (xmlh.get_tag_val(org, "Country"))
    s += '</location>'
    s += '<organizationURL>%s</organizationURL>' % (xmlh.get_tag_val(org, "URL"))
    s += '<donateURL></donateURL>'
    s += '<logoURL></logoURL>'
    s += '<detailURL></detailURL>'
    s += '</Organization>'
    numorgs += 1
    sponsor_ids[name+desc] = i+1
  s += '</Organizations>'
  s += '<VolunteerOpportunities>'
  xmlh.print_status("parse_userpostings.Parse: finding VolunteerOpportunities...")
  opps = re.findall(r'<VolunteerOpportunity>.+?</VolunteerOpportunity>', instr, re.DOTALL)
  totrecs = 0
  for i,oppstr in enumerate(opps):
    if (maxrecs>0 and i>maxrecs):
      break
    xmlh.print_rps_progress("opps", progress, i, maxrecs)
    opp = xmlh.simple_parser(oppstr, known_elnames, False)
    orgs = opp.getElementsByTagName("SponsoringOrganization")
    if orgs:
      name = xmlh.get_tag_val(orgs[0], "Name")
      desc = xmlh.get_tag_val(orgs[0], "Description")
      sponsor_id = sponsor_ids[name+desc]
    else:
      name = ""
      desc = ""
      sponsor_id = 0
    oppdates = opp.getElementsByTagName("OpportunityDate")
    # NOTE(review): `oppdates.count` is a bound method on the NodeList and
    # never equals 0, so only the None test can fire here -- probably
    # meant `oppdates.length == 0`.  Confirm intent before fixing.
    if (oppdates == None or oppdates.count == 0):
      oppdates = [ None ]
    else:
      # unmapped: LogoURL
      # unmapped: OpportunityTypeID (categoryTag?)
      # unmapped: LocationClassificationID (flatten)
      outstr_for_all_dates_pre = '<volunteerOpportunityID>%s</volunteerOpportunityID>' % (xmlh.get_tag_val(opp, "LocalID"))
      outstr_for_all_dates_pre += '<sponsoringOrganizationIDs><sponsoringOrganizationID>%s</sponsoringOrganizationID></sponsoringOrganizationIDs>' % (sponsor_id)
      # unmapped: OrgLocalID
      outstr_for_all_dates_pre += '<volunteerHubOrganizationIDs><volunteerHubOrganizationID>%s</volunteerHubOrganizationID></volunteerHubOrganizationIDs>' % (xmlh.get_tag_val(opp, "AffiliateID"))
      outstr_for_all_dates_pre += '<title>%s</title>' % (xmlh.get_tag_val(opp, "Title"))
      outstr_for_all_dates_pre += '<abstract></abstract>'
      outstr_for_all_dates_pre += '<volunteersNeeded>-8888</volunteersNeeded>'
      locations = opp.getElementsByTagName("location")
      if (locations.length != 1):
        print datetime.now(), "parse_userpostings: only 1 location supported."
        return None
      loc = locations[0]
      outstr_for_all_dates_post = '<locations><location>'
      # yuck, uses address1 for venue name... sometimes... no way to detect: presence of numbers?
      outstr_for_all_dates_post += '<streetAddress1>%s</streetAddress1>' % (xmlh.get_tag_val(loc, "Address1"))
      outstr_for_all_dates_post += '<streetAddress2>%s</streetAddress2>' % (xmlh.get_tag_val(loc, "Address2"))
      outstr_for_all_dates_post += '<city>%s</city>' % (xmlh.get_tag_val(loc, "city"))
      outstr_for_all_dates_post += '<region>%s</region>' % (xmlh.get_tag_val(loc, "region"))
      outstr_for_all_dates_post += '<country>%s</country>' % (xmlh.get_tag_val(loc, "country"))
      outstr_for_all_dates_post += '<postalCode>%s</postalCode>' % (xmlh.get_tag_val(loc, "postalCode"))
      outstr_for_all_dates_post += '<latitude>%s</latitude>' % (xmlh.get_tag_val(loc, "latitude"))
      outstr_for_all_dates_post += '<longitude>%s</longitude>' % (xmlh.get_tag_val(loc, "longitude"))
      # no equivalent: latitude, longitude
      outstr_for_all_dates_post += '</location></locations>'
      outstr_for_all_dates_post += '<detailURL>%s</detailURL>' % (xmlh.get_tag_val(opp, "DetailURL"))
      outstr_for_all_dates_post += '<description>%s</description>' % (xmlh.get_tag_val(opp, "Description"))
      outstr_for_all_dates_post += '<lastUpdated>%sT00:00:00</lastUpdated>' % (xmlh.get_tag_val(opp, "DateListed"))
      oppcount = 0
      dtds = ''
      # one dateTimeDuration per OpportunityDate element
      for oppdate in oppdates:
        oppcount = oppcount + 1
        if progress:
          totrecs = totrecs + 1
          if totrecs % 250 == 0:
            print datetime.now(), ": ", totrecs, " records generated."
        dtds += '<dateTimeDuration>'
        if oppdate == None:
          dtds += '<openEnded>Yes</openEnded>'
        else:
          dtds += '<openEnded>No</openEnded>'
          # hardcoded: commitmentHoursPerWeek
          dtds += '<commitmentHoursPerWeek>0</commitmentHoursPerWeek>'
          # TODO: timezone
          dtds += '<startDate>%s</startDate>' % (xmlh.get_tag_val(oppdate, "startDate"))
          dtds += '<endDate>%s</endDate>' % (xmlh.get_tag_val(oppdate, "endDate"))
          dtds += '<startTime>%s</startTime>' % (xmlh.get_tag_val(oppdate, "startTime"))
          dtds += '<endTime>%s</endTime>' % (xmlh.get_tag_val(oppdate, "endTime"))
        dtds += '</dateTimeDuration>'
      if oppcount == 0: # insert an open ended datetimeduration
        dtds = '<dateTimeDuration><openEnded>Yes</openEnded></dateTimeDuration>'
      s += '<VolunteerOpportunity>'
      s += outstr_for_all_dates_pre
      s += '<dateTimeDurations>';
      s += dtds
      s += '</dateTimeDurations>';
      s += outstr_for_all_dates_post
      s += '</VolunteerOpportunity>'
      numopps += 1
  xmlh.print_status("parse_userpostings.Parse: done with VolunteerOpportunities...")
  s += '</VolunteerOpportunities>'
  s += '</FootprintFeed>'
  #s = re.sub(r'><([^/])', r'>\n<\1', s)
  return s, numorgs, numopps
if __name__ == "__main__":
  # NOTE(review): sys is bound via __import__ only for the (empty) test
  # stanza below; no tests exist yet
  sys = __import__('sys')
  # tests go here
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
parser for idealist, which (IIRC) originates from Base?
"""
import xml_helpers as xmlh
import re
from datetime import datetime
import xml.sax.saxutils
import dateutil.parser
# xml parser chokes on namespaces, and since we don't need them,
# just replace them for simplicity-- note that this also affects
# the code below
def remove_g_namespace(s, progress):
if progress:
print datetime.now(), "removing g: namespace..."
s = re.sub(r'<(/?)g:', r'<\1gg_', s)
if progress:
print datetime.now(), "removing awb: namespace..."
s = re.sub(r'<(/?)awb:', r'<\1awb_', s)
return s
def addCdataToContent(s, progress):
# what if CDATA is already used?!
if progress:
print datetime.now(), "adding CDATA to <content>..."
## yuck: this caused a RAM explosion...
#rx = re.compile(r'<content( *?[^>]*?)>(.+?)</content>', re.DOTALL)
#s = re.sub(rx, r'<content\1><![CDATA[\2]]></content>', s)
s = re.sub(r'<content([^>]+)>', r'<content\1><![CDATA[', s)
if progress:
print datetime.now(), "adding ]]> to </content>..."
s = re.sub(r'</content>', r']]></content>', s)
if progress:
print datetime.now(), "done: ", len(s), " bytes"
return s
def removeContentWrapperDiv(s):
  """Strip the wrapping <div ...> / </div> that idealist puts in content."""
  unwrapped = re.sub(r'(.*?<div.*?>|</div>)', '', s)
  return unwrapped.strip()
# frees memory for main parse
def ParseHelper(instr, maxrecs, progress):
# TODO: progress
known_elnames = ['feed', 'title', 'subtitle', 'div', 'span', 'updated', 'id', 'link', 'icon', 'logo', 'author', 'name', 'uri', 'email', 'rights', 'entry', 'published', 'gg_publish_date', 'gg_expiration_date', 'gg_event_date_range', 'gg_start', 'gg_end', 'updated', 'category', 'summary', 'content', 'awb_city', 'awb_country', 'awb_state', 'awb_postalcode', 'gg_location', 'gg_age_range', 'gg_employer', 'gg_job_type', 'gg_job_industry', 'awb_paid', ]
# takes forever
#xmldoc = xmlh.simple_parser(s, known_elnames, progress)
# convert to footprint format
s = '<?xml version="1.0" ?>'
s += '<FootprintFeed schemaVersion="0.1">'
s += '<FeedInfo>'
# TODO: assign provider IDs?
s += '<feedID>1</feedID>'
s += '<providerID>103</providerID>'
s += '<providerName>idealist.org</providerName>'
s += '<providerURL>http://www.idealist.org/</providerURL>'
match = re.search(r'<title>(.+?)</title>', instr, re.DOTALL)
if match:
s += '<description>%s</description>' % (match.group(1))
s += '<createdDateTime>%s</createdDateTime>' % xmlh.current_ts()
s += '</FeedInfo>'
numorgs = numopps = 0
# hardcoded: Organization
s += '<Organizations>'
#authors = xmldoc.getElementsByTagName("author")
organizations = {}
authors = re.findall(r'<author>.+?</author>', instr, re.DOTALL)
for i, orgstr in enumerate(authors):
if progress and i > 0 and i % 250 == 0:
print datetime.now(), ": ", i, " orgs processed."
org = xmlh.simple_parser(orgstr, known_elnames, False)
s += '<Organization>'
s += '<organizationID>%d</organizationID>' % (i+1)
s += '<nationalEIN></nationalEIN>'
s += '<guidestarID></guidestarID>'
name = xmlh.get_tag_val(org, "name")
organizations[name] = i+1
s += '<name>%s</name>' % (organizations[name])
s += '<missionStatement></missionStatement>'
s += '<description></description>'
s += '<location><city></city><region></region><postalCode></postalCode></location>'
s += '<organizationURL>%s</organizationURL>' % (xmlh.get_tag_val(org, "uri"))
s += '<donateURL></donateURL>'
s += '<logoURL></logoURL>'
s += '<detailURL></detailURL>'
s += '</Organization>'
numorgs += 1
s += '</Organizations>'
s += '<VolunteerOpportunities>'
entries = re.findall(r'<entry>.+?</entry>', instr, re.DOTALL)
#entries = xmldoc.getElementsByTagName("entry")
#if (maxrecs > entries.length):
# maxrecs = entries.length
#for opp in entries[0:maxrecs-1]:
for i, oppstr in enumerate(entries):
if (maxrecs>0 and i>maxrecs):
break
xmlh.print_rps_progress("opps", progress, i, maxrecs)
opp = xmlh.simple_parser(oppstr, known_elnames, False)
# unmapped: db:rsvp (seems to be same as link, but with #rsvp at end of url?)
# unmapped: db:host (no equivalent?)
# unmapped: db:county (seems to be empty)
# unmapped: attendee_count
# unmapped: guest_total
# unmapped: db:title (dup of title, above)
# unmapped: contactName
s += '<VolunteerOpportunity>'
id_link = xmlh.get_tag_val(opp, "id")
s += '<volunteerOpportunityID>%s</volunteerOpportunityID>' % (id_link)
orgname = xmlh.get_tag_val(org, "name") # ok to be lazy-- no other 'name's in this feed
s += '<sponsoringOrganizationIDs><sponsoringOrganizationID>%s</sponsoringOrganizationID></sponsoringOrganizationIDs>' % (organizations[orgname])
# hardcoded: volunteerHubOrganizationID
s += '<volunteerHubOrganizationIDs><volunteerHubOrganizationID>0</volunteerHubOrganizationID></volunteerHubOrganizationIDs>'
s += '<title>%s</title>' % (xmlh.get_tag_val(opp, "title"))
# lazy: id is the same as the link field...
s += '<detailURL>%s</detailURL>' % (id_link)
# lazy: idealist stuffs a div in the content...
entry_content = xmlh.get_tag_val(opp, 'content')
s += '<description>%s</description>' % removeContentWrapperDiv(entry_content)
s += '<abstract>%s</abstract>' % (xmlh.get_tag_val(opp, "summary"))
pubdate = xmlh.get_tag_val(opp, "published")
ts = dateutil.parser.parse(pubdate)
pubdate = ts.strftime("%Y-%m-%dT%H:%M:%S")
s += '<lastUpdated>%s</lastUpdated>' % (pubdate)
s += '<expires>%sT23:59:59</expires>' % (xmlh.get_tag_val(opp, "gg_expiration_date"))
dbevents = opp.getElementsByTagName("gg_event_date_range")
if (dbevents.length != 1):
print datetime.now(), "parse_idealist: only 1 db:event supported."
return None
s += '<locations><location>'
# yucko: idealist is stored in Google Base, which only has 'location'
# so we stuff it into the city field, knowing that it'll just get
# concatenated down the line...
s += '<city>%s</city>' % (xmlh.get_tag_val(opp, "gg_location"))
s += '</location></locations>'
dbscheduledTimes = opp.getElementsByTagName("gg_event_date_range")
if (dbscheduledTimes.length != 1):
print datetime.now(), "parse_usaservice: only 1 gg_event_date_range supported."
return None
dbscheduledTime = dbscheduledTimes[0]
s += '<dateTimeDurations><dateTimeDuration>'
s += '<openEnded>No</openEnded>'
# ignore duration
# ignore commitmentHoursPerWeek
tempdate = xmlh.get_tag_val(dbscheduledTime, "gg_start")
ts = dateutil.parser.parse(tempdate)
tempdate = ts.strftime("%Y-%m-%d")
s += '<startDate>%s</startDate>' % (tempdate)
tempdate = xmlh.get_tag_val(dbscheduledTime, "gg_end")
ts = dateutil.parser.parse(tempdate)
tempdate = ts.strftime("%Y-%m-%d")
s += '<endDate>%s</endDate>' % (tempdate)
# TODO: timezone???
s += '</dateTimeDuration></dateTimeDurations>'
s += '<categoryTags>'
# proper way is too slow...
#cats = opp.getElementsByTagName("category")
#for i,cat in enumerate(cats):
# s += '<categoryTag>%s</categoryTag>' % (cat.attributes["label"].value)
catstrs = re.findall(r'<category term=(["][^"]+["])', oppstr, re.DOTALL)
for cat in catstrs:
s += "<categoryTag>" + xml.sax.saxutils.escape(cat) + "</categoryTag>"
s += '</categoryTags>'
age_range = xmlh.get_tag_val(opp, "gg_age_range")
if re.match(r'and under|Families', age_range):
s += '<minimumAge>0</minimumAge>'
elif re.match(r'Teens', age_range):
s += '<minimumAge>13</minimumAge>'
elif re.match(r'Adults', age_range):
s += '<minimumAge>18</minimumAge>'
elif re.match(r'Seniors', age_range):
s += '<minimumAge>65</minimumAge>'
s += '</VolunteerOpportunity>'
numopps += 1
s += '</VolunteerOpportunities>'
s += '</FootprintFeed>'
#s = re.sub(r'><([^/])', r'>\n<\1', s)
#print s
return s, numorgs, numopps
# pylint: disable-msg=R0915
def parse(s, maxrecs, progress):
    """Return FPXML for idealist.org data.

    Pre-cleans the raw feed (wraps content in CDATA, strips the g:
    namespace), then delegates to ParseHelper, whose
    (fpxml, numorgs, numopps) result is returned unchanged.
    """
    cleaned = addCdataToContent(s, progress)
    cleaned = remove_g_namespace(cleaned, progress)
    return ParseHelper(cleaned, maxrecs, progress)
| Python |
#!/usr/bin/python
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: remove silly dependency on dapper.net-- thought I'd need
# it for the full scrape, but ended up not going that way.
"""crawler for craigslist until they provide a real feed."""
from xml.dom import minidom
import sys
import os
import urllib
import re
import thread
import time
import datetime
import socket
DEFAULT_TIMEOUT = 10  # seconds; applied to every socket, so urllib fetches can't hang
socket.setdefaulttimeout(DEFAULT_TIMEOUT)
METROS_FN = "craigslist-metros.txt"  # "url|name" lines, written by crawl_metros()
CACHE_FN = "craigslist-cache.txt"  # "url-Q-page" lines, appended by crawl()
pages = {}  # url -> page text, shared by all crawler threads (guard with page_lock)
page_lock = thread.allocate_lock()
crawlers = 0  # count of in-flight crawler threads (guard with crawlers_lock)
crawlers_lock = thread.allocate_lock()
cachefile_lock = thread.allocate_lock()  # serializes appends to CACHE_FN
# set to a lower number if you have problems
MAX_CRAWLERS = 40
def read_metros():
    """Load METROS_FN ("url|name" per line) into the global metros dict."""
    global metros
    metros = {}
    fh = open(METROS_FN, "r")
    for line in fh:
        # BUGFIX: strip the trailing newline -- metro names used to end in '\n'
        url, name = line.rstrip("\n").split("|")
        metros[url] = name
    # BUGFIX: the file handle used to be leaked
    fh.close()
def crawl_metros():
    """Build METROS_FN from craigslist's metro list.

    Scrapes geo.craigslist.org (via a dapper.net wrapper) for the toplevel
    metro domains, then fetches each metro homepage to expand sub-metros
    (e.g. nyc boroughs).  Writes one "url|name" line per metro.
    """
    # dapper output looks like:
    #<geo dataType="RawString" fieldName="geo" href="http://waterloo.craigslist.org/" originalElement="a" type="field">waterloo / cedar falls</geo>
    print "getting toplevel geos..."
    fh = urllib.urlopen("http://www.dapper.net/RunDapp?dappName=craigslistmetros&v=1&applyToUrl=http%3A%2F%2Fgeo.craigslist.org%2Fiso%2Fus")
    geostr = fh.read()
    fh.close()
    dom = minidom.parseString(geostr)
    nodes = dom.getElementsByTagName("geo")
    outfh = open(METROS_FN, "w+")
    domains = []  # NOTE(review): unused
    for node in nodes:
        domain = node.getAttribute("href")
        #print "finding submetros within", domain
        fh1 = urllib.urlopen(domain)
        domain_homepage = fh1.read()
        fh1.close()
        # metro homepages that cover sub-metros have a "topban" cell like:
        #<td align="center" colspan="5" id="topban">
        #<div>
        #<h2>new york city</h2> <sup><a href="http://en.wikipedia.org/wiki/New_York_City">w</a></sup>
        #<span class="for"><a href="/mnh/" title="manhattan">mnh</a> <a href="/brk/" title="brooklyn">brk</a> ...</span>
        #</div>
        #</td>
        topbanstrs = re.findall(r'<td align="center" colspan="5" id="topban">.+?</td>', domain_homepage, re.DOTALL)
        for topbanstr in topbanstrs:
            links = re.findall(r'<a href="/(.+?)".+?title="(.+?)".+?</a>', topbanstr, re.DOTALL)
            if len(links) > 0:
                # sub-metros found: one line per borough/region
                for link in links:
                    print domain+link[0], ":", link[1]
                    outfh.write(domain+link[0]+"|"+link[1]+"\n")
            else:
                # no sub-metros: use the <h2> city name for the whole domain
                names = re.findall(r'<h2>(.+?)</h2>', domain_homepage, re.DOTALL)
                print domain, ":", names[0]
                outfh.write(domain+"|"+names[0]+"\n")
    outfh.close()
def crawl(url, ignore):
global crawlers, crawlers_lock, pages, page_lock, MAX_CRAWLERS
if url in pages:
return
while crawlers > MAX_CRAWLERS:
time.sleep(1)
# we don't care if several wake at once
crawlers_lock.acquire()
crawlers = crawlers + 1
crawlers_lock.release()
#proxied_url = "http://suprfetch.appspot.com/?url="+urllib.quote(url+"?for_google_and_craigslist.org_project_footprint_please_dont_block")
proxied_url = "http://suprfetch.appspot.com/?url="+urllib.quote(url)
page = ""
attempts = 0
while attempts < 3 and page == "":
try:
fh = urllib.urlopen(proxied_url)
page = fh.read()
fh.close()
except:
page = "" # in case close() threw exception
attempts = attempts + 1
print "open failed, retry after", attempts, "attempts (url="+url+")"
time.sleep(1)
if re.search(r'This IP has been automatically blocked', page, re.DOTALL):
print "uh oh: craiglist is blocking us (IP blocking). exiting..."
sys.exit(1)
if (re.search(r'sorry.google.com/sorry/', page) or
re.search(r'to automated requests from a computer virus or spyware', page, re.DOTALL)):
print "uh oh: google is blocking us (DOS detector). exiting..."
sys.exit(1)
if re.search(r'<TITLE>302 Moved</TITLE>"',page, re.DOTALL):
newlocstr = re.findall(r'The document has moved <A HREF="(.+?)"',page)
print "being redirected to",newlocstr[0]
crawl(newlocstr[0], "foo")
return
if attempts >= 3:
print "crawl failed after 3 attempts:",url
return
page_lock.acquire()
pages[url] = page
page_lock.release()
cached_page = re.sub(r'(?:\r?\n|\r)',' ',page)
cachefile_lock.acquire()
outfh = open(CACHE_FN, "a")
outfh.write(url+"-Q-"+cached_page+"\n")
outfh.close()
cachefile_lock.release()
crawlers_lock.acquire()
crawlers = crawlers - 1
crawlers_lock.release()
def wait_for_page(url):
    """Block until some crawler thread has stored url in pages, then return the page text."""
    while True:
        page_lock.acquire()
        body = pages.get(url, "")
        page_lock.release()
        if body != "":
            return body
        time.sleep(2)
def sync_fetch(url):
    """Crawl url synchronously and return its page text.

    Exits the whole program if the crawl failed (crawl() logged why).
    """
    crawl(url, "")
    if url not in pages:
        print "sync_fetch, failed to crawl url",url
        sys.exit(1)
    return pages[url]
progstart = time.time()  # wall-clock time at module load

def secs_since_progstart():
    """Return wall-clock seconds elapsed since this module was loaded."""
    # reading a module global needs no 'global' declaration
    return time.time() - progstart
def crawl_metro_page(url, unused):
    """Crawl one listing-index page for a metro.

    Fetches the index synchronously, crawls every listing linked from it,
    and spawns a new thread for each further index page (index<N>.html).
    unused: placeholder second arg for thread.start_new_thread.
    """
    listing_index = sync_fetch(url)
    site_base = re.sub(r'.org/.+', '.org/', url)
    for rel_listing in re.findall(r'<p><a href="/(.+?)">', listing_index):
        crawl(site_base + rel_listing, "")
    dir_path = re.sub(r'[^/]+$', '', url)
    for next_index in re.findall(r'<a href="(index[0-9]+[.]html)"', listing_index):
        thread.start_new_thread(crawl_metro_page, (dir_path + next_index, ""))
def parse_cache_file(s, listings_only=False, printerrors=True):
global pages
for i,line in enumerate(s.splitlines()):
#print line[0:100]
res = re.findall(r'^(.+?)-Q-(.+)', line)
try:
url,page = res[0][0], res[0][1]
if (not listings_only or re.search(r'html$', url)):
pages[url] = page
except:
if printerrors:
print "error parsing cache file on line",i+1
print line
def load_cache():
global CACHE_FN
try:
fh = open(CACHE_FN, "r")
instr = fh.read()
print "closing cache file", CACHE_FN
fh.close()
print "parsing cache data", len(instr), "bytes"
parse_cache_file(instr, False)
print "loaded", len(pages), "pages."
except:
# ignore errors if file doesn't exist
pass
def print_status():
    """Main-thread loop: print crawl progress every ~2s until progress stalls.

    Exits after 100 consecutive no-progress checks (~200 seconds).
    NOTE(review): samesame is never reset when progress resumes, and
    crawled_pages/secs_since_progstart() could divide by zero if called in
    the very first instant -- confirm both are acceptable in practice.
    """
    global pages, num_cached_pages, crawlers
    samesame = 0  # consecutive checks with no new pages
    last_crawled_pages = 0
    while True:
        crawled_pages = len(pages) - num_cached_pages
        pages_per_sec = int(crawled_pages/secs_since_progstart())
        msg = str(secs_since_progstart())+": main thread: "
        msg += "waiting for " + str(crawlers) + " crawlers.\n"
        msg += str(crawled_pages) + " pages crawled so far"
        msg += "(" + str(pages_per_sec) + " pages/sec). "
        msg += str(len(pages)) + " total pages."
        print msg
        if last_crawled_pages == crawled_pages:
            samesame += 1
            if samesame >= 100:
                print "done (waited long enough)."
                break
        else:
            last_crawled_pages = crawled_pages
        time.sleep(2)
from optparse import OptionParser
if __name__ == "__main__":
    # command-line driver: optionally re-scrape the metro list, optionally
    # reuse the page cache, then crawl every metro's /vol/ listings in
    # background threads while the main thread reports status.
    parser = OptionParser("usage: %prog [options]...")
    parser.set_defaults(metros=False)
    parser.set_defaults(load_cache=True)
    parser.add_option("--metros", action="store_true", dest="metros")
    parser.add_option("--load_cache", action="store_true", dest="load_cache")
    parser.add_option("--noload_cache", action="store_false", dest="load_cache")
    (options, args) = parser.parse_args(sys.argv[1:])
    if options.metros:
        crawl_metros()
    read_metros()
    if options.load_cache:
        load_cache()
    else:
        # start cold: delete any existing cache file
        try:
            os.unlink(CACHE_FN)
        except:
            # file probably didn't exist -- fine
            pass
    num_cached_pages = len(pages)
    outstr = ""
    for url in metros:
        thread.start_new_thread(crawl_metro_page, (url+"vol/", ""))
    print_status()
    sys.exit(0)
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
parser for feed stored in a google spreadsheet
(note that this is different from other parsers inasmuch as it
expects the caller to pass in the providerID and providerName)
"""
# typical cell
#<entry>
#<id>http://spreadsheets.google.com/feeds/cells/pMY64RHUNSVfKYZKPoVXPBg
#/1/public/basic/R14C13</id>
#<updated>2009-04-28T03:29:56.957Z</updated>
#<category scheme='http://schemas.google.com/spreadsheets/2006'
#term='http://schemas.google.com/spreadsheets/2006#cell'/>
#<title type='text'>M14</title>
#<content type='text'>ginny@arthur.edu</content>
#<link rel='self' type='application/atom+xml' href='http://spreadsheets.
#google.com/feeds/cells/pMY64RHUNSVfKYZKPoVXPBg/1/public/basic/R14C13'/>
#</entry>
import xml_helpers as xmlh
import re
import urllib
import sys
import time
from datetime import datetime
CURRENT_ROW = None  # spreadsheet row currently being parsed; prefixed onto parser_error messages
def parser_error(msg):
    """Print a parse error, prefixed with the current spreadsheet row if known."""
    if CURRENT_ROW != None:
        msg = "row "+str(CURRENT_ROW)+": "+msg
    print "parse_gspreadsheet ERROR: "+msg
def raw_recordval(record, key):
    """Return record[key] coerced to str and stripped; "" when key is absent."""
    if key not in record:
        return ""
    return str(record[key]).strip()
def recordval(record, key):
    """Like raw_recordval, but with runs of whitespace collapsed to single spaces."""
    return " ".join(raw_recordval(record, key).split())
KNOWN_ORGS = {}  # orgname -> small integer organizationID, assigned on first sight
def get_dtval(record, field_name):
    """Fetch a date field; log a parser_error unless it's empty or MM/DD/YYYY-ish."""
    val = recordval(record, field_name)
    if val and not re.match(r'\d\d?/\d\d?/\d\d\d\d', val):
        parser_error("bad value in "+field_name+": '"+val+"'-- try MM/DD/YYYY")
    return val
def get_tmval(record, field_name):
    """Fetch a time field; log a parser_error unless it's empty or HH:MM[:SS]-ish."""
    val = recordval(record, field_name)
    if val and not re.match(r'\d?\d:\d\d(:\d\d)?', val):
        parser_error("bad value in "+field_name+": '"+val+"'-- try HH:MM:SS")
    return val
def record_to_fpxml(record):
    """Render one spreadsheet row (field_name -> value dict) as a
    <VolunteerOpportunity> FPXML fragment.

    Side effect: assigns each new org name a sequential ID in the
    module-level KNOWN_ORGS dict.
    """
    fpxml = ""
    fpxml += '<VolunteerOpportunity>'
    fpxml += xmlh.output_val("volunteerOpportunityID", recordval(record, 'oppid'))
    orgname = recordval(record,'SponsoringOrganization')
    if orgname not in KNOWN_ORGS:
        KNOWN_ORGS[orgname] = len(KNOWN_ORGS)
    fpxml += xmlh.output_val("sponsoringOrganizationID", KNOWN_ORGS[orgname])
    title = recordval(record,'OpportunityTitle')
    if title == "":
        parser_error("missing OpportunityTitle-- this field is required.")
    fpxml += xmlh.output_val("title", title)
    fpxml += '<dateTimeDurations>'
    fpxml += '<dateTimeDuration>'
    # "ongoing" anywhere in StartDate marks the opportunity open-ended
    if ('StartDate' in record and
        recordval(record,'StartDate').find("ongoing") >= 0):
        fpxml += xmlh.output_val('openEnded', 'Yes')
    else:
        fpxml += xmlh.output_val('openEnded', 'No')
    # dates/times are emitted only when nonempty (get_*val log a parser
    # error on bad formats but still return the raw value)
    startdtval = get_dtval(record, 'StartDate')
    if startdtval != "":
        fpxml += xmlh.output_val('startDate', startdtval)
    starttmval = get_tmval(record, 'StartTime')
    if starttmval != "":
        fpxml += xmlh.output_val('startTime', starttmval)
    enddtval = get_dtval(record, 'EndDate')
    if enddtval != "":
        fpxml += xmlh.output_val('endDate', enddtval)
    endtmval = get_tmval(record, 'EndTime')
    if endtmval != "":
        fpxml += xmlh.output_val('endTime', endtmval)
    # map free-text frequency to an iCal RRULE
    freq = recordval(record,'Frequency').lower()
    if freq == "" or freq.find("once") >= 0:
        fpxml += '<iCalRecurrence/>'
    elif freq.find("daily") >= 0:
        fpxml += '<iCalRecurrence>FREQ=DAILY</iCalRecurrence>'
    elif freq.find("weekly") >= 0:
        fpxml += '<iCalRecurrence>FREQ=WEEKLY</iCalRecurrence>'
    elif freq.find("other") >= 0 and freq.find("week") >= 0:
        fpxml += '<iCalRecurrence>FREQ=WEEKLY;INTERVAL=2</iCalRecurrence>'
    elif freq.find("monthly") >= 0:
        fpxml += '<iCalRecurrence>FREQ=MONTHLY</iCalRecurrence>'
    else:
        parser_error("unsupported frequency: '"+recordval(record,'Frequency')+"'-- skipping")
    fpxml += xmlh.output_val('commitmentHoursPerWeek', recordval(record,'CommitmentHours'))
    fpxml += '</dateTimeDuration>'
    fpxml += '</dateTimeDurations>'
    fpxml += '<locations>'
    fpxml += '<location>'
    if recordval(record,'LocationName').find("virtual") >= 0:
        fpxml += xmlh.output_val('virtual', 'Yes')
    else:
        fpxml += xmlh.output_val('virtual', 'No')
    fpxml += xmlh.output_val('name', recordval(record,'LocationName'))
    fpxml += xmlh.output_val('streetAddress1', recordval(record,'LocationStreet'))
    fpxml += xmlh.output_val('city', recordval(record,'LocationCity'))
    fpxml += xmlh.output_val('region', recordval(record,'LocationProvince'))
    fpxml += xmlh.output_val('postalCode', recordval(record,'LocationPostalCode'))
    fpxml += xmlh.output_val('country', recordval(record,'LocationCountry'))
    fpxml += '</location>'
    fpxml += '</locations>'
    fpxml += xmlh.output_val('paid', recordval(record,'Paid'))
    fpxml += xmlh.output_val('minimumAge', recordval(record,'MinimumAge'))
    # TODO: seniors only, kidfriendly
    fpxml += xmlh.output_val('sexRestrictedTo', recordval(record,'SexRestrictedTo'))
    fpxml += xmlh.output_val('skills', recordval(record,'Skills'))
    fpxml += xmlh.output_val('contactName', recordval(record,'ContactName'))
    fpxml += xmlh.output_val('contactPhone', recordval(record,'ContactPhone'))
    fpxml += xmlh.output_val('contactEmail', recordval(record,'ContactEmail'))
    fpxml += xmlh.output_val('detailURL', recordval(record,'URL'))
    # note: preserve whitespace in description
    fpxml += xmlh.output_val('description', raw_recordval(record,'Description'))
    fpxml += '<lastUpdated olsonTZ="Etc/UTC">'
    fpxml += recordval(record,'LastUpdated') + '</lastUpdated>'
    fpxml += '</VolunteerOpportunity>'
    return fpxml
def cellval(data, row, col):
    """Return the cell at (row, col) of the parsed sheet, or None if absent."""
    return data.get('R%s' 'C%s' % (row, col)) if False else data.get('R' + str(row) + 'C' + str(col))
def parse_gspreadsheet(instr, data, updated, progress):
# look ma, watch me parse XML a zillion times faster!
#<entry><id>http://spreadsheets.google.com/feeds/cells/pMY64RHUNSVfKYZKPoVXPBg
#/1/public/basic/R14C15</id><updated>2009-04-28T03:34:21.900Z</updated>
#<category scheme='http://schemas.google.com/spreadsheets/2006'
#term='http://schemas.google.com/spreadsheets/2006#cell'/><title type='text'>
#O14</title><content type='text'>http://www.fake.org/vol.php?id=4</content>
#<link rel='self' type='application/atom+xml'
#href='http://spreadsheets.google.com/feeds/cells/pMY64RHUNSVfKYZKPoVXPBg/1/
#public/basic/R14C15'/></entry>
regexp = re.compile('<entry>.+?(R(\d+)C(\d+))</id>'+
'<updated.*?>(.+?)</updated>.*?'+
'<content.*?>(.+?)</content>.+?</entry>', re.DOTALL)
maxrow = maxcol = 0
for i, match in enumerate(re.finditer(regexp, instr)):
if progress and i > 0 and i % 250 == 0:
print str(datetime.now())+": ", i, " cells processed."
lastupd = re.sub(r'([.][0-9]+)?Z?$', '', match.group(4)).strip()
#print "lastupd='"+lastupd+"'"
updated[match.group(1)] = lastupd.strip("\r\n\t ")
val = match.group(5).strip("\r\n\t ")
data[match.group(1)] = val
row = match.group(2)
if row > maxrow:
maxrow = row
col = match.group(3)
if col > maxcol:
maxcol = col
#print row, col, val
return maxrow, maxcol
def read_gspreadsheet(url, data, updated, progress):
    """Fetch the spreadsheet cells feed at url and parse it into data/updated.

    Returns parse_gspreadsheet's (maxrow, maxcol).
    """
    # read the spreadsheet into a big string
    infh = urllib.urlopen(url)
    instr = infh.read()
    infh.close()
    return parse_gspreadsheet(instr, data, updated, progress)
def find_header_row(data, regexp_str):
    """Scan the top-left 20x5 cells for the first one matching regexp_str.

    Returns (header_row, header_startcol); on failure logs parser errors
    and returns (-1, -1).
    """
    pattern = re.compile(regexp_str, re.IGNORECASE | re.DOTALL)
    for row in range(20):
        for col in range(5):
            val = cellval(data, row, col)
            if val and pattern.search(val):
                return row, col
    parser_error("no header row found: looked for "+regexp_str)
    parser_error("no header start column found")
    return -1, -1
def parse(instr, maxrecs, progress):
    """Convert a Google Spreadsheet cells feed into FPXML.

    instr: raw Atom cells-feed XML; progress is passed through to
    parse_gspreadsheet.  Returns (fpxml, numorgs, numopps).  The
    providerID/providerName/providerURL elements are emitted empty for
    the caller to fill in.
    """
    # TODO: a spreadsheet should really be an object and cellval a method
    data = {}
    updated = {}
    maxrow, maxcol = parse_gspreadsheet(instr, data, updated, progress)
    # find header row: look for "opportunity title" (case insensitive)
    header_row, header_startcol = find_header_row(data, r'opportunity\s*title')
    # map each header cell to a canonical field name, walking right from the
    # first header cell until a blank cell is hit
    header_colidx = {}
    header_names = {}
    header_col = header_startcol
    while True:
        header_str = cellval(data, header_row, header_col)
        if not header_str:
            break
        field_name = None
        header_str = header_str.lower()
        if header_str.find("title") >= 0:
            field_name = "OpportunityTitle"
        elif header_str.find("organization") >= 0 and header_str.find("sponsor") >= 0:
            field_name = "SponsoringOrganization"
        elif header_str.find("description") >= 0:
            field_name = "Description"
        elif header_str.find("skills") >= 0:
            field_name = "Skills"
        elif header_str.find("location") >= 0 and header_str.find("name") >= 0:
            field_name = "LocationName"
        elif header_str.find("street") >= 0:
            field_name = "LocationStreet"
        elif header_str.find("city") >= 0:
            field_name = "LocationCity"
        elif header_str.find("state") >= 0 or header_str.find("province") >= 0:
            field_name = "LocationProvince"
        elif header_str.find("zip") >= 0 or header_str.find("postal") >= 0:
            field_name = "LocationPostalCode"
        elif header_str.find("country") >= 0:
            field_name = "LocationCountry"
        elif header_str.find("start") >= 0 and header_str.find("date") >= 0:
            field_name = "StartDate"
        elif header_str.find("start") >= 0 and header_str.find("time") >= 0:
            field_name = "StartTime"
        elif header_str.find("end") >= 0 and header_str.find("date") >= 0:
            field_name = "EndDate"
        elif header_str.find("end") >= 0 and header_str.find("time") >= 0:
            field_name = "EndTime"
        elif header_str.find("contact") >= 0 and header_str.find("name") >= 0:
            field_name = "ContactName"
        elif header_str.find("email") >= 0 or header_str.find("e-mail") >= 0:
            field_name = "ContactEmail"
        elif header_str.find("phone") >= 0:
            field_name = "ContactPhone"
        elif header_str.find("website") >= 0 or header_str.find("url") >= 0:
            field_name = "URL"
        elif header_str.find("often") >= 0:
            field_name = "Frequency"
        elif header_str.find("days") >= 0 and header_str.find("week") >= 0:
            field_name = "DaysOfWeek"
        elif header_str.find("paid") >= 0:
            field_name = "Paid"
        elif header_str.find("commitment") >= 0 or header_str.find("hours") >= 0:
            field_name = "CommitmentHours"
        elif header_str.find("age") >= 0 and header_str.find("min") >= 0:
            field_name = "MinimumAge"
        elif header_str.find("kid") >= 0:
            field_name = "KidFriendly"
        elif header_str.find("senior") >= 0 and header_str.find("only") >= 0:
            field_name = "SeniorsOnly"
        elif header_str.find("sex") >= 0 or header_str.find("gender") >= 0:
            field_name = "SexRestrictedTo"
        elif header_str.find("volunteer appeal") >= 0:
            # recognized but deliberately unmapped
            field_name = None
        else:
            parser_error("couldn't map header '"+header_str+"' to a field name.")
        if field_name is not None:
            header_colidx[field_name] = header_col
            header_names[header_col] = field_name
        header_col += 1
    if len(header_names) < 10:
        parser_error("too few fields found: "+str(len(header_names)))
    # check to see if there's a header-description row ("up to ..." text)
    header_desc = cellval(data, header_row+1, header_startcol)
    if not header_desc:
        parser_error("blank row not allowed below header row")
    header_desc = header_desc.lower()
    data_startrow = header_row + 1
    if header_desc.find("up to") >= 0:
        data_startrow += 1
    # walk the data rows; stop after MAX_BLANKROWS consecutive blank rows
    global CURRENT_ROW
    CURRENT_ROW = row = data_startrow
    blankrows = 0
    MAX_BLANKROWS = 2
    volopps = '<VolunteerOpportunities>'
    numorgs = numopps = 0
    while True:
        blankrow = True
        record = {}
        record['LastUpdated'] = '0000-00-00'
        for field_name in header_colidx:
            col = header_colidx[field_name]
            val = cellval(data, row, col)
            if val:
                blankrow = False
            else:
                val = ""
            record[field_name] = val
            # track the newest cell timestamp seen in this row
            key = 'R'+str(row)+'C'+str(col)
            if (key in updated and
                updated[key] > record['LastUpdated']):
                record['LastUpdated'] = updated[key]
        if blankrow:
            blankrows += 1
            if blankrows > MAX_BLANKROWS:
                break
        else:
            numopps += 1
            blankrows = 0
            record['oppid'] = str(numopps)
            volopps += record_to_fpxml(record)
        row += 1
        CURRENT_ROW = row
    CURRENT_ROW = None
    volopps += '</VolunteerOpportunities>'
    outstr = '<?xml version="1.0" ?>'
    outstr += '<FootprintFeed schemaVersion="0.1">'
    outstr += '<FeedInfo>'
    # providerID replaced by caller
    outstr += '<providerID></providerID>'
    # providerName replaced by caller
    outstr += '<providerName></providerName>'
    outstr += '<feedID>1</feedID>'
    outstr += '<createdDateTime>%s</createdDateTime>' % xmlh.current_ts()
    # providerURL replaced by caller
    outstr += '<providerURL></providerURL>'
    outstr += '<description></description>'
    outstr += '</FeedInfo>'
    outstr += "<Organizations>"
    for orgname in KNOWN_ORGS:
        outstr += "<Organization>"
        outstr += xmlh.output_val("organizationID", KNOWN_ORGS[orgname])
        outstr += xmlh.output_val("name", orgname)
        outstr += "</Organization>"
        # BUGFIX: numorgs was never incremented, so this parser always
        # reported 0 orgs (the sibling parsers count each emitted org)
        numorgs += 1
    outstr += "</Organizations>"
    outstr += volopps
    outstr += '</FootprintFeed>'
    return outstr, numorgs, numopps
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
parser for usaservice.org
"""
import xml_helpers as xmlh
import re
from datetime import datetime
import dateutil.parser
# pylint: disable-msg=R0915
def parse(instr, maxrecs, progress):
    """return FPXML given usaservice data

    instr is an RSS feed with one <item> per line and db:-namespaced
    extension elements.  Returns (fpxml, numorgs, numopps), or None when an
    item doesn't have exactly one db:scheduledTime / db:address.
    """
    # TODO: progress
    known_elnames = [ 'channel', 'db:abstract', 'db:address', 'db:attendee_count', 'db:categories', 'db:city', 'db:country', 'db:county', 'db:dateTime', 'db:event', 'db:eventType', 'db:guest_total', 'db:host', 'db:latitude', 'db:length', 'db:longitude', 'db:rsvp', 'db:scheduledTime', 'db:state', 'db:street', 'db:title', 'db:venue_name', 'db:zipcode', 'description', 'docs', 'guid', 'item', 'language', 'link', 'pubDate', 'rss', 'title', ]
    # convert to footprint format
    s = '<?xml version="1.0" ?>'
    s += '<FootprintFeed schemaVersion="0.1">'
    s += '<FeedInfo>'
    # TODO: assign provider IDs?
    s += '<providerID>101</providerID>'
    s += '<providerName>usaservice.org</providerName>'
    s += '<feedID>1</feedID>'
    s += '<createdDateTime>%s</createdDateTime>' % xmlh.current_ts()
    s += '<providerURL>http://www.usaservice.org/</providerURL>'
    s += '<description>Syndicated events</description>'
    # TODO: capture ts -- use now?!
    s += '</FeedInfo>'
    numorgs = numopps = 0
    # hardcoded: Organization -- the feed carries no org info, so emit one
    # placeholder org with ID 0 that every opportunity points at
    s += '<Organizations>'
    s += '<Organization>'
    s += '<organizationID>0</organizationID>'
    s += '<nationalEIN></nationalEIN>'
    s += '<name></name>'
    s += '<missionStatement></missionStatement>'
    s += '<description></description>'
    s += '<location><city></city><region></region><postalCode></postalCode></location>'
    s += '<organizationURL></organizationURL>'
    s += '<donateURL></donateURL>'
    s += '<logoURL></logoURL>'
    s += '<detailURL></detailURL>'
    s += '</Organization>'
    numorgs += 1
    s += '</Organizations>'
    s += '<VolunteerOpportunities>'
    # rewrite db: to db_ so the simple parser can treat tags as plain names
    instr = re.sub(r'<(/?db):', r'<\1_', instr)
    for i, line in enumerate(instr.splitlines()):
        # NOTE(review): i counts feed lines, not parsed items, so maxrecs is
        # only an approximate cap here -- confirm that's intended
        if (maxrecs>0 and i>maxrecs):
            break
        xmlh.print_rps_progress("opps", progress, i, maxrecs)
        item = xmlh.simple_parser(line, known_elnames, progress=False)
        # unmapped: db_rsvp (seems to be same as link, but with #rsvp at end of url?)
        # unmapped: db_host (no equivalent?)
        # unmapped: db_county (seems to be empty)
        # unmapped: attendee_count
        # unmapped: guest_total
        # unmapped: db_title (dup of title, above)
        s += '<VolunteerOpportunity>'
        s += '<volunteerOpportunityID>%s</volunteerOpportunityID>' % (xmlh.get_tag_val(item, "guid"))
        # hardcoded: sponsoringOrganizationID
        s += '<sponsoringOrganizationIDs><sponsoringOrganizationID>0</sponsoringOrganizationID></sponsoringOrganizationIDs>'
        # hardcoded: volunteerHubOrganizationID
        s += '<volunteerHubOrganizationIDs><volunteerHubOrganizationID>0</volunteerHubOrganizationID></volunteerHubOrganizationIDs>'
        s += '<title>%s</title>' % (xmlh.get_tag_val(item, "title"))
        s += '<abstract>%s</abstract>' % (xmlh.get_tag_val(item, "abstract"))
        s += '<volunteersNeeded>-8888</volunteersNeeded>'
        dbscheduledTimes = item.getElementsByTagName("db_scheduledTime")
        if (dbscheduledTimes.length != 1):
            print datetime.now(), "parse_usaservice: only 1 db_scheduledTime supported."
            return None
        dbscheduledTime = dbscheduledTimes[0]
        s += '<dateTimeDurations><dateTimeDuration>'
        # a length of -1 (or missing) marks the event open-ended
        length = xmlh.get_tag_val(dbscheduledTime, "db_length")
        if length == "" or length == "-1":
            s += '<openEnded>Yes</openEnded>'
        else:
            s += '<openEnded>No</openEnded>'
        date, time = xmlh.get_tag_val(dbscheduledTime, "db_dateTime").split(" ")
        s += '<startDate>%s</startDate>' % (date)
        # TODO: timezone???
        s += '<startTime>%s</startTime>' % (time)
        s += '</dateTimeDuration></dateTimeDurations>'
        dbaddresses = item.getElementsByTagName("db_address")
        if (dbaddresses.length != 1):
            print datetime.now(), "parse_usaservice: only 1 db_address supported."
            return None
        dbaddress = dbaddresses[0]
        s += '<locations><location>'
        s += '<name>%s</name>' % (xmlh.get_tag_val(item, "db_venue_name"))
        s += '<streetAddress1>%s</streetAddress1>' % (xmlh.get_tag_val(dbaddress, "db_street"))
        s += '<city>%s</city>' % (xmlh.get_tag_val(dbaddress, "db_city"))
        s += '<region>%s</region>' % (xmlh.get_tag_val(dbaddress, "db_state"))
        s += '<country>%s</country>' % (xmlh.get_tag_val(dbaddress, "db_country"))
        s += '<postalCode>%s</postalCode>' % (xmlh.get_tag_val(dbaddress, "db_zipcode"))
        s += '<latitude>%s</latitude>' % (xmlh.get_tag_val(item, "db_latitude"))
        s += '<longitude>%s</longitude>' % (xmlh.get_tag_val(item, "db_longitude"))
        s += '</location></locations>'
        type = xmlh.get_tag_val(item, "db_eventType")
        s += '<categoryTags><categoryTag>%s</categoryTag></categoryTags>' % (type)
        s += '<contactName>%s</contactName>' % xmlh.get_tag_val(item, "db_host")
        s += '<detailURL>%s</detailURL>' % (xmlh.get_tag_val(item, "link"))
        s += '<description>%s</description>' % (xmlh.get_tag_val(item, "description"))
        pubdate = xmlh.get_tag_val(item, "pubDate")
        # normalize RFC822-style dates ("01 Jan 2009 ...") to ISO
        if re.search("[0-9][0-9] [A-Z][a-z][a-z] [0-9][0-9][0-9][0-9]", pubdate):
            # TODO: parse() is ignoring timzone...
            ts = dateutil.parser.parse(pubdate)
            pubdate = ts.strftime("%Y-%m-%dT%H:%M:%S")
        s += '<lastUpdated>%s</lastUpdated>' % (pubdate)
        s += '</VolunteerOpportunity>'
        numopps += 1
    s += '</VolunteerOpportunities>'
    s += '</FootprintFeed>'
    #s = re.sub(r'><([^/])', r'>\n<\1', s)
    return s, numorgs, numopps
| Python |
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
appengine main() for when the site is down.
"""
# note: view classes aren inherently not pylint-compatible
# pylint: disable-msg=C0103
# pylint: disable-msg=W0232
# pylint: disable-msg=E1101
# pylint: disable-msg=R0903
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
class SiteDownHandler(webapp.RequestHandler):
    """Handler for every request while the site is down.

    use a redirect so search engines don't index this as our homepage.
    """
    def get(self):
        """GET handler: send the visitor to the static down page."""
        self.redirect("/site_down.html")
def main():
    """Wire every URL path to SiteDownHandler and run the WSGI app."""
    application = webapp.WSGIApplication([(r'/.*', SiteDownHandler)], debug=False)
    run_wsgi_app(application)
if __name__ == "__main__":
    # appengine CGI entry point
    main()
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.