code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
# -*- coding: utf-8 -*-
"""Default ragendja settings for Django on Google App Engine.

A project's settings.py imports and overrides these values, and
ragendja.settings_post applies final post-processing (e.g. resolving the
MEDIA_URL placeholders defined below).
"""
from appenginepatcher import on_production_server, have_appserver
import os
# Debug locally, never on the production server.
DEBUG = not on_production_server
# The MEDIA_VERSION will get integrated via %d
MEDIA_URL = '/media/%d/'
# The MEDIA_URL will get integrated via %s
ADMIN_MEDIA_PREFIX = '%sadmin_media/'
ADMINS = ()
# App Engine datastore backend; it has no SQL-style transactions.
DATABASE_ENGINE = 'appengine'
DATABASE_SUPPORTS_TRANSACTIONS = False
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
EMAIL_HOST = 'localhost'
EMAIL_PORT = 25
EMAIL_HOST_USER = 'user'
EMAIL_HOST_PASSWORD = 'password'
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = 'user@localhost'
SERVER_EMAIL = 'user@localhost'
# URL prefixes that require (or are exempt from) an authenticated user;
# consumed by ragendja's LoginRequiredMiddleware.
LOGIN_REQUIRED_PREFIXES = ()
NO_LOGIN_REQUIRED_PREFIXES = ()
ROOT_URLCONF = 'urls'
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'ragendja.template.app_prefixed_loader',
    'django.template.loaders.app_directories.load_template_source',
)
# Project root and shared "common" folder, derived from this file's path.
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(
    os.path.dirname(__file__))))
COMMON_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
MAIN_DIRS = (PROJECT_DIR, COMMON_DIR)
TEMPLATE_DIRS = tuple([os.path.join(dir, 'templates') for dir in MAIN_DIRS])
# NOTE(review): the tuple below joins 'locale' onto the *templates*
# directories (yielding <dir>/templates/locale). If locale folders live
# next to (not inside) the templates folders, MAIN_DIRS was probably
# intended here -- confirm against the project layout.
LOCALE_PATHS = (
    os.path.join(PROJECT_DIR, 'media', 'locale'),
) + tuple([os.path.join(dir, 'locale') for dir in TEMPLATE_DIRS])
# App Engine has no writable filesystem, so uploads must stay in memory.
FILE_UPLOAD_HANDLERS = (
    'django.core.files.uploadhandler.MemoryFileUploadHandler',
)
CACHE_BACKEND = 'memcached://?timeout=0'
COMBINE_MEDIA = {}
if not on_production_server:
    INTERNAL_IPS = ('127.0.0.1',)
# Apps whose settings modules should not be auto-imported by settings_post.
IGNORE_APP_SETTINGS = ()
| Python |
from django.conf import settings
from django.http import HttpResponseServerError
from ragendja.template import render_to_string
def server_error(request, *args, **kwargs):
    """500 handler: shows the technical traceback page when the request
    carries the secret DEBUGKEY parameter, otherwise renders 500.html."""
    # request.REQUEST checks both GET and POST (deprecated in later Django).
    debugkey = request.REQUEST.get('debugkey')
    if debugkey and debugkey == getattr(settings, 'DEBUGKEY', None):
        import sys
        from django.views import debug
        # We are called while handling an exception, so sys.exc_info()
        # still holds the original traceback.
        return debug.technical_500_response(request, *sys.exc_info())
    return HttpResponseServerError(render_to_string(request, '500.html'))
def maintenance(request, *args, **kwargs):
    """Render the maintenance page with a 500 status code."""
    content = render_to_string(request, 'maintenance.html')
    return HttpResponseServerError(content)
| Python |
# -*- coding: utf-8 -*-
from django.conf import settings
from django.utils.cache import patch_cache_control
from google.appengine.runtime.apiproxy_errors import CapabilityDisabledError
from google.appengine.ext import db
from ragendja.template import render_to_response
from ragendja.views import server_error, maintenance
LOGIN_REQUIRED_PREFIXES = getattr(settings, 'LOGIN_REQUIRED_PREFIXES', ())
NO_LOGIN_REQUIRED_PREFIXES = getattr(settings, 'NO_LOGIN_REQUIRED_PREFIXES', ())
class LoginRequiredMiddleware(object):
    """
    Redirects to login page if request path begins with a
    LOGIN_REQURED_PREFIXES prefix. You can also specify
    NO_LOGIN_REQUIRED_PREFIXES which take precedence.
    """
    def process_request(self, request):
        """Return a login redirect for protected paths, None otherwise."""
        path = request.path
        # Whitelisted prefixes win over the protected ones.
        for prefix in NO_LOGIN_REQUIRED_PREFIXES:
            if path.startswith(prefix):
                return None
        needs_login = any(path.startswith(prefix)
                          for prefix in LOGIN_REQUIRED_PREFIXES)
        if needs_login and not request.user.is_authenticated():
            from django.contrib.auth.views import redirect_to_login
            return redirect_to_login(request.get_full_path())
        return None
class NoHistoryCacheMiddleware(object):
    """
    If user is authenticated we disable browser caching of pages in history.
    """
    def process_response(self, request, response):
        """Mark responses for logged-in users as non-cacheable."""
        already_cached = 'Expires' in response or 'Cache-Control' in response
        if already_cached or not hasattr(request, 'session'):
            return response
        if request.user.is_authenticated():
            patch_cache_control(response, no_store=True, no_cache=True,
                                must_revalidate=True, max_age=0)
        return response
class ErrorMiddleware(object):
    """Displays a default template on CapabilityDisabledError."""
    def process_exception(self, request, exception):
        """Map App Engine outage exceptions to friendly error pages."""
        if isinstance(exception, CapabilityDisabledError):
            # Datastore/app capability disabled: show maintenance page.
            return maintenance(request)
        if isinstance(exception, db.Timeout):
            return server_error(request)
        return None
| Python |
# -*- coding: utf-8 -*-
# Post-processing of settings, executed after the project's settings
# module is fully loaded: resolves media URL placeholders and registers
# helper apps.
from settings import *
import sys
# Integrate MEDIA_VERSION / MEDIA_URL into the '%d' / '%s' placeholders
# declared in the default settings. MEDIA_VERSION is expected to be
# defined by the project's settings.
if '%d' in MEDIA_URL:
    MEDIA_URL = MEDIA_URL % MEDIA_VERSION
if '%s' in ADMIN_MEDIA_PREFIX:
    ADMIN_MEDIA_PREFIX = ADMIN_MEDIA_PREFIX % MEDIA_URL
TEMPLATE_DEBUG = DEBUG
MANAGERS = ADMINS
# You can override Django's or some apps' locales with these folders:
if os.path.exists(os.path.join(COMMON_DIR, 'locale_overrides_common')):
    INSTALLED_APPS += ('locale_overrides_common',)
if os.path.exists(os.path.join(PROJECT_DIR, 'locale_overrides')):
    INSTALLED_APPS += ('locale_overrides',)
# Add admin interface media files if necessary
if 'django.contrib.admin' in INSTALLED_APPS:
    INSTALLED_APPS += ('django_aep_export.admin_media',)
# Always add Django templates (exported from zip)
INSTALLED_APPS += (
    'django_aep_export.django_templates',
)
# Convert all COMBINE_MEDIA to lists (so they can be modified in place).
for key, value in COMBINE_MEDIA.items():
    if not isinstance(value, list):
        COMBINE_MEDIA[key] = list(value)
def add_app_media(combine, *appmedia):
    """Insert app media files at the '!START!' marker of a combined file.

    No-op on the production server, where media is already combined.
    """
    if on_production_server:
        return
    entries = COMBINE_MEDIA.setdefault(combine, [])
    if '!START!' not in entries:
        entries.insert(0, '!START!')
    position = entries.index('!START!')
    entries[position:position] = appmedia
def add_uncombined_app_media(app):
    """Copy all media files directly"""
    # On production the media has already been exported; nothing to do.
    if on_production_server:
        return
    # Locate the app's media/ folder next to its package __init__.
    path = os.path.join(
        os.path.dirname(__import__(app, {}, {}, ['']).__file__), 'media')
    app = app.rsplit('.', 1)[-1]
    for root, dirs, files in os.walk(path):
        for file in files:
            if file.endswith(('.css', '.js')):
                # Path of the file relative to media/, with '/' separators.
                base = os.path.join(root, file)[len(path):].replace(os.sep,
                    '/').lstrip('/')
                target = '%s/%s' % (app, base)
                # Register each file as its own 1:1 "combined" target.
                add_app_media(target, target)
# check_app_imports warns about apps whose __init__.py imports submodules,
# which can break the recursive settings auto-import performed below.
if have_appserver or on_production_server:
    # Skip the (slow) check when actually serving requests.
    check_app_imports = None
else:
    def check_app_imports(app):
        """Import app and warn if its __init__.py pulled in submodules."""
        before = sys.modules.keys()
        __import__(app, {}, {}, [''])
        after = sys.modules.keys()
        # Submodules of `app` that appeared as a side effect of the import.
        added = [key[len(app)+1:] for key in after if key not in before and
            key.startswith(app + '.') and key[len(app)+1:]]
        if added:
            import logging
            logging.warn('The app "%(app)s" contains imports in '
                'its __init__.py (at least %(added)s). This can cause '
                'strange bugs due to recursive imports! You should '
                'either do the import lazily (within functions) or '
                'ignore the app settings/urlsauto with '
                'IGNORE_APP_SETTINGS and IGNORE_APP_URLSAUTO in '
                'your settings.py.'
                % {'app': app, 'added': ', '.join(added)})
# Import app-specific settings
_globals = globals()
class _Module(object):
    """Proxy exposing this module's globals as attributes, so app settings
    modules can read and extend the configuration via ``settings.FOO``."""
    def __setattr__(self, key, value):
        _globals[key] = value
    def __getattribute__(self, key):
        # Raise AttributeError (not KeyError) for unknown names so that
        # getattr(..., default) and hasattr() behave correctly. The
        # original also defined __hasattr__, which is not a protocol
        # Python recognizes and was therefore dead code.
        try:
            return _globals[key]
        except KeyError:
            raise AttributeError(key)
settings = _Module()
# Auto-import every installed app's settings module so apps can extend the
# global configuration (through the `settings` proxy defined above).
for app in INSTALLED_APPS:
    # This is an optimization. Django's apps don't have special settings.
    # Also, allow for ignoring some apps' settings.
    if app.startswith('django.') or app.endswith('.*') or \
            app == 'appenginepatcher' or app in IGNORE_APP_SETTINGS:
        continue
    try:
        # First we check if __init__.py doesn't import anything
        if check_app_imports:
            check_app_imports(app)
        __import__(app + '.settings', {}, {}, [''])
    except ImportError:
        # Apps without a settings module are perfectly fine.
        pass
# Remove start markers (inserted by add_app_media during app imports).
for key, value in COMBINE_MEDIA.items():
    if '!START!' in value:
        value.remove('!START!')
# Project-local overrides get the last word, if such a module exists.
try:
    from settings_overrides import *
except ImportError:
    pass
| Python |
# -*- coding: utf-8 -*-
from django.db.models import signals
from django.http import Http404
from django.utils import simplejson
from google.appengine.ext import db
from ragendja.pyutils import getattr_by_path
from random import choice
from string import ascii_letters, digits
def get_filters(*filters):
    """Pair up positional args: (prop1, val1, prop2, val2, ...) -> pairs.

    Helper method for get_filtered. Raises ValueError when given an odd
    number of arguments.
    """
    if len(filters) % 2 != 0:
        raise ValueError('You must supply an even number of arguments!')
    properties = filters[0::2]
    values = filters[1::2]
    return zip(properties, values)
def get_filtered(data, *filters):
    """Helper method for get_xxx_or_404: apply (property, value) filter
    pairs to the given query and return it.

    The loop variable was renamed so it no longer shadows the builtin
    ``filter``.
    """
    for filter_args in get_filters(*filters):
        data.filter(*filter_args)
    return data
def get_object(model, *filters_or_key, **kwargs):
    """Fetch a single entity by key_name, id, filter pairs, or key.

    Returns None (or a list of Nones mirroring a requested key list) when
    the key is invalid or of the wrong kind.
    """
    if kwargs.get('key_name'):
        item = model.get_by_key_name(kwargs.get('key_name'),
            parent=kwargs.get('parent'))
    elif kwargs.get('id'):
        item = model.get_by_id(kwargs.get('id'),
            parent=kwargs.get('parent'))
    elif len(filters_or_key) > 1:
        # Multiple positional args: property/value filter pairs.
        item = get_filtered(model.all(), *filters_or_key).get()
    else:
        # Single positional arg: a key, or a list/tuple of keys.
        error = None
        if isinstance(filters_or_key[0], (tuple, list)):
            # On failure, mirror the shape of the requested key list.
            error = [None for index in range(len(filters_or_key[0]))]
        try:
            item = model.get(filters_or_key[0])
        except (db.BadKeyError, db.KindError):
            return error
    return item
def get_object_or_404(model, *filters_or_key, **kwargs):
    """Like get_object, but raise Http404 when nothing was found."""
    result = get_object(model, *filters_or_key, **kwargs)
    if result:
        return result
    raise Http404('Object does not exist!')
def get_object_list(model, *filters):
    """Return a query over all entities of model with the filters applied."""
    query = model.all()
    return get_filtered(query, *filters)
def get_list_or_404(model, *filters):
    """Return a filtered query, raising Http404 when it matches nothing."""
    data = get_object_list(model, *filters)
    # count(1) only checks for the existence of a first result.
    if data.count(1):
        return data
    raise Http404('No objects found!')
KEY_NAME_PREFIX = 'k'
def generate_key_name(*values):
    """
    Escapes a string such that it can be used safely as a key_name.
    You can pass multiple values in order to build a path.
    """
    # '%' must be escaped first so the '/' escape ('%2') stays unambiguous.
    escaped = []
    for value in values:
        escaped.append(value.replace('%', '%1').replace('/', '%2'))
    return KEY_NAME_PREFIX + '/'.join(escaped)
def transaction(func):
    """Decorator that always runs the given function in a transaction."""
    def wrapper(*args, **kwargs):
        return db.run_in_transaction(func, *args, **kwargs)
    # In case you need to run it without a transaction you can call
    # <func>.non_transactional(...)
    wrapper.non_transactional = func
    return wrapper
@transaction
def db_add(model, key_name, parent=None, **kwargs):
    """
    This function creates an object transactionally if it does not exist in
    the datastore. Otherwise it returns None.
    """
    if model.get_by_key_name(key_name, parent=parent):
        # Somebody else already claimed this key_name.
        return None
    entity = model(parent=parent, key_name=key_name, **kwargs)
    entity.put()
    return entity
def db_create(model, parent=None, key_name_format=u'%s',
        non_transactional=False, **kwargs):
    """
    Creates a new model instance with a random key_name and puts it into
    the datastore, retrying with fresh random names until one is free.

    non_transactional=True skips the transaction wrapper around db_add.
    """
    # Replaced the fragile `cond and a or b` trick with a real conditional.
    func = db_add.non_transactional if non_transactional else db_add
    charset = ascii_letters + digits
    while True:
        # The key_name is 16 chars long. Make sure that the first char
        # doesn't begin with a digit.
        key_name = key_name_format % (choice(ascii_letters) +
            ''.join(choice(charset) for _ in range(15)))
        result = func(model, key_name, parent=parent, **kwargs)
        if result:
            return result
def prefetch_references(object_list, references, cache=None):
    """
    Dereferences the given (Key)ReferenceProperty fields of a list of objects
    in as few get() calls as possible.

    references may be a single property name or a list/tuple of names;
    cache, if given, is an iterable of already-fetched entities that get
    reused instead of fetched again. Returns object_list.
    """
    if object_list and references:
        if not isinstance(references, (list, tuple)):
            references = (references,)
        # All items are assumed to share the first item's model class.
        model = object_list[0].__class__
        targets = {}
        # Collect models and keys of all reference properties.
        # Storage format of targets: models -> keys -> instance, property
        for name in set(references):
            property = getattr(model, name)
            is_key_reference = isinstance(property, KeyReferenceProperty)
            if is_key_reference:
                target_model = property.target_model
            else:
                target_model = property.reference_class
            prefetch = targets.setdefault(target_model.kind(),
                (target_model, {}))[1]
            for item in object_list:
                if is_key_reference:
                    # Check if we already dereferenced the property
                    if hasattr(item, '_ref_cache_for_' + property.target_name):
                        continue
                    key = getattr(item, property.target_name)
                    if property.use_key_name and key:
                        # Stored value is a key_name; build a real key.
                        key = db.Key.from_path(target_model.kind(), key)
                else:
                    if ReferenceProperty.is_resolved(property, item):
                        continue
                    key = property.get_value_for_datastore(item)
                if key:
                    # Check if we already have a matching item in cache
                    if cache:
                        found_cached = None
                        for cached_item in cache:
                            if cached_item.key() == key:
                                found_cached = cached_item
                        if found_cached:
                            setattr(item, name, found_cached)
                            continue
                    # No item found in cache. Retrieve it.
                    key = str(key)
                    prefetch[key] = prefetch.get(key, ()) + ((item, name),)
        # Fetch each target model's keys in one batched get() call.
        for target_model, prefetch in targets.values():
            prefetched_items = target_model.get(prefetch.keys())
            for prefetched, group in zip(prefetched_items, prefetch.values()):
                for item, reference in group:
                    # If prefetched is None we only update the cache
                    if not prefetched:
                        property = getattr(model, reference)
                        if isinstance(property, KeyReferenceProperty):
                            setattr(item,
                                '_ref_cache_for_' + property.target_name, None)
                        else:
                            continue
                    setattr(item, reference, prefetched)
    return object_list
# Deprecated due to uglyness! :)
class KeyReferenceProperty(object):
    """
    Creates a cached accessor for a model referenced by a string property
    that stores a str(key) or key_name. This is useful if you need to work with
    the key of a referenced object, but mustn't get() it from the datastore.
    You can also integrate properties of the referenced model into the
    referencing model, so you don't need to dereference the model within a
    transaction. Note that if the referenced model's properties change you
    won't be notified, automatically.
    """
    def __init__(self, property, model, use_key_name=True, integrate={}):
        # property: either the *name* of the backing string property, or
        # the property object itself (then the name is discovered via the
        # __property_config__ monkey-patch below).
        if isinstance(property, basestring):
            self.target_name = property
        else:
            # Monkey-patch the target property, so we can monkey-patch the
            # model class, so we can detect when the user wants to set our
            # KeyReferenceProperty via the model constructor.
            # What an ugly hack; but this is the simplest implementation. :(
            # One alternative would be to implement a proxy model that
            # provides direct access to the key, but this won't work with
            # isinstance(). Maybe that's an option for Python 3000.
            # Yet another alternative would be to force the user to choose
            # either .key_name or .reference manually. That's rather ugly, too.
            self.target_name = None
            myself = self
            old_config = property.__property_config__
            def __property_config__(model_class, property_name):
                myself.target_name = property_name
                # Find the attribute name this descriptor is bound to.
                my_name = None
                for key, value in model_class.__dict__.items():
                    if value is myself:
                        my_name = key
                        break
                old_init = model_class.__init__
                def __init__(self, *args, **kwargs):
                    if my_name in kwargs:
                        # Route the kwarg through our __set__ and derive
                        # the backing string property value from it.
                        setattr(self, my_name, kwargs[my_name])
                        kwargs[property_name] = getattr(self, property_name)
                        for destination, source in myself.integrate.items():
                            integrate_value = None
                            if kwargs[my_name]:
                                try:
                                    property = getattr(self.__class__, source)
                                except:
                                    property = None
                                if property and isinstance(property,
                                        db.ReferenceProperty):
                                    integrate_value = \
                                        property.get_value_for_datastore(self)
                                else:
                                    integrate_value = getattr_by_path(
                                        kwargs[my_name], source)
                            kwargs[destination] = integrate_value
                    old_init(self, *args, **kwargs)
                model_class.__init__ = __init__
                old_config(model_class, property_name)
            property.__property_config__ = __property_config__
        self.target_model = model
        self.use_key_name = use_key_name
        self.integrate = integrate
    def __get__(self, instance, unused):
        if instance is None:
            return self
        attr = getattr(instance, self.target_name)
        # Compare the stored key/name against the cached entity's key.
        cache = getattr(instance, '_ref_cache_for_' + self.target_name, None)
        if not cache:
            cache_key = cache
        elif self.use_key_name:
            cache_key = cache.key().name()
        else:
            cache_key = str(cache.key())
        if attr != cache_key:
            # Cache is stale or empty: fetch the referenced entity.
            if self.use_key_name:
                cache = self.target_model.get_by_key_name(attr)
            else:
                cache = self.target_model.get(attr)
            setattr(instance, '_ref_cache_for_' + self.target_name, cache)
        return cache
    def __set__(self, instance, value):
        if value and not isinstance(value, db.Model):
            raise ValueError('You must supply a Model instance.')
        if not value:
            key = None
        elif self.use_key_name:
            key = value.key().name()
        else:
            key = str(value.key())
        # Store the entity in the cache, the key in the backing property.
        setattr(instance, '_ref_cache_for_' + self.target_name, value)
        setattr(instance, self.target_name, key)
        # Mirror integrated source values onto the referencing instance.
        for destination, source in self.integrate.items():
            integrate_value = None
            if value:
                try:
                    property = getattr(value.__class__, source)
                except:
                    property = None
                if property and isinstance(property, db.ReferenceProperty):
                    # Copy the raw key instead of dereferencing.
                    integrate_value = property.get_value_for_datastore(value)
                else:
                    integrate_value = getattr_by_path(value, source)
            setattr(instance, destination, integrate_value)
# Don't use this, yet. It's not part of the official API!
class ReferenceProperty(db.ReferenceProperty):
    """db.ReferenceProperty that can mirror ('integrate') attributes of the
    referenced entity onto the referencing one whenever it is set."""
    def __init__(self, reference_class, integrate={}, **kwargs):
        # integrate: {destination_property_name: source_attribute_path}
        self.integrate = integrate
        super(ReferenceProperty, self).__init__(reference_class, **kwargs)
    @classmethod
    def is_resolved(cls, property, instance):
        """True if the reference is already dereferenced (or unset).

        Pokes at db.ReferenceProperty's private attribute names (the
        mangled __id_attr_name/__resolved_attr_name resolve against the
        base class because this class has the same name); logs and
        returns False if the SDK internals changed.
        """
        try:
            if not hasattr(instance, property.__id_attr_name()) or \
                    not getattr(instance, property.__id_attr_name()):
                return True
            return bool(getattr(instance, property.__resolved_attr_name()))
        except:
            import logging
            logging.exception('ReferenceProperty implementation changed! '
                'Update ragendja.dbutils.ReferenceProperty.'
                'is_resolved! Exception was:')
            return False
    def __set__(self, instance, value):
        super(ReferenceProperty, self).__set__(instance, value)
        # Copy integrated values from the referenced entity.
        for destination, source in self.integrate.items():
            integrate_value = None
            if value:
                try:
                    property = getattr(value.__class__, source)
                except:
                    property = None
                if property and isinstance(property, db.ReferenceProperty):
                    # Copy the raw key instead of dereferencing.
                    integrate_value = property.get_value_for_datastore(value)
                else:
                    integrate_value = getattr_by_path(value, source)
            setattr(instance, destination, integrate_value)
def to_json_data(model_instance, property_list):
    """
    Converts a models into dicts for use with JSONResponse.
    You can either pass a single model instance and get a single dict
    or a list of models and get a list of dicts.
    For security reasons only the properties in the property_list will get
    added. If the value of the property has a json_data function its result
    will be added, instead.
    """
    if hasattr(model_instance, '__iter__'):
        # A list/query of models: convert each element recursively.
        return [to_json_data(item, property_list) for item in model_instance]
    json_data = {}
    for property in property_list:
        property_instance = None
        try:
            property_instance = getattr(model_instance.__class__,
                property.split('.', 1)[0])
        except:
            pass
        # Remainder of the dotted path ('' or e.g. '.key' / '.key.name').
        key_access = property[len(property.split('.', 1)[0]):]
        if isinstance(property_instance, db.ReferenceProperty) and \
                key_access in ('.key', '.key.name'):
            # Read the raw key without dereferencing the entity.
            key = property_instance.get_value_for_datastore(model_instance)
            if key_access == '.key':
                json_data[property] = str(key)
            else:
                json_data[property] = key.name()
            continue
        value = getattr_by_path(model_instance, property, None)
        # Prefer the value's json_data attribute/result when present.
        value = getattr_by_path(value, 'json_data', value)
        json_data[property] = value
    return json_data
def _get_included_cleanup_entities(entities, rels_seen, to_delete, to_put):
    """Collect referenced entities that must be deleted with `entities`."""
    # Models can define a CLEANUP_REFERENCES attribute if they have
    # reference properties that must get deleted with the model.
    include_references = getattr(entities[0], 'CLEANUP_REFERENCES', None)
    if include_references:
        if not isinstance(include_references, (list, tuple)):
            include_references = (include_references,)
        # Batch-dereference to minimize datastore get() calls.
        prefetch_references(entities, include_references)
        for entity in entities:
            for name in include_references:
                subentity = getattr(entity, name)
                to_delete.append(subentity)
                # Recurse so the sub-entity's own relations get cleaned.
                get_cleanup_entities(subentity, rels_seen=rels_seen,
                    to_delete=to_delete, to_put=to_put)
def get_cleanup_entities(instance, rels_seen=None, to_delete=None, to_put=None):
    """Collect entities that must be deleted/updated when instance dies.

    Returns (rels_seen, to_delete, to_put). Recursive over many-to-one
    relations; bails out with an Exception beyond ~200 affected entities.
    """
    if not instance or getattr(instance, '__handling_delete', False):
        return [], [], []
    if to_delete is None:
        to_delete = []
    if to_put is None:
        to_put = []
    if rels_seen is None:
        rels_seen = []
    # Delete many-to-one relations
    for related in instance._meta.get_all_related_objects():
        # Check if we already have fetched some of the entities
        seen = (instance.key(), related.opts, related.field.name)
        if seen in rels_seen:
            continue
        rels_seen.append(seen)
        entities = getattr(instance, related.get_accessor_name(),
            related.model.all().filter(related.field.name + ' =', instance))
        # 501 instead of 500: detect (and later choke on) oversized sets.
        entities = entities.fetch(501)
        for entity in entities[:]:
            # Check if we might already have fetched this entity
            for item in to_delete:
                if item.key() == entity.key():
                    entities.remove(entity)
                    break
            for item in to_put:
                if item.key() == entity.key():
                    # It will be deleted anyway; no need to update it.
                    to_put.remove(item)
                    break
        to_delete.extend(entities)
        if len(to_delete) > 200:
            raise Exception("Can't delete so many entities at once!")
        if not entities:
            continue
        for entity in entities:
            get_cleanup_entities(entity, rels_seen=rels_seen,
                to_delete=to_delete, to_put=to_put)
        _get_included_cleanup_entities(entities, rels_seen, to_delete, to_put)
    # Clean up many-to-many relations
    for related in instance._meta.get_all_related_many_to_many_objects():
        seen = (instance.key(), related.opts, related.field.name)
        if seen in rels_seen:
            continue
        rels_seen.append(seen)
        entities = getattr(instance, related.get_accessor_name(),
            related.model.all().filter(related.field.name + ' =', instance))
        entities = entities.fetch(501)
        for entity in entities[:]:
            # Check if we might already have fetched this entity
            for item in to_put + to_delete:
                if item.key() == entity.key():
                    entities.remove(entity)
                    entity = item
                    break
            # We assume that data is a list. Remove instance from the list.
            # NOTE(review): for a db.Key item equal to instance.key() the
            # second clause evaluates item.key(), which db.Key does not
            # provide -- confirm the intended condition.
            data = getattr(entity, related.field.name)
            data = [item for item in data
                if (isinstance(item, db.Key) and
                    item != instance.key()) or
                    item.key() != instance.key()]
            setattr(entity, related.field.name, data)
        to_put.extend(entities)
        if len(to_put) > 200:
            raise Exception("Can't change so many entities at once!")
    return rels_seen, to_delete, to_put
def cleanup_relations(instance, **kwargs):
    """Signal handler: cascade-delete/update entities related to instance.

    The __handling_delete flag guards against re-entrancy while the
    collected entities are being deleted themselves.
    """
    if getattr(instance, '__handling_delete', False):
        return
    rels_seen, to_delete, to_put = get_cleanup_entities(instance)
    _get_included_cleanup_entities((instance,), rels_seen, to_delete, to_put)
    # Mark everything first so nested delete signals become no-ops.
    for entity in [instance] + to_delete:
        entity.__handling_delete = True
    if to_delete:
        db.delete(to_delete)
    for entity in [instance] + to_delete:
        del entity.__handling_delete
    if to_put:
        db.put(to_put)
class FakeModel(object):
    """A fake model class which is stored as a string.
    This can be useful if you need to emulate some model whose entities
    get generated by syncdb and are never modified afterwards.
    For example: ContentType and Permission.
    Use this with FakeModelProperty and FakeModelListProperty (the latter
    simulates a many-to-many relation).
    """
    # Important: If you want to change your fields at a later point you have
    # to write a converter which upgrades your datastore schema.
    fields = ('value',)
    def __init__(self, **kwargs):
        # Exactly the declared fields must be supplied, no more, no less.
        if sorted(kwargs.keys()) != sorted(self.fields):
            raise ValueError('You have to pass the following values to '
                'the constructor: %s' % ', '.join(self.fields))
        for key, value in kwargs.items():
            setattr(self, key, value)
    class _meta(object):
        # Minimal stand-in for Django's Model._meta.
        installed = True
    def get_value_for_datastore(self):
        # Serialized representation: a JSON list of the field values.
        return simplejson.dumps([getattr(self, field) for field in self.fields])
    @property
    def pk(self):
        return self.get_value_for_datastore()
    @property
    def id(self):
        return self.pk
    @classmethod
    def load(cls, value):
        return simplejson.loads(value)
    @classmethod
    def make_value_from_datastore(cls, value):
        # Rebuild an instance from the serialized JSON field list.
        return cls(**dict(zip(cls.fields, cls.load(value))))
    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__,
            ' | '.join([unicode(getattr(self, field))
                for field in self.fields]))
class FakeModelProperty(db.Property):
    """Stores a single FakeModel instance as a JSON string property."""
    data_type = basestring
    def __init__(self, model, raw=False, *args, **kwargs):
        # raw=True exposes the JSON string directly in forms.
        self.raw = raw
        self.model = model
        super(FakeModelProperty, self).__init__(*args, **kwargs)
    def validate(self, value):
        # Accept serialized strings and coerce them to model instances.
        if isinstance(value, basestring):
            value = self.make_value_from_datastore(value)
        if not isinstance(value, self.model):
            raise db.BadValueError('Value must be of type %s' %
                self.model.__name__)
        if self.validator is not None:
            self.validator(value)
        return value
    def get_value_for_datastore(self, model_instance):
        fake_model = getattr(model_instance, self.name)
        if not fake_model:
            return None
        if not self.indexed:
            # Unindexed values are stored as Text (no length limit).
            return db.Text(fake_model.get_value_for_datastore())
        return fake_model.get_value_for_datastore()
    def make_value_from_datastore(self, value):
        if not value:
            return None
        return self.model.make_value_from_datastore(unicode(value))
    def get_value_for_form(self, instance):
        return self.get_value_for_datastore(instance)
    def make_value_from_form(self, value):
        return value
    def __set__(self, model_instance, value):
        if isinstance(value, basestring):
            value = self.make_value_from_datastore(value)
        super(FakeModelProperty, self).__set__(model_instance, value)
    @classmethod
    def get_fake_defaults(self, fake_model, multiple=False, **kwargs):
        # NOTE(review): classmethod whose first parameter is named 'self';
        # it actually receives the class.
        from ragendja import forms
        form = multiple and forms.FakeModelMultipleChoiceField or \
            forms.FakeModelChoiceField
        defaults = {'form_class': form, 'fake_model': fake_model}
        defaults.update(kwargs)
        return defaults
    def get_form_field(self, **kwargs):
        if self.raw:
            # Raw mode: plain text input for the serialized value.
            from django import forms
            defaults = kwargs
            defaults['widget'] = forms.TextInput(attrs={'size': 80})
        else:
            defaults = FakeModelProperty.get_fake_defaults(self.model, **kwargs)
        return super(FakeModelProperty, self).get_form_field(**defaults)
class FakeModelListProperty(db.ListProperty):
    """Stores a list of FakeModel instances (as JSON strings); emulates a
    many-to-many relation."""
    fake_item_type = basestring
    def __init__(self, model, *args, **kwargs):
        self.model = model
        if not kwargs.get('indexed', True):
            # Unindexed items are stored as Text (no 500-byte limit).
            self.fake_item_type = db.Text
        # BUG FIX: the original passed self.__class__.fake_item_type here,
        # which reads the *class* attribute and silently ignores the
        # instance-level db.Text override set just above.
        super(FakeModelListProperty, self).__init__(
            self.fake_item_type, *args, **kwargs)
    def validate(self, value):
        """Coerce stored strings to model instances and type-check them."""
        new_value = []
        for item in value:
            if isinstance(item, basestring):
                item = self.make_value_from_datastore([item])[0]
            if not isinstance(item, self.model):
                raise db.BadValueError('Value must be of type %s' %
                    self.model.__name__)
            new_value.append(item)
        if self.validator is not None:
            self.validator(new_value)
        return new_value
    def get_value_for_datastore(self, model_instance):
        fake_models = getattr(model_instance, self.name)
        if not self.indexed:
            return [db.Text(fake_model.get_value_for_datastore())
                for fake_model in fake_models]
        return [fake_model.get_value_for_datastore()
            for fake_model in fake_models]
    def make_value_from_datastore(self, value):
        return [self.model.make_value_from_datastore(unicode(item))
            for item in value]
    def get_value_for_form(self, instance):
        return self.get_value_for_datastore(instance)
    def make_value_from_form(self, value):
        return value
    def get_form_field(self, **kwargs):
        defaults = FakeModelProperty.get_fake_defaults(self.model,
            multiple=True, **kwargs)
        defaults['required'] = False
        return super(FakeModelListProperty, self).get_form_field(**defaults)
class KeyListProperty(db.ListProperty):
    """Simulates a many-to-many relation using a list property.
    On the model level you interact with keys, but when used in a ModelForm
    you get a ModelMultipleChoiceField (as if it were a ManyToManyField)."""
    def __init__(self, reference_class, *args, **kwargs):
        # May be an 'app.Model' string; resolved lazily by reference_class.
        self._reference_class = reference_class
        super(KeyListProperty, self).__init__(db.Key, *args, **kwargs)
    @property
    def reference_class(self):
        # Lazily resolve string references to the actual model class.
        if isinstance(self._reference_class, basestring):
            from django.db import models
            self._reference_class = models.get_model(
                *self._reference_class.split('.', 1))
        return self._reference_class
    def validate(self, value):
        # Coerce strings and model instances to db.Key before validating.
        new_value = []
        for item in value:
            if isinstance(item, basestring):
                item = db.Key(item)
            if isinstance(item, self.reference_class):
                item = item.key()
            if not isinstance(item, db.Key):
                raise db.BadValueError('Value must be a key or of type %s' %
                    self.reference_class.__name__)
            new_value.append(item)
        return super(KeyListProperty, self).validate(new_value)
    def get_form_field(self, **kwargs):
        from django import forms
        defaults = {'form_class': forms.ModelMultipleChoiceField,
            'queryset': self.reference_class.all(),
            'required': False}
        defaults.update(kwargs)
        return super(KeyListProperty, self).get_form_field(**defaults)
| Python |
#!/usr/bin/env python
# manage.py wrapper: sets up the App Engine patch environment, refreshes
# translations and (for "update") combined media, then delegates to
# Django's command dispatcher.
if __name__ == '__main__':
    from common.appenginepatch.aecmd import setup_env
    setup_env(manage_py_env=True)
    # Recompile translation files
    from mediautils.compilemessages import updatemessages
    updatemessages()
    # Generate compressed media files for manage.py update
    import sys
    from mediautils.generatemedia import updatemedia
    if len(sys.argv) >= 2 and sys.argv[1] == 'update':
        updatemedia(True)
    import settings
    from django.core.management import execute_manager
    execute_manager(settings)
| Python |
from ragendja.settings_post import settings
# Register the jQuery core and plugin scripts at the start marker of the
# language-specific combined JS file (a no-op on the production server,
# where media is already combined).
settings.add_app_media('combined-%(LANGUAGE_CODE)s.js',
    'jquery/jquery.js',
    'jquery/jquery.fixes.js',
    'jquery/jquery.ajax-queue.js',
    'jquery/jquery.bgiframe.js',
    'jquery/jquery.livequery.js',
    'jquery/jquery.form.js',
)
| Python |
#!/usr/bin/env python
# manage.py wrapper (identical to the other manage.py in this dump):
# prepares the App Engine patch environment, refreshes translations and
# media, then delegates to Django's command dispatcher.
if __name__ == '__main__':
    from common.appenginepatch.aecmd import setup_env
    setup_env(manage_py_env=True)
    # Recompile translation files
    from mediautils.compilemessages import updatemessages
    updatemessages()
    # Generate compressed media files for manage.py update
    import sys
    from mediautils.generatemedia import updatemedia
    if len(sys.argv) >= 2 and sys.argv[1] == 'update':
        updatemedia(True)
    import settings
    from django.core.management import execute_manager
    execute_manager(settings)
| Python |
'''
Created on May 21, 2010
Cleans up projects stuck in In_Progress for GitHub
@author: StevenNorris
'''
import sys
from GitHubutils import GitHubutils
import traceback
def main(argv):
    """Clean up GitHub jobs stuck in In_Progress for one datasource.

    argv: [program, datasource_id, 'True'/'False' (test mode)].
    Exits via sys.exit() on bad arguments, missing db info files, or
    database errors reported by GitHubutils.
    """
    try:
        datasource_id = argv[1]
        test = argv[2]
    except IndexError:
        print("Format arguments thusly: [program] [datasource_id] [True/False(TestMode)]")
        sys.exit()
    try:
        # checks for test mode
        if test == 'True':
            print('TEST MODE ACTIVATED')
            utils = GitHubutils("dbInfoTest.txt")
        else:
            utils = GitHubutils("dbInfo.txt")
    except Exception:
        print("Please create the dbInfo.txt and dbInfoTest.txt files. Check ReadMe for formatting.")
        sys.exit()
    print("Cleaning up XMLgathering")
    # XMLgathering cleanup also deletes the gathered project data itself.
    _cleanup_stage(utils, datasource_id, 'XMLgathering', delete_project=True)
    print("Cleaning up Parsing")
    _cleanup_stage(utils, datasource_id, 'Parsing', delete_project=False)

def _cleanup_stage(utils, datasource_id, stage, delete_project=False):
    """Process every cleanup job for one stage; exits on DB errors.

    Replaces two copy-pasted while loops from main(). The bare `except:`
    was narrowed to `except Exception:` so the SystemExit raised by the
    sys.exit() calls inside the loop is no longer swallowed and falsely
    reported as a cleanup failure.
    """
    job = utils.get_cleanup_job(datasource_id, stage)
    if utils.error:
        sys.exit()
    while job != None:
        # cleans up for each project
        try:
            project_name = job[0]
            developer_name = job[1]
            print('Cleaning up for ' + project_name + ' by ' + developer_name)
            if delete_project:
                utils.delete_project(datasource_id, project_name,
                    developer_name)
            # NOTE(review): GitHubutils.change_status's signature is
            # (status, previous_stage, ...); passing the stage name first
            # makes it the new *status* -- confirm the intended order.
            utils.change_status(stage, 'Clean_Up', datasource_id,
                project_name, developer_name)
            job = utils.get_cleanup_job(datasource_id, stage)
            if utils.error:
                sys.exit()
        # if clean up fails
        except Exception:
            print("!!!!WARNING!!!! Clean up failed")
            utils.post_error('CleanUp(' + stage + '): \n' +
                traceback.format_exc(), datasource_id, project_name,
                developer_name)
            job = utils.get_cleanup_job(datasource_id, stage)
            if utils.error:
                sys.exit()

main(sys.argv)
'''
Created on Jun 5, 2009
@author: Steven Norris
This module provides basic utilities for the FLOSS mole spiders.
'''
import MySQLdb
import httplib
import traceback
class GitHubutils:
#this gathers the initial connection to the database
def __init__(self,file_name):
try:
dbfile = open(file_name, 'r')
except:
raise Exception("Database file error: dbinfo.txt")
self.host = dbfile.readline().strip()
self.port = int(dbfile.readline().strip())
self.username = dbfile.readline().strip()
self.password = dbfile.readline().strip()
self.database = dbfile.readline().strip()
self.db=MySQLdb.connect(host=self.host, user=self.username, passwd=self.password, db=self.database)
self.cursor = self.db.cursor()
self.error=False
'''
This method provides the ability to gather a page
'''
    def get_page(self, url):
        """Fetch the given path from github.com and return the body as str.

        Implicitly returns None when the request fails (error is printed,
        not raised).
        """
        try:
            conn = httplib.HTTPConnection('github.com')
            conn.request("GET", url)
            resp = conn.getresponse()
            html_page = resp.read()
            html_page = str(html_page)
            conn.close()
            return html_page
        except:
            # Best-effort: swallow network errors and just report them.
            print ("The page request failed.")
'''
This method provides the ability to insert into a database
'''
    def db_insert(self, query_string, *params):
        """Execute a parameterized insert; log (not raise) on failure."""
        try:
            self.cursor.execute(query_string, params)
        except:
            # Deliberately non-fatal: report and keep going.
            print("!!!!WARNING!!!! Insertion into "+self.database+" failed.")
            print(traceback.format_exc())
'''
This method provides the ability to get a job from the job database.
'''
def get_job(self, datasource_id, status):
lock = '''LOCK TABLE gh_jobs READ, gh_jobs AS t WRITE'''
select = '''SELECT project_name,developer_name
FROM gh_jobs AS t
WHERE status = %s
AND datasource_id = %s
LIMIT 1'''
update='''UPDATE gh_jobs AS t SET status='In_Progress', last_modified=NOW()
WHERE datasource_id=%s
AND project_name=%s
AND developer_name=%s
'''
unlock = '''UNLOCK TABLES'''
try:
self.cursor.execute(lock)
self.cursor.execute(select, (status,datasource_id))
result = self.cursor.fetchone()
self.cursor.execute(update,(datasource_id, result[0],result[1]))
self.cursor.execute(unlock)
return result
except:
print ("Finding job failed.")
self.cursor.execute(unlock)
return None
#this method allows for status changes
def change_status(self,status,previous_stage,datasource_id,project,developer):
update='''UPDATE gh_jobs
SET status=%s, previous_stage=%s, last_modified=NOW()
WHERE datasource_id=%s
AND project_name=%s
AND developer_name=%s
'''
try:
self.cursor.execute(update,(status,previous_stage,datasource_id,project,developer))
except:
print('!!!!WARNING!!!! Status '+status+' did not update correctly for '+project+' by '+developer+' with id '+datasource_id+'.')
print(traceback.format_exc())
self.error=True
#this method allows for error posting
def post_error(self,message,datasource_id,project,developer):
update='''UPDATE gh_jobs
SET error_msg=%s, status='error', last_modified=NOW()
WHERE datasource_id=%s
AND project_name=%s
AND developer_name=%s'''
gather='''SELECT status FROM gh_jobs
WHERE datasource_id=%s
AND project_name=%s
AND developer_name=%s
LIMIT 1'''
try:
self.cursor.execute(gather,(datasource_id,project,developer))
fail_stage=self.cursor.fetchone()
fail_stage=fail_stage[0]
message=fail_stage+":\n"+message
self.cursor.execute(update,(message,datasource_id,project,developer))
except:
print('!!!!WARNING!!!! Error '+message+'could not be posted to '+project+' for '+developer+' at '+datasource_id+'.')
self.error=True
def gather_xml(self,project_name,developer_name,datasource_id):
gather='''SELECT XML FROM gh_projects
WHERE datasource_id=%s
AND project_name=%s
AND developer_name=%s
LIMIT 1'''
try:
self.cursor.execute(gather,(datasource_id,project_name,developer_name))
xml=self.cursor.fetchone()
xml=xml[0]
except:
print('!!!!WARNING!!!! XML not found for '+project_name+' and '+developer_name+' at '+str(datasource_id))
xml=None
return xml
'''
This method provides the ability to get a clean up job from the job database.
'''
def get_cleanup_job(self, datasource_id, previousStage):
lock = '''LOCK TABLE gh_jobs READ, gh_jobs AS t WRITE'''
select = '''SELECT project_name,developer_name
FROM gh_jobs AS t
WHERE status = 'In_Progress'
AND previous_stage=%s
AND datasource_id = %s
LIMIT 1'''
update='''UPDATE gh_jobs AS t SET status='Clean_Up', last_modified=NOW()
WHERE datasource_id=%s
AND project_name=%s
AND developer_name=%s
'''
unlock = '''UNLOCK TABLES'''
try:
self.cursor.execute(lock)
self.cursor.execute(select, (previousStage,datasource_id))
result = self.cursor.fetchone()
self.cursor.execute(update,(datasource_id, result[0],result[1]))
self.cursor.execute(unlock)
return result
except:
print ("Finding job failed.")
self.cursor.execute(unlock)
return None
'''
Deletes a project for cleanup
'''
def delete_project(self,datasource_id,project,developer):
try:
delete='''DELETE FROM gh_projects WHERE project_name=%s AND developer_name=%s AND datasource_id=%s'''
self.cursor.execute(delete,(project,developer,datasource_id))
except:
print("!!!!WARNING!!!! Deletion of project failed")
print(traceback.format_exc())
| Python |
'''
Created on Jul 18, 2009
@author: Steven Norris
This module creates the jobs to be run for GitHub
RUN INSTRUCTIONS
Run from command line using this format
[Interpret] GitHubJobs.py [DatasourceID] [Test mode True/False]
Test mode is based on string comparison so make sure capitalization and spelling are exact.
'''
from GitHubutils import GitHubutils
from HTMLParser import HTMLParser
import re
import time
import traceback
import sys
import string
import urllib
# Hostname used for the GitHub HTTP requests made by this module.
BASE_SITE='github.com'
'''
This method finds the Next link on the project page
'''
class HasNextSpider(HTMLParser):
    """HTMLParser that records the 'Next' pagination link.

    After feed(), ``check_link`` holds the href of the anchor whose href
    matches /repositories/recent?page... and whose hotkey attribute is
    'l'; it stays '' when no such anchor was seen.
    """
    check_link=''

    #used to reset links after a run
    def reset_link(self):
        """Clear the recorded link so the parser can be reused."""
        self.check_link=''

    #used to handle the start tags of the main page
    def handle_starttag(self,tag,attrs):
        """Record the href of the <a ... hotkey="l"> pagination anchor.

        Fix: the original indexed attrs positionally (attrs[0] had to be
        href and attrs[1] hotkey) and hid any IndexError in a bare
        except; a dict lookup makes attribute order irrelevant.
        """
        if tag=='a':
            attr_map=dict(attrs)
            link=attr_map.get('href')
            hotkey=attr_map.get('hotkey')
            if link and hotkey=='l' and re.search("/repositories/recent\?page",link)!=None:
                self.check_link=link
def NextSpider(page):
    """Return the 'Next' pagination path found in *page*, or None.

    Fix: the original sliced the raw match as link[10:len(link)-16], but
    the trailing literal '" hotkey="l">Next' is 17 characters, so the
    returned path kept a trailing '"'.  A capture group extracts exactly
    the path.
    """
    match=re.compile('><a href="(/repositories/recent\?page=\d+?)" hotkey="l">Next');
    groups=match.findall(page)
    if(groups):
        return groups[0]
    else:
        return None
def GitHubSpider(page):
    """Return the '/owner/project' href target of every <td class="title"> cell."""
    cell_pattern = re.compile('<td class="title">.+?</td>', re.DOTALL)
    # For each title cell, take its first href="..." and strip the
    # 'href="' prefix and closing quote.
    return [re.findall('href="/.+?/.+?"', cell)[0][6:-1]
            for cell in cell_pattern.findall(page)]
'''
This method runs the spider sequence needed to collect the information from github.com
'''
def main(argv):
    """Populate gh_jobs with one 'XMLgathering' job per repository found
    via GitHub's search pages.

    argv: [program, datasource_id, test_flag]; test_flag 'True' limits
    the crawl (one page per letter) and uses the test database.
    NOTE(review): Python 2 only -- uses sys.maxint, string.lowercase,
    urllib.urlopen and a 'print e' statement.
    """
    #Declaring variables and creating spiders
    projects_pages="/repositories/recent"
    hasNextPage=True
    track_page=1
    # Parse required command-line arguments.
    try:
        datasource_id=argv[1]
        datasource_id=str(datasource_id)
        test=argv[2]
    except:
        print("Format arguments thusly: [program] [datasource_id] [True/False(TestMode)]")
        sys.exit()
    #checks for test mode
    if(test=='True'):
        try:
            print("TEST MODE ACTIVATED")
            less_num=9
            utils=GitHubutils("dbInfoTest.txt")
        except:
            print("Please create the dbInfo.txt and dbInfoTest.txt files. Check ReadMe for formatting.")
            sys.exit()
    else:
        try:
            less_num=sys.maxint
            utils=GitHubutils("dbInfo.txt")
        except:
            print("Please create the dbInfo.txt and dbInfoTest.txt files. Check ReadMe for formatting.")
            sys.exit()
    try:
        # Search for repositories by each lowercase letter and walk the
        # paginated results.
        for letter in string.lowercase:
            page = 1
            total_pages = 2 #initialize at anything >= 1 (page)
            while page <= total_pages:
                time.sleep(2)
                reader = urllib.urlopen('https://github.com/search?type=Repositories&language=&q='+letter+'&repo=&langOverride=&x=0&y=0&start_value='+str(page))
                current_html = reader.read()
                reader.close()
                if test=="True":
                    total_pages = 1
                else:
                    # NOTE(review): assumes the last '>N</a>\n' on the page
                    # is the final page number -- fragile screen scraping.
                    try:
                        total_pages = int(re.search('>(\d+)</a>\n',current_html).group(1))
                    except:
                        pass
                page += 1
                # Each result anchor looks like <a href="/owner/name">owner / name</a>.
                repos = re.findall(r'<a href="/(\w+)/(\w+)">\1 / \2</a>',current_html)
                for repo in repos:
                    project_name = repo[1]
                    developer_name = repo[0]
                    status = None  # NOTE(review): unused
                    insert='''INSERT IGNORE INTO gh_jobs (datasource_id,project_name,developer_name,status,last_modified) VALUES(%s,%s,%s,%s,NOW())'''
                    try:
                        utils.db_insert(insert,datasource_id,project_name,developer_name,'XMLgathering')
                    except Exception as e:
                        print e
        #Begin loop through project pages
        # while(hasNextPage and track_page<less_num):
        #     print("Beginning on page "+str(track_page))
        #     print("Gathering base page.")
        #     base_page=utils.get_page("http://"+BASE_SITE+projects_pages)
        #     time.sleep(2)
        #
        #     #Find the project links
        #     print("Gathering project links.")
        #
        #     redirect_links=GitHubSpider(base_page)
        #     if(test=="True"):
        #         end_num=5
        #     else:
        #         end_num=len(redirect_links)
        #
        #     print("Creating jobs.")
        #     #Gathering pages for each project link
        #     for link in redirect_links[0:end_num]:
        #         print("Creating job for : "+link)
        #         link_segments=link.split('/')
        #         project_name=link_segments[2]
        #         developer_name=link_segments[1]
        #
        #         #gathers xml page and inserts into database
        #         insert='''INSERT IGNORE INTO gh_jobs (datasource_id,project_name,developer_name,status,last_modified)
        #             VALUES(%s,%s,%s,%s,NOW())'''
        #         utils.db_insert(insert,datasource_id,project_name,developer_name,'XMLgathering')
        #
        #     #Check for next link
        #     next_link=NextSpider(base_page)
        #     track_page+=1
        #     if next_link and track_page<less_num:
        #         print(next_link)
        #         projects_pages=next_link
        #     else:
        #         print("Final link reach.")
        #         hasNextPage=False
    except:
        print('Job creation failed.')
        print(traceback.format_exc())
        sys.exit()
main(sys.argv)
| Python |
'''
Created on Jul 19, 2009
This module houses all the parsers needed for GitHub
@author: Steven Norris
'''
import re
#This parses the description for the XML
def parse_description(xml):
    """Return the text of the first <description> element, or None."""
    found = re.search('<description>(.+?)</description>', xml, re.DOTALL)
    return found.group(1) if found else None
#this parses the forks boolean and integer for the XML
def parse_forks(xml):
    """Return (fork_flag, fork_count) from the project XML.

    Mirrors the original fallbacks: (flag, None) when the count is
    missing, (None, None) when the flag is missing (the count is not
    even looked for in that case).
    """
    flag_found = re.search('<fork type="boolean">(.+?)</fork>', xml)
    if not flag_found:
        return (None, None)
    count_found = re.search('<forks type="integer">(.+?)</forks>', xml)
    if count_found:
        return (flag_found.group(1), count_found.group(1))
    return (flag_found.group(1), None)
#this parses the private variable for the XML
def parse_private(xml):
    """Return the text of the first <private> flag element, or None."""
    found = re.search('<private type="boolean">(.+?)</private>', xml)
    return found.group(1) if found else None
#this parses the url for the xml
def parse_url(xml):
    """Return the text of the first <url> element, or None."""
    found = re.search('<url>(.+?)</url>', xml)
    return found.group(1) if found else None
#this parses the homepage for the xml
def parse_home(xml):
    """Return the text of the first <homepage> element, or None."""
    found = re.search('<homepage>(.+?)</homepage>', xml)
    return found.group(1) if found else None
#this parses the watchers for the xml
def parse_watch(xml):
    """Return the text of the first <watchers> count element, or None."""
    found = re.search('<watchers type="integer">(.+?)</watchers>', xml)
    return found.group(1) if found else None
#this parses the open issues for the xml
def parse_issues(xml):
    """Return the text of the first <open-issues> count element, or None."""
    found = re.search('<open-issues type="integer">(.+?)</open-issues>', xml)
    return found.group(1) if found else None
| Python |
'''
Created on Jun 9, 2009
@author: Steven Norris
This module runs the jobs from github.com.
RUN INSTRUCTIONS
Run from command line using this format
[Interpret] GitHubSpider.py [DatasourceID] [Test mode True/False]
Test mode is based on string comparison so make sure capitalization and spelling are exact.
'''
from GitHubutils import GitHubutils
import time
import traceback
import sys
import GitHubParsers
# GitHub hostname.  NOTE(review): appears unused in this module -- the
# XML API URL below is already absolute.
BASE_SITE='github.com'
def main(argv):
    """Run the GitHub jobs: fetch each project's XML, then parse it.

    argv: [program, datasource_id, test_flag]; test_flag 'True' selects
    the test database.  Jobs advance XMLgathering -> Parsing ->
    Completed; failures are posted back to gh_jobs via post_error.
    """
    XML_projects_pages="http://github.com/api/v2/xml/repos/show/"
    # Parse required command-line arguments.
    try:
        datasource_id=argv[1]
        test=argv[2]
    except:
        print("Format arguments thusly: [program] [datasource_id] [True/False(TestMode)]")
        sys.exit()
    try:
        #checks for test mode
        if(test=='True'):
            print('TEST MODE ACTIVATED')
            utils=GitHubutils("dbInfoTest.txt")
        else:
            utils=GitHubutils("dbInfo.txt")
    except:
        print("Please create the dbInfo.txt and dbInfoTest.txt files. Check ReadMe for formatting.")
        sys.exit()
    #collects the xml for each project
    print("Gathering XML.")
    job=utils.get_job(datasource_id,'XMLgathering')
    if(utils.error):
        sys.exit()
    while(job):
        try:
            project_name=job[0]
            developer_name=job[1]
            print('Collecting for '+project_name+' and '+developer_name+'.')
            XML_page=utils.get_page(XML_projects_pages+developer_name+'/'+project_name)
            #if project exists
            if(XML_page):
                XML_page=str(XML_page)
                insert='''INSERT INTO gh_projects (datasource_id,project_name,developer_name,XML,last_modified)
                    VALUES(%s,%s,%s,%s,NOW())'''
                utils.db_insert(insert,datasource_id,project_name,developer_name,XML_page)
            #if project does not exist
            else:
                # Fix: the column was misspelled 'projects_name', which
                # made this INSERT fail for every missing project
                # (gh_projects uses project_name everywhere else).
                insert='''INSERT INTO gh_projects (datasource_id,project_name,developer_name,XML,last_modified)
                    VALUES (%s,%s,%s,NULL,NOW())'''
                utils.db_insert(insert,datasource_id,project_name,developer_name)
            #sleeps, checks for errors, and gets new job
            time.sleep(2)
            utils.change_status('Parsing','XMLgathering',datasource_id,project_name,developer_name)
            job=utils.get_job(datasource_id,'XMLgathering')
            if(utils.error):
                sys.exit()
        #if failure occurs, posts an error and finds a new job
        except:
            print("!!!!WARNING!!!! gathering has failed.")
            utils.post_error(traceback.format_exc(),datasource_id,project_name,developer_name)
            job=utils.get_job(datasource_id,'XMLgathering')
            if(utils.error):
                sys.exit()
    #does parsing for all parsing jobs
    print("\nParsing")
    job=utils.get_job(datasource_id,'Parsing')
    if(utils.error):
        sys.exit()
    while(job):
        try:
            #runs parsers
            print('Parsing for '+job[0]+' by '+job[1])
            xml=utils.gather_xml(job[0],job[1],datasource_id)
            description=GitHubParsers.parse_description(xml)
            private=GitHubParsers.parse_private(xml)
            url=GitHubParsers.parse_url(xml)
            forks=GitHubParsers.parse_forks(xml)
            forked=forks[0]
            fork_number=forks[1]
            homepage=GitHubParsers.parse_home(xml)
            watchers=GitHubParsers.parse_watch(xml)
            open_issues=GitHubParsers.parse_issues(xml)
            #inserts into database
            update='''UPDATE gh_projects
                SET description=%s,
                private=%s,
                url=%s,
                forked=%s,
                fork_number=%s,
                homepage=%s,
                watchers=%s,
                open_issues=%s,
                last_modified=NOW()
                WHERE datasource_id=%s
                AND project_name=%s
                AND developer_name=%s'''
            utils.db_insert(update,description,private,url,forked,fork_number,homepage,
                watchers,open_issues,datasource_id,job[0],job[1])
            #changes status, checks for errors, and gets new job
            utils.change_status('Completed','Parsing',datasource_id,job[0],job[1])
            job=utils.get_job(datasource_id,'Parsing')
            if(utils.error):
                sys.exit()
        #if failure occurs, posts error and finds new job
        except:
            print("!!!!WARNING!!!! parsing has failed.")
            utils.post_error(traceback.format_exc(),datasource_id,job[0],job[1])
            job=utils.get_job(datasource_id,'Parsing')
            if(utils.error):
                sys.exit()
main(sys.argv)
| Python |
'''
Created on Apr 12, 2010
This module performs the clean up for gather_60day jobs.
@author: StevenNorris
'''
import sys
import traceback
def run(utils,datasource_id):
    """Reset 'gather_60day' jobs that were left In_Progress.

    For each stuck job: delete the partially collected 60-day data and
    move the job back to the 'gather_60day' stage for a re-run.  Exits
    the process when utils reports a fatal error (utils.error).
    """
    #Cleans up gather_60day jobs
    print("\nStarting 60day clean up.")
    #Gets Job
    job=utils.get_cleanup_job(datasource_id,'gather_60day')
    if(utils.error):
        sys.exit()
    while(job!=None):
        #Cleans up for the job
        try:
            unixname=job[0]
            print "Cleaning up for "+unixname
            # Drop the stale data, then make the job runnable again
            # (status 'gather_60day', previous stage 'Clean_Up').
            utils.delete_60day(unixname,datasource_id)
            utils.change_status('gather_60day','Clean_Up',datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_60day')
            if(utils.error):
                sys.exit()
        #If process fails, post error and get new job
        except:
            print("!!!!WARNING!!!! Clean up 60day for "+unixname+" failed.")
            utils.post_error('Clean_Up(60day):\n'+traceback.format_exc(),datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_60day')
            if(utils.error):
                sys.exit()
| Python |
'''
Created on Apr 12, 2010
This module cleans up the jobs left In_Progress by machine error and prepares them for a second run.
@author: StevenNorris
'''
from SourceForgeUtils import SourceForgeUtils
import sys
import SourceForgeIndexCleanUp
import SourceForgeDevelopmentCleanUp
import SourceForgeDevelopersCleanUp
import SourceForgeResumesCleanUp
import SourceForgeDonorsCleanUp
import SourceForgeMailingListsCleanUp
import SourceForgeMailingListsSpecificCleanUp
import SourceForgeMailingPagesCleanUp
import SourceForge60dayCleanUp
import SourceForgeYearCleanUp
#main method for running clean ups
#main method for running clean ups
def main(argv):
    """Entry point: parse args, build the utilities object, then run
    every stage's cleanup in pipeline order."""
    # Required command-line arguments.
    try:
        datasource_id=argv[1]
        test=argv[2]
    except:
        print("""RUN INSTRUCTIONS
Run this module from command line with the following format:
[Interpreter] SourceForgeCleanUp.py [datasource_id] [Test T/F]
Test is a string variable. Be sure to use a capital 'T' to denote test mode.
Otherwise use 'F'.""")
        sys.exit()
    # Test mode points at the test database.
    try:
        if (test=='T'):
            print("TEST MODE ACTIVATED")
            utils=SourceForgeUtils('dbInfoTest.txt')
        else:
            utils=SourceForgeUtils('dbInfo.txt')
    except:
        print("Please create the dbInfo.txt and the dbInfoTest.txt files. See ReadMe for formatting.")
        sys.exit()
    # Run each SourceForge cleanup stage, in the same order as the
    # collection pipeline.
    cleanup_stages = (
        SourceForgeIndexCleanUp,
        SourceForgeDevelopmentCleanUp,
        SourceForgeDevelopersCleanUp,
        SourceForgeResumesCleanUp,
        SourceForgeDonorsCleanUp,
        SourceForgeMailingListsCleanUp,
        SourceForgeMailingListsSpecificCleanUp,
        SourceForgeMailingPagesCleanUp,
        SourceForge60dayCleanUp,
        SourceForgeYearCleanUp,
    )
    for stage in cleanup_stages:
        stage.run(utils,datasource_id)
main(sys.argv)
'''
Created on Apr 12, 2010
This module performs the clean up for gather_messages jobs.
@author: StevenNorris
'''
import sys
import traceback
def run(utils,datasource_id):
    """Reset 'gather_messages' jobs that were left In_Progress.

    For each stuck job: delete the partially collected message pages
    and move the job back to the 'gather_messages' stage for a re-run.
    Exits the process when utils reports a fatal error (utils.error).
    """
    #Cleans up gather_messages jobs
    print("\nStarting messages clean up.")
    #Gets Job
    job=utils.get_cleanup_job(datasource_id,'gather_messages')
    if(utils.error):
        sys.exit()
    while(job!=None):
        #Cleans up for the job
        try:
            unixname=job[0]
            print "Cleaning up for "+unixname
            # Drop stale rows, then make the job runnable again.
            utils.delete_messages(unixname,datasource_id)
            utils.change_status('gather_messages','Clean_Up',datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_messages')
            if(utils.error):
                sys.exit()
        #If process fails, post error and get new job
        except:
            print("!!!!WARNING!!!! Clean up messages for "+unixname+" failed.")
            utils.post_error('Clean_Up(messages):\n'+traceback.format_exc(),datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_messages')
            if(utils.error):
                sys.exit()
| Python |
'''
Created on Apr 12, 2010
This module performs the clean up for gather_memberlist jobs.
@author: StevenNorris
'''
import sys
import traceback
def run(utils,datasource_id):
    """Reset 'gather_memberlist' jobs that were left In_Progress.

    For each stuck job: delete the partially collected developer rows
    and move the job back to the 'gather_memberlist' stage for a
    re-run.  Exits the process when utils reports a fatal error.
    """
    #Cleans up gather_memberlist jobs
    print("\nStarting developers clean up.")
    #Gets Job
    job=utils.get_cleanup_job(datasource_id,'gather_memberlist')
    if(utils.error):
        sys.exit()
    while(job!=None):
        #Cleans up for the job
        try:
            unixname=job[0]
            print("Cleaning up for "+unixname)
            utils.delete_developers(unixname,datasource_id)
            utils.change_status('gather_memberlist','Clean_Up',datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_memberlist')
            if(utils.error):
                sys.exit()
        #If process fails, post error and get new job
        except:
            print("!!!!WARNING!!!! Clean up developers for "+unixname+" failed.")
            utils.post_error('Clean_Up(developers):\n'+traceback.format_exc(),datasource_id,unixname)
            # Fix: this polled 'gather_developers' while the rest of the
            # loop works on 'gather_memberlist', so after any error the
            # loop stopped seeing the remaining memberlist jobs.
            job=utils.get_cleanup_job(datasource_id,'gather_memberlist')
            if(utils.error):
                sys.exit()
| Python |
'''
Created on Feb 14, 2010
This module collects the donor page for each project.
@author: Steven Norris
'''
import re
import sys
import traceback
import time
# Base host (with trailing slash) for SourceForge page requests.
BASE_SITE='sourceforge.net/'
#The spiders the given page for the donors link
def donorsSpider(html):
    """Return the project_donations.php link found in *html*, or None.

    The closing quote captured by the pattern is trimmed off.
    """
    found = re.search('project/project_donations\.php\?group_id=.+?"', html)
    if found is None:
        return None
    return found.group(0)[:-1]
#This runs the spidering for the donors pages and adds them to project_indexes
def run(utils,datasource_id):
    """Collect the donations page for every 'gather_donors' job.

    For each job: pull the stored index HTML, locate the donations
    link, download the page and store it in sf_project_indexes.  Jobs
    advance to 'completed' whether or not a donors page exists; only a
    missing index page is posted as an error.
    """
    print("\nGathering donor pages.")
    #runs jobs
    job=utils.get_job(datasource_id,"gather_donors")
    if(utils.error):
        sys.exit()
    while(job!=None):
        time.sleep(3)
        try:
            unixname=job[0]
            print("\nGathering for "+unixname)
            #Collects index page and spiders for link
            print("Retrieving index HTML")
            index=utils.get_index(datasource_id,unixname)
            if(index):
                index=index[0]
                print("Finding Link")
                link=donorsSpider(index)
                #Gathering page and inserting into database
                if(link):
                    print("Gathering page and inserting into database.")
                    donors=utils.get_page("http://"+BASE_SITE+link)
                else:
                    print("Link was not found.")
                    donors=None
                if(donors and re.search('We apologize. The page you were looking for cannot be found.',donors)==None):
                    update="UPDATE sf_project_indexes SET donors_html=%s WHERE datasource_id=%s AND proj_unixname=%s"
                    utils.db_insert(update,donors,datasource_id,unixname)
                    utils.change_status('completed','gather_donors',datasource_id,unixname)
                    job=utils.get_job(datasource_id,'gather_donors')
                    if(utils.error):
                        sys.exit()
                #if donors page is missing, still mark the job completed
                else:
                    print("!! Donors page either does not exist or did not collect properly.")
                    utils.change_status('completed','gather_donors',datasource_id,unixname)
                    job=utils.get_job(datasource_id,'gather_donors')
                    if(utils.error):
                        sys.exit()
            #if the index doesn't exist, post error and get a new job.
            # Fix: these messages were copy-pasted from the memberlist
            # stage ('gather_memberlist:' / 'Memberlist page') and
            # mislabelled donors-stage failures in the jobs table.
            else:
                print("!!!!WARNING!!!! Donors page did not collect correctly.")
                utils.post_error('gather_donors:\nIndex gathering yielded a null response.',datasource_id,unixname)
                job=utils.get_job(datasource_id,'gather_donors')
                if(utils.error):
                    sys.exit()
        #if collecting process fails, post error and get a new job
        except:
            print("!!!!WARNING!!!! Donors page did not collect correctly.")
            utils.post_error('gather_donors:\n'+traceback.format_exc(),datasource_id,unixname)
            job=utils.get_job(datasource_id,'gather_donors')
            if(utils.error):
                sys.exit()
| Python |
'''
Created on Apr. 12, 2010
This module performs the clean up for gather_index jobs.
@author: StevenNorris
'''
import sys
import traceback
def run(utils,datasource_id):
    """Reset 'gather_index' jobs that were left In_Progress.

    For each stuck job: delete the partially collected index page and
    move the job back to the 'gather_index' stage for a re-run.  Exits
    the process when utils reports a fatal error (utils.error).
    """
    #Cleans up gather_index jobs
    print("\nStarting index clean up.")
    #Gets Job
    job=utils.get_cleanup_job(datasource_id,'gather_index')
    if(utils.error):
        sys.exit()
    while(job!=None):
        #Cleans up for the job
        try:
            unixname=job[0]
            print "Cleaning up for "+unixname
            # Drop the stale page, then make the job runnable again.
            utils.delete_index(unixname,datasource_id)
            utils.change_status('gather_index','Clean_Up',datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_index')
            if(utils.error):
                sys.exit()
        #If process fails, post error and get new job
        except:
            print("!!!!WARNING!!!! Clean up indexes for "+unixname+" failed.")
            utils.post_error('Clean_Up(index):\n'+traceback.format_exc(),datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_index')
            if(utils.error):
                sys.exit()
'''
Created on Dec 13, 2009
This module collects the mailing list page for each project given a utilities module and datasource_id.
@author: Steven Norris
'''
import re
import sys
import time
# URL prefixes.  BASE_SITE keeps its trailing slash because the parsed
# links have their leading '/' stripped before being appended.
# NOTE(review): BASE_INDEX appears unused in this module.
BASE_INDEX='sourceforge.net/projects/'
BASE_SITE='sourceforge.net/'
#This parses the mailnglist link from the given page and returns it
def mailinglist_spider(page):
    """Return the mail-archive path from *page*, or None.

    The leading '/' and the trailing quote captured by the pattern are
    both trimmed off.
    """
    found = re.search('/mail/\?group_id=.+?"', page)
    if found is None:
        return None
    return found.group(0)[1:-1]
#This method works as the main method
def run(utils, datasource_id):
    """Collect the mailing-list index page for every 'gather_mailinglists' job.

    For each job: pull the stored development-page HTML, locate the
    /mail/ link, download the page into sf_mailing_indexes and advance
    the job to 'gather_mailinglistsspecific'.  Exits the process when
    utils reports a fatal error (utils.error).
    """
    print('\nGathering mailinglist pages.')
    #Gather job and check for errors
    job=utils.get_job(datasource_id,'gather_mailinglists')
    if (utils.error):
        sys.exit()
    while(job!=None):
        time.sleep(3)
        unixname=job[0]
        print('\nGathering for '+unixname)
        #Retrive development page from database
        print("Retrieving Development HTML")
        development_page=utils.get_development(datasource_id,unixname)
        if(development_page):
            development_page=development_page[0]
            #Parse out link for mailing list and gather page
            print('Finding Link')
            link=mailinglist_spider(development_page)
            if(link):
                print('Inserting Mailinglist Page')
                mailinglist=utils.get_page('http://'+BASE_SITE+link)
            else:
                print('Link to mailing list not found.')
                mailinglist=None
            #Insert mailing list page into database
            if(mailinglist and re.search('We apologize. The page you were looking for cannot be found.',mailinglist)==None):
                update='''INSERT INTO sf_mailing_indexes (mailinglist_html,datasource_id,proj_unixname,date_collected)
                    VALUES(%s,%s,%s,NOW())'''
                utils.db_insert(update,mailinglist,datasource_id,unixname)
                utils.change_status('gather_mailinglistsspecific','gather_mailinglists',datasource_id,unixname)
                job=utils.get_job(datasource_id,'gather_mailinglists')
                if(utils.error):
                    sys.exit()
            #If the page does not exist, the job is still advanced.
            # NOTE(review): despite the warning no error is posted here;
            # presumably a missing mailing list is considered normal.
            else:
                print("!!!!WARNING!!!! Mailinglist page did not collect correctly.")
                utils.change_status('gather_mailinglistsspecific','gather_mailinglists',datasource_id,unixname)
                job=utils.get_job(datasource_id,'gather_mailinglists')
                if(utils.error):
                    sys.exit()
        #if development page does not collect properly, post error and get new job.
        else:
            print("!!!!WARNING!!!! Development page did not collect correctly.")
            utils.post_error('gather_mailinglists:\nDevelopment gathering yielded a null response.',datasource_id,unixname)
            job=utils.get_job(datasource_id,'gather_mailinglists')
            if(utils.error):
                sys.exit()
| Python |
'''
Created Dec. 13, 2009
This module is used to spider the mailing list pages for message pages and store them in the database.
@author: Steven Norris
'''
import re
import sys
import time
from datetime import date
# URL prefixes: the month links built by mailing_month are relative to
# the mailarchive area (BASE_INDEX).  NOTE(review): BASE_SITE appears
# unused in this module.
BASE_INDEX='sourceforge.net/mailarchive/'
BASE_SITE='sourceforge.net/'
def mailing_month(page,yearMonth):
    """Build the flat, single-page archive link for *yearMonth*.

    Takes the first forum.php archive link on *page* and rewrites three
    query parameters: max_rows becomes the message count shown in
    parentheses next to the link (so everything fits on one page),
    style becomes 'flat', and viewmonth becomes *yearMonth* (YYYYMM).
    Returns None when no archive link is found.

    Fix: the original slices used find('max_rows')+10 and
    find('&viewmonth=')+12, keeping one character of the old values and
    producing links like '...max_rows=242...&viewmonth=2200912'; the
    '&style='+7 slice shows the intended exact boundaries.
    """
    final_match=None
    matches=re.findall('(forum.php\?forum_name=.+?&max_rows=.+?&style=.+?&viewmonth=.+?)">\((\d+?)\)</a>',page)
    if matches:
        match,num=matches[0]
        match=match.replace('&amp;','&')
        rows_end=match.find('max_rows')+9        # end of 'max_rows='
        style_start=match.find('&style=')
        month_start=match.find('&viewmonth=')
        final_match=(match[0:rows_end]+num+
                     match[style_start:style_start+7]+'flat'+
                     match[month_start:month_start+11]+yearMonth)
    return final_match
def run(utils,datasource_id):
    """Collect the current month's message page for every
    'gather_messages' job.

    For each job: pull the stored per-list archive pages, build the
    flat month link with mailing_month() for the current month, fetch
    it and store it in sf_mailing_pages_indexes.  Jobs always advance
    to 'completed'.  Exits the process when utils reports a fatal
    error (utils.error).
    """
    print('\nGathering message pages.')
    #Gather job and check for errors
    job=utils.get_job(datasource_id,'gather_messages')
    if (utils.error):
        sys.exit()
    while(job!=None):
        time.sleep(3)
        unixname=job[0]
        print('\nGathering for '+unixname)
        #Retrieve mailinglists page from database
        print("Retrieving Specific Mailing List HTML")
        mailing_pages=utils.get_mailing_specific(datasource_id,unixname)
        if(mailing_pages):
            #Gathering links for each year and month
            # NOTE(review): 'list' shadows the builtin and the loop
            # variable 'page' is reused below for the fetched HTML.
            for page in mailing_pages:
                list=page[0]
                html=page[1]
                print('*Retrieving for '+list)
                today=date.today()
                formated=today.strftime("%Y%m")
                month_link=mailing_month(html,formated)
                if(month_link):
                    #Gather pages for each link
                    # Year/month are the last six characters of the
                    # link's viewmonth=YYYYMM parameter.
                    year=month_link[len(month_link)-6:len(month_link)-2]
                    month=month_link[len(month_link)-2:]
                    time.sleep(3)
                    print('**Collecting for '+month+':'+year)
                    print('**Using link: '+month_link)
                    page=utils.get_page('http://'+BASE_INDEX+month_link)
                    #Insert each page into databse
                    if(page and re.search('We apologize. The page you were looking for cannot be found.',page)==None):
                        insert='''INSERT INTO sf_mailing_pages_indexes (proj_unixname,list_name,year,month,messages_html,datasource_id,date_collected)
                            VALUES (%s,%s,%s,%s,%s,%s,NOW())'''
                        print('**Inserting into database')
                        utils.db_insert(insert,unixname,list,year,month,page,datasource_id)
                    #If page doesn't exist, print warning
                    else:
                        print('**Link '+month_link+ 'either led to a faulty page or did not exist.')
                #If links don't exist, just report it; status is set below
                else:
                    print("*!!Specific Mailing List Pages do not Exist!!.")
            #Change status, get job, and check for errors
            utils.change_status('completed','gather_messages',datasource_id,unixname)
            job=utils.get_job(datasource_id,'gather_messages')
            if(utils.error):
                sys.exit()
        #If specific mailing lists don't exist, change status, get job, and check for errors
        else:
            print("!!Specific Mailing Lists do not Exist!!")
            utils.change_status('completed','gather_messages',datasource_id,unixname)
            job=utils.get_job(datasource_id,'gather_messages')
            if(utils.error):
                sys.exit()
| Python |
'''
Created on Apr 12, 2010
This module performs the clean up for gather_mailinglistsspecific jobs.
@author: StevenNorris
'''
import sys
import traceback
def run(utils,datasource_id):
    """Reset 'gather_mailinglistsspecific' jobs that were left In_Progress.

    For each stuck job: delete the partially collected per-list pages
    and move the job back to the 'gather_mailinglistsspecific' stage
    for a re-run.  Exits the process when utils reports a fatal error.
    """
    #Cleans up gather_mailinglistsspecfic jobs
    print("\nStarting mailing lists specific clean up.")
    #Gets Job
    job=utils.get_cleanup_job(datasource_id,'gather_mailinglistsspecific')
    if(utils.error):
        sys.exit()
    while(job!=None):
        #Cleans up for the job
        try:
            unixname=job[0]
            print "Cleaning up for "+unixname
            # Drop stale rows, then make the job runnable again.
            utils.delete_mailinglistsspecific(unixname,datasource_id)
            utils.change_status('gather_mailinglistsspecific','Clean_Up',datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_mailinglistsspecific')
            if(utils.error):
                sys.exit()
        #If process fails, post error and get new job
        except:
            print("!!!!WARNING!!!! Clean up mailing lists specific for "+unixname+" failed.")
            utils.post_error('Clean_Up(mailing lists specific):\n'+traceback.format_exc(),datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_mailinglistsspecific')
            if(utils.error):
                sys.exit()
'''
Created on Aug 16, 2009
This module is designed to populate the jobs database for sourceforge.net.
RUN INSTRUCTIONS
Run this module from command line with the following format:
[Interpreter] SourceForgeJobs.py [datasource_id] [Test T/F]
Test is a string variable. Be sure to use a capital 'T' to denote test mode.
Otherwise use 'F'.
@author: StevenNorris
'''
import sys
from SourceForgeUtils import SourceForgeUtils
import traceback
import socket
#adds the jobs to the sf_jobs table in the selected database
def main(argv):
    """Create a 'gather_index' job in sf_jobs for every known project.

    argv: [program, datasource_id, test_flag]; test_flag 'T' selects
    the test database and limits the run to the first 50 projects.
    Each job records the creating host via socket.gethostname().
    """
    #set variables
    try:
        datasource_id=argv[1]
        test=argv[2]
    except:
        print ("""RUN INSTRUCTIONS\n
Run this module from command line with the following format:\n
[Interpreter] SourceForgeJobs.py [datasource_id] [Test T/F]\n
Test is a string variable. Be sure to use a capital 'T' to denote test mode.\n
Otherwise use 'F'.""")
        sys.exit()
    #checks for test mode
    if(test=='T'):
        try:
            print("TEST MODE ACTIVATED")
            utils=SourceForgeUtils('dbInfoTest.txt')
        except:
            print("Please create the dbInfo.txt and the dbInfoTest.txt files. See ReadMe for formatting.")
            sys.exit()
    else:
        try:
            utils=SourceForgeUtils('dbInfo.txt')
        except:
            # Fix: this message told the user to create 'dbInfoText.txt'
            # (misspelled) instead of dbInfoTest.txt.
            print("Please create the dbInfo.txt and the dbInfoTest.txt files. See ReadMe for formatting.")
            sys.exit()
    #gathering project unixnames
    try:
        print("Gathering unixnames.")
        projects_list=utils.get_projects(datasource_id)
        #checks test mode for project amount to be collected
        if(test=='T'):
            end=50
        else:
            end=len(projects_list)
        #adds jobs to database
        try:
            print("Creating Jobs")
            for project in projects_list[0:end]:
                project=project[0]
                print("Creating job for "+project)
                try:
                    insert='''INSERT INTO sf_jobs (unixname,datasource_id,status,last_modified,modified_by)
                        VALUES(%s,%s,'gather_index',NOW(),%s)'''
                    utils.db_insert(insert,project,datasource_id,socket.gethostname())
                except:
                    print('!!!!WARNING!!!! Job creation failed for '+project+'.')
                    print(traceback.format_exc())
        except:
            print('!!!!WARNING!!!! Jobs did not create succesfully')
            print(traceback.format_exc())
    except:
        print('!!!!WARNING!!!! Projects unixnames not collected properly.')
        print(traceback.format_exc())
main(sys.argv)
'''
Created on Dec 13, 2009
This module is used to spider the main mailing list page for each project and insert its subsequent specific mailing list pages into the database.
@author: Steven Norris
'''
import re
import sys
import time
# URL prefixes; this module fetches pages via BASE_SITE (the parsed
# links already include the 'mailarchive/' segment).
# NOTE(review): BASE_INDEX appears unused in this module.
BASE_INDEX='sourceforge.net/projects/'
BASE_SITE='sourceforge.net/'
#This method spiders the main mailing list page for specific mailing list links
def mailinglists_spider(page):
    """Return the unique mailing-list archive paths found in *page*.

    Order of first appearance is kept; the leading '/' and trailing '">'
    of each raw match are trimmed.  Returns None when nothing matches.
    """
    raw_hits = re.findall('/mailarchive/forum.php\?forum_name=.+?>', page)
    if not raw_hits:
        return None
    unique_links = []
    for hit in raw_hits:
        trimmed = hit[1:-2]
        if trimmed not in unique_links:
            unique_links.append(trimmed)
    return unique_links
#This method runs the main spidering for the module
def run(utils,datasource_id):
    """Store each specific mailing-list page for every
    'gather_mailinglistsspecific' job.

    For each job: pull the stored mailing-list index HTML, extract the
    per-list archive links, download each page into
    sf_mailinglist_indexes and advance the job to 'gather_messages'.
    Exits the process when utils reports a fatal error (utils.error).
    """
    print('\nGathering specific mailinglist pages.')
    #Gather job and check for errors
    job=utils.get_job(datasource_id,'gather_mailinglistsspecific')
    if (utils.error):
        sys.exit()
    while(job!=None):
        time.sleep(3)
        unixname=job[0]
        print('\nGathering for '+unixname)
        #Retrive mailinglists page from database
        print("Retrieving Mailinglists HTML")
        mailing_page=utils.get_mailing(datasource_id,unixname)
        if(mailing_page):
            mailing_page=mailing_page[0]
            #Parse out links for mailing listss
            print('Finding Links')
            links=mailinglists_spider(mailing_page)
            #Gather pages for each link
            if(links):
                for link in links:
                    print('Inserting Mailinglist Page '+link)
                    # Strip the 'mailarchive/forum.php?forum_name='
                    # prefix (33 characters) to get the list name.
                    name=link[33:]
                    mailinglist=utils.get_page('http://'+BASE_SITE+link)
                    #Insert page into database
                    if(mailinglist and re.search('We apologize. The page you were looking for cannot be found.',mailinglist)==None):
                        update='''INSERT INTO sf_mailinglist_indexes (proj_unixname,mailinglist_html, datasource_id, list_name,date_collected)
                            VALUES(%s,%s,%s,%s,NOW())'''
                        utils.db_insert(update,unixname,mailinglist,datasource_id,name)
                    #Print warning if page does not exist
                    else:
                        print('Link '+link+ 'either led to a faulty page or did not exist.')
                #Change status, get job, and check for errors
                utils.change_status('gather_messages','gather_mailinglistsspecific',datasource_id,unixname)
                job=utils.get_job(datasource_id,'gather_mailinglistsspecific')
                if(utils.error):
                    sys.exit()
            #Print warning if links do not exist
            else:
                print("!!Specific Mailing Lists do not Exist!!.")
                utils.change_status('gather_messages','gather_mailinglistsspecific',datasource_id,unixname)
                job=utils.get_job(datasource_id,'gather_mailinglistsspecific')
                if(utils.error):
                    sys.exit()
        #if the index page does not collect properly, advance anyway.
        else:
            print("!!!!WARNING!!!! Mailinglist page did not collect correctly.")
            # Fix: previous_stage was misspelled 'gather_mailiinglistsspecific'
            # (double 'i'), inconsistent with the two branches above and
            # presumably hiding these jobs from the matching cleanup pass.
            utils.change_status('gather_messages','gather_mailinglistsspecific',datasource_id,unixname)
            job=utils.get_job(datasource_id,'gather_mailinglistsspecific')
            if(utils.error):
                sys.exit()
| Python |
'''
Created on Aug 16, 2009
This module includes the necessary utilities for the source forge spider.
@author: StevenNorris
'''
import MySQLdb
import traceback
import urllib2
import socket
class SourceForgeUtils:
    """Shared database/HTTP helpers for the SourceForge spider modules.

    Holds one MySQL connection (credentials read from a file) and exposes
    page fetching, generic inserts, job-queue claiming, per-stage page
    retrieval and clean-up deletions for the sf_* tables.

    The `error` attribute is set True when a status/error update fails;
    the spider modules check it after every queue operation and abort.
    """
    #this gathers the initial connection to the database
    def __init__(self,file_name):
        """Open the MySQL connection described by *file_name*.

        The file holds host, port, username, password and database name,
        one per line.  Raises Exception when the file cannot be read.
        """
        try:
            dbfile = open(file_name, 'r')
        except Exception:
            print(traceback.format_exc())
            raise Exception("Database file error: "+file_name)
        self.host = dbfile.readline().strip()
        self.port = int(dbfile.readline().strip())
        self.username = dbfile.readline().strip()
        self.password = dbfile.readline().strip()
        self.database = dbfile.readline().strip()
        dbfile.close()  # fix: credentials file was previously left open
        # fix: the configured port was read but never passed to connect()
        self.db=MySQLdb.connect(host=self.host, port=self.port, user=self.username, passwd=self.password, db=self.database)
        self.cursor = self.db.cursor()
        self.error=False
    def get_page(self,url):
        """Fetch *url* and return its HTML, or None when the request fails."""
        try:
            response = urllib2.urlopen(url)
            html = response.read()
            return html
        except Exception:
            print ("The page request failed.")
    def db_insert(self,query_string,*params):
        """Execute *query_string* with *params*; failures are logged, not raised."""
        try:
            self.cursor.execute(query_string, params)
        except Exception:
            print("!!!!WARNING!!!! Insertion into "+self.database+" failed.")
            print(traceback.format_exc())
    def get_job(self, datasource_id, status):
        """Atomically claim one job in *status* for *datasource_id*.

        Locks sf_jobs, selects the first matching unixname, marks it
        'In_Progress' and returns the (unixname,) row; returns None when
        the queue is empty or the claim fails.
        """
        lock = '''LOCK TABLE sf_jobs READ, sf_jobs AS t WRITE'''
        select = '''SELECT unixname
                FROM sf_jobs AS t
                WHERE status = %s
                AND datasource_id = %s
                ORDER BY unixname
                LIMIT 1'''
        update='''UPDATE sf_jobs AS t SET status='In_Progress', last_modified=NOW()
                WHERE datasource_id=%s
                AND unixname=%s
                '''
        unlock = '''UNLOCK TABLES'''
        try:
            self.cursor.execute(lock)
            self.cursor.execute(select, (status,datasource_id))
            result = self.cursor.fetchone()
            if result is None:
                # fix: an empty queue used to hit result[0] below, raise, and
                # print "Finding job failed."; release the lock and return quietly
                self.cursor.execute(unlock)
                return None
            self.cursor.execute(update,(datasource_id, result[0]))
            self.cursor.execute(unlock)
            return result
        except Exception:
            print ("Finding job failed.")
            self.cursor.execute(unlock)
    #this method allows for status changes
    def change_status(self,status,previous,datasource_id,unixname):
        """Move a job to *status*, recording *previous* stage and this host.

        On failure sets self.error so the caller's job loop aborts.
        """
        update='''UPDATE sf_jobs
                SET status=%s, last_modified=NOW(), previous_stage=%s, modified_by=%s
                WHERE datasource_id=%s
                AND unixname=%s
                '''
        try:
            self.cursor.execute(update,(status,previous,socket.gethostname(),datasource_id,unixname))
        except Exception:
            # fix: str() so a non-string datasource_id cannot raise TypeError
            # inside this handler (which would skip setting self.error)
            print('!!!!WARNING!!!! Status '+status+' did not update correctly for '+unixname+' with id '+str(datasource_id)+'.')
            print(traceback.format_exc())
            self.error=True
    #this method allows for error posting
    def post_error(self,message,datasource_id,unixname):
        """Record *message* on a job and flip its status to 'error'."""
        update='''UPDATE sf_jobs
                SET error_msg=%s, status='error', last_modified=NOW(), modified_by=%s
                WHERE datasource_id=%s
                AND unixname=%s'''
        try:
            self.cursor.execute(update,(message,socket.gethostname(),datasource_id,unixname))
        except Exception:
            # fix: missing spaces in the message and str() for non-string ids
            print('!!!!WARNING!!!! Error '+message+' could not be posted for '+unixname+' at '+str(datasource_id)+'.')
            self.error=True
    #Gathers the projects list from sf_projects
    def get_projects(self,datasource_id):
        """Return all (proj_unixname,) rows for *datasource_id*, or None on failure."""
        try:
            select="SELECT proj_unixname FROM sf_projects WHERE datasource_id=%s"
            # fix: execute() parameters must be a sequence; (datasource_id)
            # is just a parenthesized scalar, not a one-element tuple
            self.cursor.execute(select,(datasource_id,))
            projects_list=self.cursor.fetchall()
            return projects_list
        except Exception:
            print("!!!!WARNING!!! Collecting projects list failed.")
    #Gathers the index html from project_indexes
    def get_index(self,datasource_id,unixname):
        """Return one (indexhtml,) row for the project, or None."""
        try:
            select="SELECT indexhtml FROM sf_project_indexes WHERE datasource_id=%s AND proj_unixname=%s LIMIT 1"
            self.cursor.execute(select,(datasource_id,unixname))
            index_page=self.cursor.fetchone()
            return index_page
        except Exception:
            print("!!!!WARNING!!!! Collecting index page failed.")
            print(traceback.format_exc())
    def get_development(self,datasource_id,unixname):
        """Return one (development_html,) row for the project, or None."""
        try:
            select='''SELECT development_html FROM sf_project_indexes WHERE datasource_id=%s AND proj_unixname=%s LIMIT 1'''
            self.cursor.execute(select,(datasource_id,unixname))
            dev_page=self.cursor.fetchone()
            return dev_page
        except Exception:
            print("!!!!WARNING!!!! Collecting development page failed.")
            print(traceback.format_exc())
    def get_memberlist(self,datasource_id,unixname):
        """Return one (developers_html,) row for the project, or None."""
        try:
            select='''SELECT developers_html FROM sf_project_indexes WHERE datasource_id=%s AND proj_unixname=%s'''
            self.cursor.execute(select,(datasource_id,unixname))
            memberlist=self.cursor.fetchone()
            return memberlist
        except Exception:
            print("!!!!WARNING!!!! Collecting memberlist page failed.")
            print(traceback.format_exc())
    def get_mailing(self,datasource_id,unixname):
        """Return one (mailinglist_html,) row from sf_mailing_indexes, or None."""
        try:
            select='''SELECT mailinglist_html FROM sf_mailing_indexes WHERE datasource_id=%s AND proj_unixname=%s'''
            self.cursor.execute(select,(datasource_id,unixname))
            mailing=self.cursor.fetchone()
            return mailing
        except Exception:
            print("!!!!WARNING!!!! Collecting mailing page failed.")
            print(traceback.format_exc())
    def get_mailing_specific(self,datasource_id,unixname):
        """Return all (list_name, mailinglist_html) rows for the project, or None."""
        try:
            select='''SELECT list_name, mailinglist_html FROM sf_mailinglist_indexes WHERE datasource_id=%s AND proj_unixname=%s'''
            self.cursor.execute(select,(datasource_id,unixname))
            mailinglists=self.cursor.fetchall()
            return mailinglists
        except Exception:
            print("!!!!WARNING!!!! Collecting mailinglist page failed.")
            print(traceback.format_exc())
    def get_cleanup_job(self, datasource_id, previousStage):
        """Atomically claim one stuck 'In_Progress' job whose previous stage
        is *previousStage*, mark it 'Clean_Up' and return its (unixname,) row.

        Returns None when nothing needs cleaning or the claim fails.
        """
        lock = '''LOCK TABLE sf_jobs READ, sf_jobs AS t WRITE'''
        select = '''SELECT unixname
                FROM sf_jobs AS t
                WHERE status = 'In_Progress'
                AND datasource_id = %s
                AND previous_stage = %s
                ORDER BY unixname
                LIMIT 1'''
        update='''UPDATE sf_jobs AS t SET status='Clean_Up', last_modified=NOW()
                WHERE datasource_id=%s
                AND unixname=%s
                '''
        unlock = '''UNLOCK TABLES'''
        try:
            self.cursor.execute(lock)
            self.cursor.execute(select, (datasource_id,previousStage))
            result = self.cursor.fetchone()
            if result is None:
                # fix: same empty-queue handling as get_job
                self.cursor.execute(unlock)
                return None
            self.cursor.execute(update,(datasource_id, result[0]))
            self.cursor.execute(unlock)
            return result
        except Exception:
            print ("Finding job failed.")
            self.cursor.execute(unlock)
    #This method allows for the deletion of a project from the project_indexes
    def delete_index(self,unixname,datasource_id):
        """Delete the project's whole row from sf_project_indexes."""
        try:
            update="""DELETE FROM sf_project_indexes WHERE proj_unixname=%s AND datasource_id=%s"""
            self.cursor.execute(update,(unixname,datasource_id))
        except Exception:
            print("!!!!WARNING!!!! Deletion of index failed.")
            print (traceback.format_exc())
    #This method allows for the deletion of a development page for a project from the project_indexes
    def delete_development(self,unixname,datasource_id):
        """Null out the project's development_html column."""
        try:
            update="""UPDATE sf_project_indexes SET development_html=NULL WHERE proj_unixname=%s AND datasource_id=%s"""
            self.cursor.execute(update,(unixname,datasource_id))
        except Exception:
            print("!!!!WARNING!!!! Deletion of development page failed.")
            print (traceback.format_exc())
    #This method allows for the deletion of a developers page for a project from the project_indexes
    def delete_developers(self,unixname,datasource_id):
        """Null out the project's developers_html column."""
        try:
            update="""UPDATE sf_project_indexes SET developers_html=NULL WHERE proj_unixname=%s AND datasource_id=%s"""
            self.cursor.execute(update,(unixname,datasource_id))
        except Exception:
            print("!!!!WARNING!!!! Deletion of developers page failed.")
            print (traceback.format_exc())
    #This method allows for the deletion of a donors page for a project from the project_indexes
    def delete_donors(self,unixname,datasource_id):
        """Null out the project's donors_html column."""
        try:
            update="""UPDATE sf_project_indexes SET donors_html=NULL WHERE proj_unixname=%s AND datasource_id=%s"""
            self.cursor.execute(update,(unixname,datasource_id))
        except Exception:
            print("!!!!WARNING!!!! Deletion of donors page failed.")
            print (traceback.format_exc())
    #This method allows for the deletion of mailing list pages for a project from sf_mailing_indexes
    def delete_mailinglists(self,unixname,datasource_id):
        """Delete the project's rows from sf_mailing_indexes."""
        try:
            update="""DELETE FROM sf_mailing_indexes WHERE proj_unixname=%s AND datasource_id=%s"""
            self.cursor.execute(update,(unixname,datasource_id))
        except Exception:
            print("!!!!WARNING!!!! Deletion of mailing lists failed.")
            print (traceback.format_exc())
    #This method allows for the deletion of specific mailing list pages from sf_mailinglist_indexes
    def delete_mailinglistsspecific(self,unixname,datasource_id):
        """Delete the project's rows from sf_mailinglist_indexes."""
        try:
            update="""DELETE FROM sf_mailinglist_indexes WHERE proj_unixname=%s AND datasource_id=%s"""
            self.cursor.execute(update,(unixname,datasource_id))
        except Exception:
            print("!!!!WARNING!!!! Deletion of mailing lists failed.")
            print (traceback.format_exc())
    #This method allows for the deletion of message pages from sf_mailing_pages_indexes
    def delete_messages(self,unixname,datasource_id):
        """Delete the project's rows from sf_mailing_pages_indexes."""
        try:
            update="""DELETE FROM sf_mailing_pages_indexes WHERE proj_unixname=%s AND datasource_id=%s"""
            self.cursor.execute(update,(unixname,datasource_id))
        except Exception:
            print("!!!!WARNING!!!! Deletion of messages failed.")
            print (traceback.format_exc())
    #This method allows for the deletion of a 60day page for a project from sf_project_indexes
    def delete_60day(self,unixname,datasource_id):
        """Null out the project's statistics_html (60-day stats) column."""
        try:
            update="""UPDATE sf_project_indexes SET statistics_html=NULL WHERE proj_unixname=%s AND datasource_id=%s"""
            self.cursor.execute(update,(unixname,datasource_id))
        except Exception:
            print("!!!!WARNING!!!! Deletion of 60day page failed.")
            print (traceback.format_exc())
    #This method allows for the deletion of a yearly stats page for a project from sf_project_indexes
    def delete_year(self,unixname,datasource_id):
        """Null out the project's all_time_stats_html column."""
        try:
            update="""UPDATE sf_project_indexes SET all_time_stats_html=NULL WHERE proj_unixname=%s AND datasource_id=%s"""
            self.cursor.execute(update,(unixname,datasource_id))
        except Exception:
            print("!!!!WARNING!!!! Deletion of year page failed.")
            print (traceback.format_exc())
| Python |
'''
Created on Sep 28, 2009
This module spiders the yearly stats page for each job and prepares for individual developer spidering.
@author: Steven Norris
'''
import re
import sys
import traceback
import time
BASE_INDEX='sourceforge.net/projects/'
BASE_SITE='sourceforge.net/'
#This spider finds the link for the stats page on the development page
def statsSpider(page):
    """Extract the group-id portion of a stats link found in *page*.

    Matches the first 'group_id=...\"' fragment and returns it with the
    9-char 'group_id=' prefix and the final 11 characters stripped, or
    None when no fragment is present.
    """
    found = re.search('group_id=.+?"', page)
    if found is None:
        return None
    fragment = found.group(0)
    return fragment[9:-11]
def run(utils,datasource_id):
    """Work through 'gather_year' jobs for *datasource_id*.

    For each claimed project: read its stored development page, extract the
    group id, fetch the all-time statistics page (retrying up to five times
    when the stats server reports a timeout), store it in
    sf_project_indexes.all_time_stats_html and mark the job 'completed'.
    A persistent timeout queues an 'error_year' retry job instead.
    utils.error aborts the whole loop.
    """
    #collects the yearstats pages for each job and adds them to sv_indexes
    print("\nGathering yearstats pages.")
    #runs jobs
    job=utils.get_job(datasource_id,'gather_year')
    if(utils.error):
        sys.exit()
    while(job!=None):
        # NOTE(review): 3s pause between projects — presumably rate limiting
        time.sleep(3)
        try:
            unixname=job[0]
            print("\nGathering for "+unixname)
            #collects development page and crawls for yearstats link
            print("Retrieving development HTML.")
            dev_page=utils.get_development(datasource_id,unixname)
            dev_page=dev_page[0]
            if(dev_page):
                print("Finding link.")
                id=statsSpider(dev_page)
                #inserts yearstats page into project_indexes
                if(id):
                    print("Inserting yearstats page.")
                    year=utils.get_page("http://"+BASE_SITE+"project/stats/?group_id="+id+"&ugn="+unixname+"&type&mode=alltime")
                else:
                    print("No group id found.")
                    year=None
                if(year and re.search('We apologize. The page you were looking for cannot be found.',year)==None):
                    #retry up to 5 times while the stats server reports a timeout
                    i=0
                    while(re.search("Connection to statistics server timed out",year)!=None and i<5):
                        year=utils.get_page("http://"+BASE_SITE+"project/stats/?group_id="+id+"&ugn="+unixname+"&type&mode=alltime")
                        i+=1
                    if(re.search("Connection to statistics server timed out",year)==None):
                        update="UPDATE sf_project_indexes SET all_time_stats_html=%s WHERE datasource_id=%s AND proj_unixname=%s"
                        utils.db_insert(update,year,datasource_id,unixname)
                        #changed gather_resumes
                        utils.change_status('completed','gather_year',datasource_id,unixname)
                        job=utils.get_job(datasource_id,'gather_year')
                        if(utils.error):
                            sys.exit()
                    else:
                        #still timing out: finish the job but queue an 'error_year' retry job
                        print("!!!!WARNING!!!! yearstats page timed out.")
                        utils.change_status('completed','gather_year',datasource_id,unixname)
                        insert='''INSERT INTO sf_jobs (unixname,datasource_id,status,last_modified)
                                VALUES(%s,%s,%s,NOW())'''
                        utils.db_insert(insert,unixname,datasource_id,'error_year')
                        job=utils.get_job(datasource_id,'gather_year')
                        if(utils.error):
                            sys.exit()
                #if yearstats insertion fails posts error, gets job, and checks for errors
                else:
                    print("yearstats page does not exist.")
                    utils.change_status('completed','gather_year',datasource_id,unixname)
                    job=utils.get_job(datasource_id,'gather_year')
                    if(utils.error):
                        sys.exit()
            #if development doesn't collect properly posts error, gets job, and checks for errors
            else:
                print("!!!!WARNING!!!! yearstats page did not collect correctly.")
                utils.post_error('gather_year:\nDevelopment gathering yielded a null response.',datasource_id,unixname)
                job=utils.get_job(datasource_id,'gather_year')
                if(utils.error):
                    sys.exit()
        #if collecting process fails posts error, gets job, and checks for errors
        except:
            print("!!!!WARNING!!!! yearstats page did not collect correctly.")
            utils.post_error('gather_year:\n'+traceback.format_exc(),datasource_id,unixname)
            job=utils.get_job(datasource_id,'gather_year')
            if(utils.error):
                sys.exit()
| Python |
'''
Created on Apr 12, 2010
This module performs the clean up for gather_year jobs.
@author: StevenNorris
'''
import sys
import traceback
def run(utils,datasource_id):
    """Clean up 'gather_year' jobs left 'In_Progress': claim each as
    Clean_Up, delete its partially-collected all-time stats page, and
    return the job to the 'gather_year' stage so it can be re-run.

    utils         -- SourceForgeUtils-style helper (DB access)
    datasource_id -- id of the data source whose jobs are cleaned
    """
    #Cleans up gather_year jobs
    print("\nStarting year clean up.")
    #Gets Job
    job=utils.get_cleanup_job(datasource_id,'gather_year')
    if(utils.error):
        sys.exit()
    while(job!=None):
        #Cleans up for the job
        try:
            unixname=job[0]
            # fix: print() call form, consistent with the rest of the codebase
            print("Cleaning up for "+unixname)
            utils.delete_year(unixname,datasource_id)
            utils.change_status('gather_year','Clean_Up',datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_year')
            if(utils.error):
                sys.exit()
        #If process fails, post error and get new job
        except Exception:
            print("!!!!WARNING!!!! Clean up year for "+unixname+" failed.")
            utils.post_error('Clean_Up(year):\n'+traceback.format_exc(),datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_year')
            if(utils.error):
                sys.exit()
| Python |
'''
Created on Apr 12, 2010
This module performs the clean up for gather_development jobs.
@author: StevenNorris
'''
import sys
import traceback
def run(utils,datasource_id):
    """Clean up 'gather_development' jobs left 'In_Progress': claim each as
    Clean_Up, delete its partially-collected development page, and return
    the job to the 'gather_development' stage so it can be re-run.

    utils         -- SourceForgeUtils-style helper (DB access)
    datasource_id -- id of the data source whose jobs are cleaned
    """
    #Cleans up gather_development jobs
    print("\nStarting development clean up.")
    #Gets Job
    job=utils.get_cleanup_job(datasource_id,'gather_development')
    if(utils.error):
        sys.exit()
    while(job!=None):
        #Cleans up for the job
        try:
            unixname=job[0]
            # fix: print() call form, consistent with the rest of the codebase
            print("Cleaning up for "+unixname)
            utils.delete_development(unixname,datasource_id)
            utils.change_status('gather_development','Clean_Up',datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_development')
            if(utils.error):
                sys.exit()
        #If process fails, post error and get new job
        except Exception:
            print("!!!!WARNING!!!! Clean up development for "+unixname+" failed.")
            utils.post_error('Clean_Up(development):\n'+traceback.format_exc(),datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_development')
            if(utils.error):
                sys.exit()
| Python |
'''
Created on Oct 14, 2009
This module is used after an initial run to rerun any timed out errors.
@author: Steven Norris
'''
from SourceForgeUtils import SourceForgeUtils
import sys
import SourceForge60day
import SourceForgeYear
#!!!!WARNING!!!!! Running this error fix will only collect 60day stats and year stats that have timed out.
# To complete the process for these projects please rerun the SourceForgeSpider for the datasource_id.
def main(argv):
    """Re-queue and re-run 60-day and yearly stats jobs that timed out.

    argv[1] -- datasource_id whose 'error_60day'/'error_year' jobs are reset
    argv[2] -- 'T' for test mode (dbInfoTest.txt) or 'F' (dbInfo.txt)
    """
    try:
        datasource_id=argv[1]
        test=argv[2]
    #missing arguments: print usage and quit
    except:
        print("""RUN INSTRUCTIONS
        Run this module from command line with the following format:
        [Interpreter] SourceForgeJobs.py [datasource_id] [Test T/F]
        Test is a string variable. Be sure to use a capital 'T' to denote test mode.
        Otherwise use 'F'.""")
        sys.exit()
    #Checks for test mode
    if (test=='T'):
        print("TEST MODE ACTIVATED")
        utils=SourceForgeUtils('dbInfoTest.txt')
    else:
        utils=SourceForgeUtils('dbInfo.txt')
    print('Running Error Fixes')
    #runs jobs to prepare for reruns
    #reset every 'error_60day' job back to 'gather_60day', then re-run that stage
    job=utils.get_job(datasource_id,'error_60day')
    while(job!=None):
        unixname=job[0]
        utils.change_status('gather_60day','error_60day',datasource_id,unixname)
        job=utils.get_job(datasource_id,'error_60day')
        if(utils.error):
            sys.exit()
    SourceForge60day.run(utils,datasource_id)
    #reset every 'error_year' job back to 'gather_year', then re-run that stage
    job=utils.get_job(datasource_id,'error_year')
    while(job!=None):
        unixname=job[0]
        utils.change_status('gather_year','error_year',datasource_id,unixname)
        job=utils.get_job(datasource_id,'error_year')
        if(utils.error):
            sys.exit()
    SourceForgeYear.run(utils,datasource_id)
main(sys.argv)
| Python |
'''
Created on Sep 28, 2009
This module spiders the 60 day stats page for each job and prepares for yearly spidering.
@author: Steven Norris
'''
import re
import sys
import traceback
import time
BASE_INDEX='sourceforge.net/projects/'
BASE_SITE='sourceforge.net/'
#This spider finds the link for the stats page on the development page
def statsSpider(page):
    """Extract the group-id portion of a stats link found in *page*.

    Matches the first 'group_id=...\"' fragment and returns it with the
    9-char 'group_id=' prefix and the final 11 characters stripped, or
    None when no fragment is present.
    """
    found = re.search('group_id=.+?"', page)
    if found is None:
        return None
    fragment = found.group(0)
    return fragment[9:-11]
def run(utils,datasource_id):
    """Work through 'gather_60day' jobs for *datasource_id*.

    For each claimed project: read its stored development page, extract the
    group id, fetch the 60-day statistics page (retrying up to five times
    when the stats server reports a timeout), store it in
    sf_project_indexes.statistics_html and advance the job to 'gather_year'.
    A persistent timeout also queues an 'error_60day' retry job.
    utils.error aborts the whole loop.
    """
    #collects the 60daystats pages for each job and adds them to sv_indexes
    print("\nGathering 60daystats pages.")
    #runs jobs
    job=utils.get_job(datasource_id,'gather_60day')
    if(utils.error):
        sys.exit()
    while(job!=None):
        # NOTE(review): 3s pause between projects — presumably rate limiting
        time.sleep(3)
        try:
            unixname=job[0]
            print("\nGathering for "+unixname)
            #collects development page and crawls for 60day link
            print("Retrieving development HTML.")
            dev_page=utils.get_development(datasource_id,unixname)
            dev_page=dev_page[0]
            if(dev_page):
                print("Finding link.")
                id=statsSpider(dev_page)
                #inserts 60day page into project_indexes
                if(id):
                    print("Inserting 60daystats page.")
                    stats60=utils.get_page("http://"+BASE_SITE+"project/stats/?group_id="+id+"&ugn="+unixname+"&type&mode=60day")
                else:
                    print("No group id found.")
                    stats60=None
                if(stats60 and re.search('We apologize. The page you were looking for cannot be found.',stats60)==None):
                    #retry up to 5 times while the stats server reports a timeout
                    i=0
                    while(re.search("Connection to statistics server timed out",stats60)!=None and i<5):
                        stats60=utils.get_page("http://"+BASE_SITE+"project/stats/?group_id="+id+"&ugn="+unixname+"&type&mode=60day")
                        i+=1
                    if(re.search("Connection to statistics server timed out",stats60)==None):
                        update="UPDATE sf_project_indexes SET statistics_html=%s WHERE datasource_id=%s AND proj_unixname=%s"
                        utils.db_insert(update,stats60,datasource_id,unixname)
                        utils.change_status('gather_year','gather_60day',datasource_id,unixname)
                        job=utils.get_job(datasource_id,'gather_60day')
                        if(utils.error):
                            sys.exit()
                    else:
                        #still timing out: move on but record it and queue an 'error_60day' retry job
                        print("!!!!WARNING!!!! 60daystats page timed out.")
                        utils.change_status('gather_year','gather_60day',datasource_id,unixname)
                        utils.post_error('gather_60day:\n60daystats page timed out.',datasource_id,unixname)
                        insert='''INSERT INTO sf_jobs (unixname,datasource_id,status,last_modified)
                                VALUES(%s,%s,%s,NOW())'''
                        utils.db_insert(insert,unixname,datasource_id,'error_60day')
                        job=utils.get_job(datasource_id,'gather_60day')
                        if(utils.error):
                            sys.exit()
                #if 60day insertion fails posts error, gets job, and checks for errors
                else:
                    print("60daystats page does not exist.")
                    utils.change_status('gather_year','gather_60day',datasource_id,unixname)
                    job=utils.get_job(datasource_id,'gather_60day')
                    if(utils.error):
                        sys.exit()
            #if development doesn't collect properly posts error, gets job, and checks for errors
            else:
                print("!!!!WARNING!!!! 60daystats page did not collect correctly.")
                utils.post_error('gather_60day:\nDevelopment gathering yielded a null response.',datasource_id,unixname)
                job=utils.get_job(datasource_id,'gather_60day')
                if(utils.error):
                    sys.exit()
        #if collecting process fails posts error, gets job, and checks for errors
        except:
            print("!!!!WARNING!!!! 60daystats page did not collect correctly.")
            utils.post_error('gather_60day:\n'+traceback.format_exc(),datasource_id,unixname)
            job=utils.get_job(datasource_id,'gather_60day')
            if(utils.error):
                sys.exit()
'''
Created on Sep 28, 2009
This module spiders the developers page for each project and prepares for 60 day stats spidering.
@author: Steven Norris
'''
import re
import sys
import traceback
import time
BASE_INDEX='sourceforge.net/projects/'
BASE_SITE='sourceforge.net/'
#This spider finds the memberlist link on the development page
def memberlistSpider(page):
    """Return the first 'project/memberlist.php...' link in *page* with its
    closing quote stripped, or None when the page holds no memberlist link."""
    found = re.search('project/memberlist\.php.+?"', page)
    if found is None:
        return None
    return found.group(0)[:-1]  # drop the trailing '"'
def run(utils,datasource_id):
    """Process 'gather_memberlist' jobs: parse each project's stored
    development page for its memberlist link, fetch the memberlist page into
    sf_project_indexes.developers_html, and advance the job to
    'gather_resumes'.

    utils         -- SourceForgeUtils-style helper (DB access, page fetching)
    datasource_id -- id of the data source the jobs belong to
    """
    #collects the memberlist pages for each job and adds them to project_indexes
    print("\nGathering memberlist pages.")
    #runs jobs
    job=utils.get_job(datasource_id,'gather_memberlist')
    if(utils.error):
        sys.exit()
    while(job!=None):
        time.sleep(3)  # throttle requests to the site
        try:
            unixname=job[0]
            print("\nGathering for "+unixname)
            #collects development page and crawls for memberlist link
            print("Retrieving development HTML.")
            dev_page=utils.get_development(datasource_id,unixname)
            if(dev_page):
                dev_page=dev_page[0]
                print("Finding link.")
                link=memberlistSpider(dev_page)
                #inserts memberlist page into project_indexes
                if(link):
                    print("Inserting memberlist page.")
                    memberlist=utils.get_page("http://"+BASE_SITE+link)
                else:
                    print("Link was not found.")
                    memberlist=None
                if(memberlist and re.search('We apologize. The page you were looking for cannot be found.',memberlist)==None):
                    update="UPDATE sf_project_indexes SET developers_html=%s WHERE datasource_id=%s AND proj_unixname=%s"
                    utils.db_insert(update,memberlist,datasource_id,unixname)
                    # fix: previous stage was misspelled 'gahter_memberlist',
                    # corrupting the previous_stage column in sf_jobs
                    utils.change_status('gather_resumes','gather_memberlist',datasource_id,unixname)
                    job=utils.get_job(datasource_id,'gather_memberlist')
                    if(utils.error):
                        sys.exit()
                #if memberlist insertion fails posts error, gets job, and checks for errors
                else:
                    print("!!!!WARNING!!!! Memberlist page did not collect correctly.")
                    utils.post_error('gather_memberlist:\nMemberlist page either did not exist or led to a faulty page.',datasource_id,unixname)
                    job=utils.get_job(datasource_id,'gather_memberlist')
                    if(utils.error):
                        sys.exit()
            #if development doesn't collect properly posts error, gets job, and checks for errors
            else:
                print("!!!!WARNING!!!! Memberlist page did not collect correctly.")
                utils.post_error('gather_memberlist:\nDevelopment gathering yielded a null response.',datasource_id,unixname)
                job=utils.get_job(datasource_id,'gather_memberlist')
                if(utils.error):
                    sys.exit()
        #if collecting process fails posts error, gets job, and checks for errors
        except Exception:
            print("!!!!WARNING!!!! Memberlist page did not collect correctly.")
            utils.post_error('gather_memberlist:\n'+traceback.format_exc(),datasource_id,unixname)
            job=utils.get_job(datasource_id,'gather_memberlist')
            if(utils.error):
                sys.exit()
| Python |
'''
Created on Sep 28, 2009
This module spiders the individual developer pages for each job and completes the job cycle.
@author: Steven Norris
'''
import re
import sys
import traceback
import time
BASE_INDEX='sourceforge.net/projects/'
BASE_SITE='sourceforge.net/'
def developersSpider(page):
    """Return every developer table row ('<tr class=...>...</tr>') in *page*.

    DOTALL lets a row span multiple lines; an empty list means no rows matched.
    """
    row_pattern = re.compile('<tr class=".+?">.+?</tr>', re.DOTALL)
    return row_pattern.findall(page)
def resumeSpider(page):
    """Return the first 'people/viewprofile.php...' link in *page*, or None.

    The regex ends at the closing attribute quote; the quote itself is
    stripped (fix: it used to be returned, so the URL built from the link
    ended in '"' and pointed at a faulty page).
    """
    hits = re.findall('people/viewprofile.php.+?"', page)
    if not hits:
        return None
    return hits[0][:-1]
def profileSpider(page):
    """Return the first 'users/<name>/' link in *page*, or None when absent.

    Fix: previously raised IndexError on a row without a profile link even
    though the caller tests the result for truthiness; returning None also
    matches resumeSpider's no-match behavior.
    """
    hits = re.findall('users/.+?/', page)
    if not hits:
        return None
    return hits[0]
def run(utils,datasource_id):
    """Work through 'gather_resumes' jobs for *datasource_id*.

    For each claimed project: read its stored memberlist page, pull out the
    developer table rows, fetch every developer's profile page (and skills/
    resume page when linked) into sf_developer_indexes, then mark the job
    'completed'.  utils.error aborts the loop.
    """
    #collects the developer pages for each job and adds them to sv_indexes
    print("\nGathering developer pages.")
    #runs jobs
    job=utils.get_job(datasource_id,'gather_resumes')
    if(utils.error):
        sys.exit()
    while(job!=None):
        time.sleep(3)
        try:
            unixname=job[0]
            print("\nGathering for "+unixname)
            #collects memberlist page and crawls for developer links
            print("Retrieving memberlist HTML.")
            members=utils.get_memberlist(datasource_id,unixname)
            members=members[0]
            if(members):
                print("Finding link.")
                links=developersSpider(members)
                #inserts developer pages into project_indexes
                if(links):
                    print("Inserting developer pages.")
                    for link in links:
                        profileLink=profileSpider(link)
                        if (profileLink):
                            time.sleep(3)
                            profile=utils.get_page("http://"+BASE_SITE+profileLink)
                            #login name is the text between 'users/' and the trailing '/'
                            userName=profileLink[6:len(profileLink)-1]
                            print("Finding pages for "+userName)
                            if(profile and re.search('We apologize. The page you were looking for cannot be found.',profile)==None):
                                insert='''INSERT IGNORE INTO sf_developer_indexes (dev_loginname,profile_html,date_collected,datasource_id)
                                VALUES(%s,%s,NOW(),%s)'''
                                utils.db_insert(insert,userName,profile,datasource_id)
                                resumeLink=resumeSpider(link)
                                if (resumeLink):
                                    time.sleep(3)
                                    resume=utils.get_page("http://"+BASE_SITE+resumeLink)
                                    if(resume and re.search('We apologize. The page you were looking for cannot be found.',resume)==None):
                                        update='''UPDATE sf_developer_indexes SET skills_html=%s WHERE datasource_id=%s AND dev_loginname=%s'''
                                        utils.db_insert(update,resume,datasource_id,userName)
                                    else:
                                        print("!!!!WARNING!!!! Resume page led to a faulty page or did not exist for "+userName)
                                        print(resumeLink);
                                        utils.post_error('gather_resumes:\nA resume page either did not exist or led to a faulty page.',datasource_id, unixname)
                                        job=utils.get_job(datasource_id,'gather_resumes')
                                        if(utils.error):
                                            sys.exit()
                            #profile page missing or faulty for this developer
                            else:
                                print("!!!!WARNING!!!! Resume pages did not collect correctly for "+userName)
                                utils.post_error('gather_resumes:\nA profile page either did not exist or led to a faulty page.',datasource_id,unixname)
                                job=utils.get_job(datasource_id,'gather_resumes')
                                if(utils.error):
                                    sys.exit()
                        #row held no profile link
                        else:
                            print("!!!!WARNING!!!! resumeLink does not exist.")
                            utils.post_error('gather_resumes:\nA resume page either did not exist or led to a faulty page.',datasource_id,unixname)
                            job=utils.get_job(datasource_id,'gather_resumes')
                            if(utils.error):
                                sys.exit()
                    #change completed
                    utils.change_status('completed','gather_resumes',datasource_id,unixname)
                    job=utils.get_job(datasource_id,'gather_resumes')
                    if(utils.error):
                        sys.exit()
                else:
                    print("!!!!WARNING!!!! Links to developer pages not found.")
                    utils.post_error('gather_resumes:\nResume links were not found.',datasource_id,unixname)
                    job=utils.get_job(datasource_id,'gather_resumes')
                    if(utils.error):
                        sys.exit()
            #if memberlist doesn't collect properly posts error, gets job, and checks for errors
            else:
                print("!!!!WARNING!!!! Developer pages did not collect correctly.")
                utils.post_error('gather_resumes:\nIndex gathering yielded a null response.',datasource_id,unixname)
                job=utils.get_job(datasource_id,'gather_resumes')
                if(utils.error):
                    sys.exit()
        #if collecting process fails posts error, gets job, and checks for errors
        except:
            print("!!!!WARNING!!!! Developer pages did not collect correctly.")
            utils.post_error('gather_resumes:\n'+traceback.format_exc(),datasource_id,unixname)
            job=utils.get_job(datasource_id,'gather_resumes')
            if(utils.error):
                sys.exit()
| Python |
'''
Created on Apr 12, 2010
This module performs the clean up for gather_resumes jobs.
@author: StevenNorris
'''
import sys
import traceback
def run(utils,datasource_id):
    """Reset 'gather_resumes' jobs stuck 'In_Progress' back to the
    'gather_resumes' stage so they can be re-run.  Unlike the other
    clean-up modules, no page-deletion step is performed here.

    utils         -- SourceForgeUtils-style helper (DB access)
    datasource_id -- id of the data source whose jobs are cleaned
    """
    #Cleans up gather_resumes jobs
    print("\nStarting resumes clean up.")
    #Gets Job
    job=utils.get_cleanup_job(datasource_id,'gather_resumes')
    if(utils.error):
        sys.exit()
    while(job!=None):
        #Cleans up for the job
        try:
            unixname=job[0]
            # fix: print() call form, consistent with the rest of the codebase
            print("Cleaning up for "+unixname)
            utils.change_status('gather_resumes','Clean_Up',datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_resumes')
            if(utils.error):
                sys.exit()
        #If process fails, post error and get new job
        except Exception:
            print("!!!!WARNING!!!! Clean up resumes for "+unixname+" failed.")
            utils.post_error('Clean_Up(resumes):\n'+traceback.format_exc(),datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_resumes')
            if(utils.error):
                sys.exit()
| Python |
'''
Created on Sep 28, 2009
This module spiders the index page of each job and prepares for development spidering.
@author: Steven Norris
'''
import re
import sys
import traceback
import time
BASE_INDEX='sourceforge.net/projects/'
BASE_SITE='sourceforge.net/'
def run(utils,datasource_id,stage):
    """Work through 'gather_index' jobs: fetch each project's index page into
    sf_project_indexes and advance the job to the selected next stage.

    stage -- numeric selector for the follow-up status: 0 -> 'completed',
             1 -> 'gather_development', anything else -> 'gather_donors'
    """
    #Gathers index pages
    print("Gathering index pages")
    #map the numeric stage selector onto the next status name
    if(stage==0):
        stage='completed'
    elif(stage==1):
        stage='gather_development'
    else:
        stage='gather_donors'
    #runs jobs
    job=utils.get_job(datasource_id,'gather_index')
    if(utils.error):
        sys.exit()
    while(job!=None):
        time.sleep(3)
        try:
            unixname=job[0]
            print("Gathering index for "+unixname)
            index=utils.get_page("http://"+BASE_INDEX+unixname)
            if(index and re.search('We apologize. The page you were looking for cannot be found.',index)==None):
                insert="""INSERT INTO sf_project_indexes (proj_unixname,indexhtml,date_collected,datasource_id)
                VALUES(%s,%s,NOW(),%s)"""
                utils.db_insert(insert,unixname,index,datasource_id)
                #changes status, gets new job, and checks for errors
                utils.change_status(stage,'gather_index',datasource_id,unixname)
                job=utils.get_job(datasource_id,'gather_index')
                if (utils.error):
                    sys.exit()
            #if page does not collect properly, posts error, gets new job, and checks for errors
            else:
                print("!!!!WARNING!!!! Index gathering failed for "+unixname)
                utils.post_error('gather_index: \nIndex either did not exist or led to faulty page.' ,datasource_id,unixname)
                job=utils.get_job(datasource_id,'gather_index')
                if(utils.error):
                    sys.exit()
        #if index process fails, posts error, gets new job, and checks for errors
        except:
            print("!!!!WARNING!!! Index collection failed")
            utils.post_error('gather_index:\n'+traceback.format_exc(),datasource_id,unixname)
            job=utils.get_job(datasource_id,'gather_index')
            if(utils.error):
                sys.exit()
| Python |
'''
Created on Sep 28, 2009
This module spiders the development pages for each job and prepares for the developers spidering.
@author: Steven Norris
'''
import re
import sys
import traceback
import time
BASE_INDEX='sourceforge.net/projects/'
BASE_SITE='sourceforge.net/'
#This spider finds the links for the development page
def developmentSpider(page):
    """Return the first 'projects/<name>/develop' link in *page*, or None
    when the page holds no development link."""
    found = re.search('projects/.+?/develop', page)
    return found.group(0) if found else None
def run(utils,datasource_id,stage):
    """Work through 'gather_development' jobs: locate each project's
    development link on its stored index page, fetch the development page
    into sf_project_indexes.development_html, and advance the job.

    stage -- numeric selector for the follow-up status: 0 -> 'gather_memberlist',
             1 -> 'gather_60day', anything else -> 'gather_mailinglists'
    """
    #collects the development pages for each job and adds them to sv_indexes
    print("\nGathering development pages.")
    #map the numeric stage selector onto the next status name
    if(stage==0):
        stage='gather_memberlist'
    elif(stage==1):
        stage='gather_60day'
    else:
        stage='gather_mailinglists'
    #runs jobs
    job=utils.get_job(datasource_id,'gather_development')
    if(utils.error):
        sys.exit()
    while(job!=None):
        time.sleep(3)
        try:
            unixname=job[0]
            print("\nGathering for "+unixname)
            #collects index page and crawls for development link
            print("Retrieving index HTML.")
            index_page=utils.get_index(datasource_id,unixname)
            index_page=index_page[0]
            if(index_page):
                print("Finding link.")
                link=developmentSpider(index_page)
                #inserts development page into project_indexes
                if(link):
                    print("Inserting development page.")
                    development=utils.get_page("http://"+BASE_SITE+link)
                else:
                    print("Link to development page not found.")
                    development=None
                if(development and re.search('We apologize. The page you were looking for cannot be found.',development)==None):
                    update="UPDATE sf_project_indexes SET development_html=%s WHERE datasource_id=%s AND proj_unixname=%s"
                    utils.db_insert(update,development,datasource_id,unixname)
                    utils.change_status(stage,'gather_development',datasource_id,unixname)
                    job=utils.get_job(datasource_id,'gather_development')
                    if(utils.error):
                        sys.exit()
                #if development insertion fails posts error, gets job, and checks for errors
                else:
                    print("!!!!WARNING!!!! Development page did not collect correctly.")
                    utils.post_error('gather_development:\nDevelopment page either did not exist or led to a faulty page.',datasource_id,unixname)
                    job=utils.get_job(datasource_id,'gather_development')
                    if(utils.error):
                        sys.exit()
            #if index_page doesn't collect properly posts error, gets job, and checks for errors
            else:
                print("!!!!WARNING!!!! Development page did not collect correctly.")
                utils.post_error('gather_development:\nIndex gathering yielded a null response.',datasource_id,unixname)
                job=utils.get_job(datasource_id,'gather_development')
                if(utils.error):
                    sys.exit()
        #if collecting process fails posts error, gets job, and checks for errors
        except:
            print("!!!!WARNING!!!! Development page did not collect correctly.")
            utils.post_error('gather_development:\n'+traceback.format_exc(),datasource_id,unixname)
            job=utils.get_job(datasource_id,'gather_development')
            if(utils.error):
                sys.exit()
| Python |
'''
Created on Apr 12, 2010
This module performs the clean up for gather_mailinglists jobs.
@author: StevenNorris
'''
import sys
import traceback
def run(utils, datasource_id):
    """Clean up 'gather_mailinglists' jobs left In_Progress.

    Deletes the partially collected mailing-list rows for each queued
    project and resets the job status so it can be collected again.
    """
    print("\nStarting mailing lists clean up.")
    job = utils.get_cleanup_job(datasource_id, 'gather_mailinglists')
    if utils.error:
        sys.exit()
    while job != None:
        try:
            unixname = job[0]
            # print() call form: valid under both Python 2 and 3 (the rest
            # of the file already uses it).
            print("Cleaning up for " + unixname)
            utils.delete_mailinglists(unixname, datasource_id)
            utils.change_status('gather_mailinglists', 'Clean_Up', datasource_id, unixname)
            job = utils.get_cleanup_job(datasource_id, 'gather_mailinglists')
            if utils.error:
                sys.exit()
        # Fix: except Exception (not bare except) so the sys.exit() above is
        # not swallowed and reported as a clean-up failure.
        except Exception:
            print("!!!!WARNING!!!! Clean up mailing lists for " + unixname + " failed.")
            utils.post_error('Clean_Up(mailing lists):\n' + traceback.format_exc(), datasource_id, unixname)
            job = utils.get_cleanup_job(datasource_id, 'gather_mailinglists')
            if utils.error:
                sys.exit()
| Python |
'''
Created Dec. 13, 2009
This module is used to spider the mailing list pages for message pages and store them in the database.
@author: Steven Norris
'''
import re
import sys
import time
BASE_INDEX='sourceforge.net/mailarchive/'
BASE_SITE='sourceforge.net/'
#This method finds page links for a mailing list page
def mailing_month(page):
    """Extract per-month archive links from a mailing-list page.

    Each matched forum.php link is rebuilt so that max_rows carries the
    month's message count and style is forced to 'flat'.

    NOTE(review): replace('&','&') is a no-op -- the source was probably
    HTML-unescaped at some point and originally read replace('&amp;','&').
    Behavior is preserved exactly as written.
    """
    links = []
    for raw_link, msg_count in re.findall('(forum.php\?forum_name=.+?&max_rows=.+?&style=.+?&viewmonth=.+?)">\((\d+?)\)</a>', page):
        link = raw_link.replace('&', '&')
        rows_at = link.find('max_rows')
        style_at = link.find('&style=')
        month_at = link.find('&viewmonth=')
        # NOTE(review): +10 keeps one character of the original max_rows value
        # in front of msg_count, exactly as the original slice arithmetic did.
        rebuilt = (link[0:rows_at + 10] + msg_count +
                   link[style_at:style_at + 7] + 'flat' +
                   link[month_at:len(link)])
        links.append(rebuilt)
    return links
#This method runs the main method for collection of message pages
def run(utils, datasource_id):
    """Download the monthly message pages for every 'gather_messages' job.

    For each project: reads the stored per-list mailing-list HTML, spiders
    it for monthly archive links, downloads each month's page and inserts it
    into sf_mailing_pages_indexes, then marks the job completed.
    """
    print('\nGathering message pages.')
    job = utils.get_job(datasource_id, 'gather_messages')
    if utils.error:
        sys.exit()
    while job != None:
        time.sleep(3)  # be polite to sourceforge.net
        unixname = job[0]
        print('\nGathering for ' + unixname)
        print("Retrieving Specific Mailing List HTML")
        mailing_pages = utils.get_mailing_specific(datasource_id, unixname)
        if mailing_pages:
            for page in mailing_pages:
                # Renamed from 'list' -- the original shadowed the builtin.
                list_name = page[0]
                html = page[1]
                print('*Retrieving for ' + list_name)
                month_links = mailing_month(html)
                if month_links:
                    for link in month_links:
                        # Links end in ...viewmonth=YYYYMM.
                        year = link[len(link) - 6:len(link) - 2]
                        month = link[len(link) - 2:]
                        time.sleep(3)
                        print('**Collecting for ' + month + ':' + year)
                        print('**Using link: ' + link)
                        # Renamed from 'page' -- the original shadowed the
                        # enclosing loop variable.
                        month_page = utils.get_page('http://' + BASE_INDEX + link)
                        if month_page and re.search('We apologize. The page you were looking for cannot be found.', month_page) == None:
                            insert = '''INSERT INTO sf_mailing_pages_indexes (proj_unixname,list_name,year,month,messages_html,datasource_id,date_collected)
                            VALUES (%s,%s,%s,%s,%s,%s,NOW())'''
                            print('**Inserting into database')
                            utils.db_insert(insert, unixname, list_name, year, month, month_page, datasource_id)
                        else:
                            print('**Link ' + link + 'either led to a faulty page or did not exist.')
                else:
                    print("*!!Specific Mailing List Pages do not Exist!!.")
            utils.change_status('completed', 'gather_messages', datasource_id, unixname)
            job = utils.get_job(datasource_id, 'gather_messages')
            if utils.error:
                sys.exit()
        else:
            print("!!Specific Mailing Lists do not Exist!!")
            utils.change_status('completed', 'gather_messages', datasource_id, unixname)
            job = utils.get_job(datasource_id, 'gather_messages')
            if utils.error:
                sys.exit()
'''
Created on Aug 16, 2009
This module is designed to run the necessary code to spider the information from
sourceforge.net and add the information to the oss_mole database.
RUN INSTRUCTIONS
Run this module from command line with the following format:
[Interpreter] SourceForgeSpider.py [datasource_id] [Test T/F] [Stage 1/2/3/4/5] [Mailing Mode]
@author: StevenNorris
'''
from SourceForgeUtils import SourceForgeUtils
import sys
import SourceForgeIndex
import SourceForgeDevelopment
import SourceForge60day
import SourceForgeYear
import SourceForgeDevelopers
import SourceForgeResumes
import SourceForgeMailingLists
import SourceForgeMailingListsSpecific
import SourceForgeMailingPagesMonthly
import SourceForgeDonors
import SourceForgeMailingPages
#this method runs all necessary method for spidering sourceforge.net
def main(argv):
    """Parse command-line arguments and run the requested spider stage.

    argv -- [script, datasource_id, test('T'/'F'), stage('1'-'5'), mailing('A'/'M')]
            (the mailing mode is only consulted for stage 5).
    Prints usage instructions and exits on any missing/invalid argument.
    """
    try:
        datasource_id = argv[1]
        test = argv[2]
        if test != 'T' and test != 'F':
            # Invalid flag: raise to fall through to the usage message (the
            # original forced this with a deliberate 1/0 ZeroDivisionError).
            raise ValueError('test flag must be T or F')
        stage = argv[3]
        if stage == '1':
            stage = 1
        elif stage == '2':
            stage = 2
        elif stage == '3':
            stage = 3
        elif stage == '4':
            stage = 4
        elif stage == '5':
            stage = 5
            # The mailing mode is only needed (and only validated) for stage 5.
            mailing = argv[4]
            if mailing != 'M' and mailing != 'A':
                raise ValueError('mailing mode must be M or A')
        else:
            raise ValueError('stage must be 1-5')
    except Exception:
        print("""RUN INSTRUCTIONS
        Run this module from command line with the following format:
        [Interpreter] SourceForgeSpider.py [datasource_id] [Test T/F] [Stage 1/2/3/4/5] [Mailing Collection A/M]""")
        sys.exit()
    # Test mode points the utils at the test database credentials.
    try:
        if test == 'T':
            print("TEST MODE ACTIVATED")
            utils = SourceForgeUtils('dbInfoTest.txt')
        else:
            utils = SourceForgeUtils('dbInfo.txt')
    except Exception:
        print("Please create the dbInfo.txt and the dbInfoTest.txt files. See ReadMe for formatting.")
        sys.exit()
    # Run the spidering for the requested stage.
    if stage == 1:
        SourceForgeIndex.run(utils, datasource_id, 0)
    elif stage == 2:
        SourceForgeIndex.run(utils, datasource_id, 1)
        SourceForgeDevelopment.run(utils, datasource_id, 0)
        SourceForgeDevelopers.run(utils, datasource_id)
        SourceForgeResumes.run(utils, datasource_id)
    elif stage == 3:
        SourceForgeIndex.run(utils, datasource_id, 2)
        SourceForgeDonors.run(utils, datasource_id)
    elif stage == 4:
        SourceForgeIndex.run(utils, datasource_id, 1)
        SourceForgeDevelopment.run(utils, datasource_id, 1)
        SourceForge60day.run(utils, datasource_id)
        SourceForgeYear.run(utils, datasource_id)
    else:
        SourceForgeIndex.run(utils, datasource_id, 1)
        SourceForgeDevelopment.run(utils, datasource_id, 2)
        SourceForgeMailingLists.run(utils, datasource_id)
        SourceForgeMailingListsSpecific.run(utils, datasource_id)
        if mailing == 'A':
            SourceForgeMailingPages.run(utils, datasource_id)
        else:
            SourceForgeMailingPagesMonthly.run(utils, datasource_id)
main(sys.argv) | Python |
'''
Created on Apr 12, 2010
This module performs the clean up for gather_donors jobs.
@author: StevenNorris
'''
import sys
import traceback
def run(utils, datasource_id):
    """Clean up 'gather_donors' jobs left In_Progress.

    Deletes the partially collected donor rows for each queued project and
    resets the job status so it can be collected again.
    """
    print("\nStarting donors clean up.")
    job = utils.get_cleanup_job(datasource_id, 'gather_donors')
    if utils.error:
        sys.exit()
    while job != None:
        try:
            unixname = job[0]
            # print() call form: valid under both Python 2 and 3 (the rest
            # of the file already uses it).
            print("Cleaning up for " + unixname)
            utils.delete_donors(unixname, datasource_id)
            utils.change_status('gather_donors', 'Clean_Up', datasource_id, unixname)
            job = utils.get_cleanup_job(datasource_id, 'gather_donors')
            if utils.error:
                sys.exit()
        # Fix: except Exception (not bare except) so the sys.exit() above is
        # not swallowed and reported as a clean-up failure.
        except Exception:
            print("!!!!WARNING!!!! Clean up donors for " + unixname + " failed.")
            utils.post_error('Clean_Up(donors):\n' + traceback.format_exc(), datasource_id, unixname)
            job = utils.get_cleanup_job(datasource_id, 'gather_donors')
            if utils.error:
                sys.exit()
| Python |
# -*- coding: utf-8 -*-
import re
import StringIO
import sys
import os
import tarfile
import zipfile
#from pyparsing import dblQuotedString
#from pyparsing import sglQuotedString
import sqlalchemy
from sqlalchemy import *
import shutil
from mpi4py import MPI
import ConfigParser
config = ConfigParser.RawConfigParser()
config.read('metric.conf')
# Each MPI rank unpacks and measures projects in its own scratch directory.
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
cachedir = 'metric_cache'+str(rank)+'/'
if not os.path.exists(cachedir):
    os.makedirs(cachedir)
try:
    DB_USER = config.get('metric','user')
    DB_PASS = config.get('metric','pass')
    DB_ADDRESS = config.get('metric','database')
    METRIC = config.get('metric','table')
    DATASOURCE = config.getint('metric','datasource')
# Fix: except Exception instead of a bare except, which also hid
# KeyboardInterrupt/SystemExit during start-up.
except Exception:
    print('error reading config file')
    sys.exit(1)
#database setup
mysql_db = create_engine('mysql://'+DB_USER+':'+DB_PASS+'@'+DB_ADDRESS+'?charset=utf8&use_unicode=0')
connection = mysql_db.connect()
meta = MetaData()
meta.bind = connection
metrics = Table(METRIC, meta, autoload=True)
def untarFile(filename, dest=None):
    """Extract every member of the tar archive *filename* into *dest*
    (defaults to this rank's cachedir, as before).

    Fix: the archive is now closed even when extraction fails; the original
    leaked the open file handle.
    NOTE(review): tarfile.extract trusts member paths (no traversal check);
    archives are assumed to come from the trusted local metric store.
    """
    if dest is None:
        dest = cachedir
    tf = tarfile.open(filename, 'r')
    try:
        for item in tf:
            tf.extract(item, path=dest)
    finally:
        tf.close()
def walkSource(root=None):
    """Collect the contents of every C/C++ source file under *root*
    (defaults to this rank's cachedir, as before).

    Returns a list of file-content strings, or the sentinel string 'EMPTY'
    when no usable source file was found (callers compare against 'EMPTY').
    Files of 8 bytes or less are skipped (no more empty files).
    """
    if root is None:
        root = cachedir
    filestack = []
    for dirpath, dirnames, filenames in os.walk(root):
        for fname in filenames:
            # Same extension set as before, via endswith instead of slicing.
            if fname.endswith(('.c', '.h', '.h.in', '.cpp', '.c++')):
                path = dirpath + '/' + fname
                try:
                    if os.path.getsize(path) > 8:
                        reader = open(path, 'r')
                        filestack.append(reader.read())
                        reader.close()
                # Fix: except Exception, not a bare except.
                except Exception:
                    print('failed to open: ' + path)
    if len(filestack) < 1:
        return 'EMPTY'
    return filestack
def stripSource1(source):
    """Blank the contents of double- and single-quoted string literals.

    NOTE(review): the patterns are greedy across a line and also consume the
    character before each opening quote; kept exactly as the original.
    """
    scrubbed = re.sub(r'[^\\]".*[^\\]"', '""', source)
    scrubbed = re.sub(r"[^\\]'.*[^\\]'", "''", scrubbed)
    return scrubbed
def stripSource2(source):
    """Strip C /* */ and C++ // comments, then collapse blank lines."""
    block_comment = re.compile('/\*.*?\*/', re.DOTALL)
    text = block_comment.sub('', source)
    # single-line (c++ style) comments
    text = re.sub('//.*', '', text)
    # blank lines left behind by the removals
    text = re.sub('\n\s*?\n', '\n', text)
    return text
def stripSource3(source):
    """Flatten the source onto one line: newlines become spaces and runs of
    whitespace collapse to a single space."""
    # Newlines are replaced first so they cannot form new double spaces.
    flattened = re.sub('\n', ' ', source)
    return re.sub('\s+', ' ', flattened)
def buildStripped1():
    """Populate fs2 with the literal-stripped form of every file in fs1."""
    fs2.extend(stripSource1(src) for src in fs1)
    return fs2
def buildStripped2():
    """Populate fs3 with the comment-stripped form of every file in fs2."""
    fs3.extend(stripSource2(src) for src in fs2)
    return fs3
def buildStripped3():
    """Populate fs4 with the whitespace-flattened form of every file in fs3."""
    fs4.extend(stripSource3(src) for src in fs3)
    return fs4
def getNumFiles():
    """Return the number of collected source files (length of fs1)."""
    return len(fs1)
def getLines(files=None):
    """Return the total physical line count across *files*.

    files -- list of file-content strings; defaults to the module-level fs2
             (literal-stripped sources), preserving the old no-arg behavior.
    Each file contributes its newline count + 1.
    """
    if files is None:
        files = fs2
    return sum(len(re.findall('\n', sourcefile)) + 1 for sourcefile in files)
def getSourceLines(files=None):
    """Return the line count across comment-stripped sources.

    files -- defaults to the module-level fs3, preserving the old behavior.
    Each file contributes its newline count + 1.
    """
    if files is None:
        files = fs3
    return sum(len(re.findall('\n', sourcefile)) + 1 for sourcefile in files)
def getBlankLines(files=None):
    """Count blank lines across *files* (defaults to the module-level fs2).

    Uses the original non-overlapping '\\n\\s*?\\n' match, so runs of blank
    lines count in pairs, exactly as before.
    """
    if files is None:
        files = fs2
    return sum(len(re.findall('\n\s*?\n', sourcefile)) for sourcefile in files)
def getComments(files=None):
    """Count comment lines across *files* (defaults to fs1, the raw sources).

    Each // comment counts as one line; each /* */ block counts as the
    number of lines it spans.
    """
    if files is None:
        files = fs1
    block_re = re.compile('/\*.*?\*/', re.DOTALL)
    comments = 0
    for sourcefile in files:
        comments += len(re.findall('//.*?\n', sourcefile))
        for block in block_re.findall(sourcefile):
            newlines = re.findall('\n', block)
            if newlines:
                comments += (len(newlines) + 1)
            else:
                comments += 1
    return comments
def getMaxDepth(files=None):
    """Return the deepest brace nesting seen in any file (defaults to fs4).

    Depth restarts at 0 for each file; unbalanced closing braces can drive
    the running depth negative, exactly as before.
    """
    if files is None:
        files = fs4
    maxdepth = 0
    for sourcefile in files:
        depth = 0
        for char in sourcefile:
            if char == '{':
                depth += 1
            elif char == '}':
                depth -= 1
            if depth > maxdepth:
                maxdepth = depth
    return maxdepth
def getMethods(files=None):
    """Count function definitions across *files* (defaults to fs4, the
    flattened sources) with the heuristic 'type name(args) {...}' regex."""
    if files is None:
        files = fs4
    return sum(len(re.findall('\w+? \w+?\s?\(.*?\)\s?{.*?}', sourcefile)) for sourcefile in files)
def getStruct(files=None):
    """Count struct definitions ('struct name {...}') across *files*
    (defaults to fs4, the flattened sources)."""
    if files is None:
        files = fs4
    return sum(len(re.findall('struct \w+?\s?{.*?}', sourcefile)) for sourcefile in files)
def getFanout(files=None):
    """Count distinct '#include <...>' directives across *files*
    (defaults to fs4). Duplicates across files count once."""
    if files is None:
        files = fs4
    includes = set()
    for sourcefile in files:
        includes.update(re.findall('#include\s?<.*?>', sourcefile))
    return len(includes)
def getNcloc(files=None):#non-comment lines of code
    """Count the newlines remaining after stripping /* */ and // comments
    from *files* (defaults to fs2, the literal-stripped sources)."""
    if files is None:
        files = fs2
    block_re = re.compile('/\*.*?\*/', re.DOTALL)
    total = 0
    for sourcefile in files:
        stripped = re.sub('//.*', '', block_re.sub('', sourcefile))
        total += len(re.findall('\n', stripped))
    return total
def getTodo(files=None):
    """Count TODO/FIXME-style markers across *files* (defaults to fs1).

    Fix: the original listed 'TODO' twice and therefore counted every TODO
    marker double; each marker pattern now appears once.
    """
    if files is None:
        files = fs1
    markers = ('TODO', 'FIX-ME', 'FIXME', 'FIX-IT', 'FIXIT', 'TO-DO', 'XXX', 'TBD')
    summation = 0
    for sourcefile in files:
        for marker in markers:
            summation += len(re.findall(marker, sourcefile))
    return summation
def getWmc(files=None, methods=None):
    """Weighted-method count: branch keywords across *files* (defaults to
    fs4) plus the method count.

    methods -- defaults to the module-level num_methods, so the no-arg call
               must still happen AFTER getMethods(), as before.
    """
    if files is None:
        files = fs4
    if methods is None:
        methods = num_methods
    summation = 0
    for sourcefile in files:
        summation += (len(re.findall('\sif\s?\(', sourcefile)) +
                      len(re.findall('\swhile\s?\(', sourcefile)) +
                      len(re.findall('\sfor\s?\(', sourcefile)) +
                      len(re.findall('\scase\s.*?:', sourcefile)) +
                      len(re.findall('\sdefault\s?:', sourcefile)) +
                      len(re.findall('\sdo\s?{', sourcefile)))
    return summation + methods
def getBooleanComplex(files=None):
    """Count boolean operators (&&, ||, !) across *files* (defaults to fs4).

    NOTE(review): the '!' alternative also matches the '!' in '!=' -- kept
    exactly as before.
    """
    if files is None:
        files = fs4
    return sum(len(re.findall('(&&)|(\|\|)|(!)', sourcefile)) for sourcefile in files)
def getClasses(files=None):
    """Count class definitions (' class ... {...};') across *files*
    (defaults to fs4, the flattened sources)."""
    if files is None:
        files = fs4
    return sum(len(re.findall(' class .*?{.*?};', sourcefile)) for sourcefile in files)
# Fetch every (project_name, tarball path) for this datasource that has not
# been measured yet (dc IS NULL marks unprocessed rows).
proj_list = connection.execute("SELECT project_name,path FROM "+METRIC+" WHERE datasource_id = "+str(DATASOURCE)+" AND dc IS NULL;").fetchall()
#print proj_list[0][1]
# Partition the work list evenly across MPI ranks: this rank handles
# proj_list[ilo..ihi]; the last rank also absorbs the division remainder.
n = len(proj_list)/comm.Get_size()
ilo = rank*n
ihi = (rank+1)*n-1
if rank+1 == comm.Get_size():
    ihi = len(proj_list)-1
# Main measurement loop: for each assigned project, unpack its tarball into
# this rank's scratch directory, compute the source metrics over the
# module-level fs1..fs4 lists, and write the results back to the database.
for i in range(ilo,ihi+1):
    # Start every project from an empty scratch directory.
    shutil.rmtree(cachedir)
    if not os.path.exists(cachedir):
        os.makedirs(cachedir)
    # Reset the per-project accumulators read by the metric getters.
    num_files = 0
    num_comments = 0
    num_lines = 0
    ncss = 0
    #num_blank = 0
    max_bracket_depth = 0
    num_todo = 0
    num_methods = 0
    fanout = 0
    ncloc = 0
    noc = 0
    # fs1..fs4 are progressively stripped views of the sources:
    # raw -> literals blanked -> comments removed -> whitespace flattened.
    fs1 = []
    fs2 = []
    fs3 = []
    fs4 = []
    print proj_list[i][0] + " " +proj_list[i][1]
    untarFile(proj_list[i][1]) #index i, 1 is path (0 being name)
    print 'untar done'
    fs1 = walkSource()
    print 'Source walked'
    if fs1 == 'EMPTY':
        # No usable source: mark the row dc=-1 so it is skipped next run.
        connection.execute(metrics.update().where(metrics.c.datasource_id == DATASOURCE).where(metrics.c.project_name == proj_list[i][0]).values(dc=-1,last_updated = func.now()))
        print 'empty project'
        continue
    print 'not empty'
    fs2 = buildStripped1()
    print 'stripped 1'
    fs3 = buildStripped2()
    print 'stripped 2'
    fs4 = buildStripped3()
    print 'stripped 3'
    num_files = getNumFiles()
    num_lines = getLines()
    num_comments = getComments()
    ncss = getSourceLines()
    ncloc = getNcloc()
    #num_blank = getBlankLines()
    #max_bracket_depth = getMaxDepth()
    num_todo = getTodo()
    num_methods = getMethods()
    num_struct = getStruct() ## add into database
    fanout = getFanout()
    wmc = getWmc() #must be done AFTER getMethods
    bool_cmp = getBooleanComplex()
    classes = getClasses()
    # Persist all computed metrics for this project.
    connection.execute(metrics.update().where(metrics.c.datasource_id == DATASOURCE).where(metrics.c.project_name == proj_list[i][0]).values(todo_count = num_todo,dc = float(num_comments)/float(num_lines),cloc = num_comments,loc = num_lines,ncloc = ncloc,ncss = ncss,nom = num_methods,bool_exp = bool_cmp, fanout = fanout, wmc = wmc,files = num_files,noc = classes,last_updated = func.now()))
    shutil.rmtree(cachedir)
| Python |
# Carter Kozak
# c4kofony@gmail.com
# ckozak@elon.edu
# parser for Tigris data
# flossmole.org
# Copyright (C) 2011 Carter Kozak
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sqlalchemy
from sqlalchemy import *
import urllib
import re
import time
import sys
from datetime import datetime
from dateutil.parser import parse
import ConfigParser
config = ConfigParser.RawConfigParser()
config.read('tg.conf')
# Pull the DB credentials and every table name from tg.conf; abort when the
# config file is missing or incomplete.
try:
    DB_USER = config.get('tigris','user')
    DB_PASS = config.get('tigris','pass')
    DB_ADDRESS = config.get('tigris','address')
    PROJECT_LIST_INDEXES = config.get('tigris','proj_list_indexes')
    PROJ_INDEXES = config.get('tigris','project_indexes')
    PROJECTS = config.get('tigris','projects')
    PROJ_CAT = config.get('tigris','categories')
    DISC_INDEXES = config.get('tigris','disc_indexes')
    PEOPLE = config.get('tigris','people')
    #PROJ_DEV = config.get('tigris','proj_dev')
    DEV_ROLES = config.get('tigris','dev_roles')
    DEV_INDEXES = config.get('tigris','dev_indexes')
    DATASOURCE = config.getint('tigris','datasource')
    DISCUSS = config.get('tigris','discuss')
except Exception as e:
    print e
    print 'error reading tg.conf'
    sys.exit(1)
# Connect to MySQL and reflect every table named in the config; abort if any
# table cannot be autoloaded.
db = create_engine('mysql://'+DB_USER+':'+DB_PASS+'@'+DB_ADDRESS+'?charset=utf8&use_unicode=1')
connection = db.connect()
meta = MetaData()
meta.bind = connection
try:
    proj_list_indexes = Table(PROJECT_LIST_INDEXES, meta, autoload=True)
    project_indexes = Table(PROJ_INDEXES, meta, autoload=True)
    projects = Table(PROJECTS, meta, autoload=True)
    categories = Table(PROJ_CAT, meta, autoload=True)
    people = Table(PEOPLE, meta, autoload=True)
    #proj_dev = Table(PROJ_DEV, meta, autoload=True)
    dev_roles = Table(DEV_ROLES, meta, autoload=True)
    dev_indexes = Table(DEV_INDEXES, meta, autoload=True)
    disc_indexes = Table(DISC_INDEXES, meta, autoload=True)
    discuss = Table(DISCUSS, meta, autoload=True)
except Exception as e:
    print e
    print 'bad table info in tg.conf'
    sys.exit(1)
def parseProject(name):
proj = connection.execute('SELECT html FROM '+PROJ_INDEXES+' WHERE datasource_id = '+str(DATASOURCE)+' AND unixname = "'+name+'";')
html = proj.fetchone()[0]
proj.close()
#summary = re.search('<tr>\s+<th>Summary</th>\s+<td>(.*?)</td>\s+</tr>',html)
#print summary.group(1)
#categories = re.search('<tr>\s+<th>\s+Categor\S+\s+</th>\s+<td>\s+(.*?)\s+</td>\s+</tr>',html)
#if categories:
#hat = re.findall('org/">(.*?)</a>',categories.group(1))
#for cat in hat:
#pass
#try:
# connection.execute(categories.insert().values(datasource_id = DATASOURCE))
#except Exception as e:
# print e
license = re.search('<tr>\s+<th>License</th>\s+<td>\s+.*?">(.*?)</a>\s+</td>\s+</tr>',html)
if license:
#print license.group(1)
try:
connection.execute(projects.update().where(projects.c.datasource_id==DATASOURCE).where(projects.c.unixname==name).values(last_updated = func.now, license = license.group(1)))
except Exception as e:
print e
#owns = re.search('<tr>\s+<th>Owner[(]s[)]</th>\s+<td>\s+?(.*?)\s+?</td>\s+</tr>',html)
#if owns:
# o = re.findall('">(.*?)</a>',owns.group(1))
# for each in o:
# #print each
# try:
# connection.execute(project_owners.insert().values(project = name, datasource_id = DATASOURCE, last_updated = func.now(), owner = each))
# except Exception as e:
# print e
def parseDevs(name):
proj = connection.execute('SELECT html,project FROM '+DEV_INDEXES+' WHERE datasource_id = '+str(DATASOURCE)+' AND project = "'+name+'";')
html = proj.fetchone()
proj.close()
peoples = re.findall('<tr class="[ab]">\s+<td>\s*(.*?)\s*</td>\s+<td>\s*(.*?)\s*</td>\s+<td>\s+(.*?)\s+</td>\s+</tr>',html[0],re.DOTALL)
#people = re.findall('<tr class="[ab]">(.*?)</tr>',html,re.DOTALL)
for each in peoples:
#print each[0] #username
#print each[1] #full name
try:
connection.execute(people.insert().values(datasource_id = DATASOURCE, username = each[0], full_name = each[1], last_updated = func.now()))
except Exception as e:
print e
print 'problem inserting people'
for role in each[2].split(','):
#print role.strip()
try:
connection.execute(dev_roles.insert().values(datasource_id = DATASOURCE, last_updated = func.now(), project = html[1], username = each[0], role = role.strip()))
except Exception as e:
print e
print 'problem inserting roles'
def parseLists(name):
proj = connection.execute('SELECT html,project FROM '+DISC_INDEXES+' WHERE datasource_id = '+str(DATASOURCE)+' AND project = "'+name+'";')
html = proj.fetchone()
proj.close()
lol = re.findall('<tr class="[ab]">\s*(.*?)\s*</tr>',html[0],re.DOTALL) #lol: list of lists
for l in lol:
last_updated = None
last_post = re.search('<span class="nowrap">\s*(\d{4}.*?)\s*</span>',l)
try:
last_updated = parse(last_post.group(1))
except:
pass
title = re.search("<a href='viewForumSummary.do[?]dsForumId=(\S+)'>\s+(.*?)\s+</a>",l)
#print title.group(1)
desc = re.search('<p class="attrdesc">\s*(.*?)\s*</p>',l,re.DOTALL)
#print desc.group(1)
try:
connection.execute(discuss.insert().values(datasource_id = DATASOURCE, last_updated = func.now(), discussion = title.group(2), description = desc.group(1), last_comment = last_updated, forumid = title.group(1), project = html[1]))
except Exception as e:
print e
def parseProjects():
    """Run every per-project parser over each stored project index."""
    rows = connection.execute("SELECT unixname FROM "+PROJ_INDEXES+" WHERE datasource_id = "+str(DATASOURCE)+";")
    for row in rows:
        unixname = row[0]
        parseProject(unixname)
        parseDevs(unixname)
        parseLists(unixname)
parseProjects()
| Python |
# Carter Kozak
# c4kofony@gmail.com
# ckozak@elon.edu
#collector for Tigris data
# flossmole.org
# Copyright (C) 2011 Carter Kozak
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sqlalchemy
from sqlalchemy import *
import urllib
import re
import time
import sys
import ConfigParser
config = ConfigParser.RawConfigParser()
config.read('tg.conf')
# Pull the DB credentials and every table name from tg.conf; abort when the
# config file is missing or incomplete.
try:
    DB_USER = config.get('tigris','user')
    DB_PASS = config.get('tigris','pass')
    DB_ADDRESS = config.get('tigris','address')
    PROJECT_LIST_INDEXES = config.get('tigris','proj_list_indexes')
    PROJ_INDEXES = config.get('tigris','project_indexes')
    PROJECTS = config.get('tigris','projects')
    PROJ_CAT = config.get('tigris','categories')
    DISC_INDEXES = config.get('tigris','disc_indexes')
    PEOPLE = config.get('tigris','people')
    # PROJ_DEV = config.get('tigris','proj_dev')
    DEV_ROLES = config.get('tigris','dev_roles')
    DEV_INDEXES = config.get('tigris','dev_indexes')
    DATASOURCE = config.getint('tigris','datasource')
except Exception as e:
    print e
    print 'error reading tg.conf'
    sys.exit(1)
# Connect to MySQL and reflect every table named in the config; abort if any
# table cannot be autoloaded.
db = create_engine('mysql://'+DB_USER+':'+DB_PASS+'@'+DB_ADDRESS+'?charset=utf8&use_unicode=1')
connection = db.connect()
meta = MetaData()
meta.bind = connection
try:
    proj_list_indexes = Table(PROJECT_LIST_INDEXES, meta, autoload=True)
    project_indexes = Table(PROJ_INDEXES, meta, autoload=True)
    projects = Table(PROJECTS, meta, autoload=True)
    categories = Table(PROJ_CAT, meta, autoload=True)
    people = Table(PEOPLE, meta, autoload=True)
    # proj_dev = Table(PROJ_DEV, meta, autoload=True)
    dev_roles = Table(DEV_ROLES, meta, autoload=True)
    dev_indexes = Table(DEV_INDEXES, meta, autoload=True)
    disc_indexes = Table(DISC_INDEXES, meta, autoload=True)
except Exception as e:
    print e
    print 'bad table info in tg.conf'
    sys.exit(1)
def getProjectPage():
if 0 < len(connection.execute("SELECT html FROM "+PROJECT_LIST_INDEXES+" WHERE datasource_id = "+str(DATASOURCE)+";").fetchall()):
return
try:
project_page = urllib.urlopen('http://www.tigris.org/servlets/ProjectList?type=Projects&&field=ProjectName&matchValue=&matchType=contains&mode=Filtered&pageNum=1&itemsPerPage=500000')
project_indexes = project_page.read()
project_page.close()
connection.execute(proj_list_indexes.insert().values(datasource_id = DATASOURCE, last_updated= func.now(), html = project_indexes))
except Exception as e:
print type(e)
print 'error getting projects list, trying again in 30 sec'
time.sleep(30)
getProjectPage()
def getProjectList():
    """Parse the cached project listing into the projects and categories
    tables (no-op when projects already exist for this datasource)."""
    if 0 < int(connection.execute("SELECT count(*) FROM "+PROJ_INDEXES+" WHERE datasource_id = "+str(DATASOURCE)+";").fetchone()[0]):
        return
    listing = connection.execute("SELECT html FROM "+PROJECT_LIST_INDEXES+" WHERE datasource_id = "+str(DATASOURCE)+";").fetchone()
    # One tuple per listing row: (unixname, description, category html).
    matches = re.findall(r'<tr class="[ab]">\s+?<td><a href="http://.+?tigris.org/">(.+?)</a>\s+?</td>\s+?<td>(.*?)</td>\s+?<td>\s+?(.+?)\s+?</td>\s+?</tr>',listing['html'])
    for unixname, description, category_html in matches:
        for category in re.findall('">(.*?)</a>', category_html):
            try:
                connection.execute(categories.insert().values(datasource_id = DATASOURCE, project = unixname.strip(), category = category.strip(), last_updated = func.now()))
            except Exception as e:
                # Duplicates on re-runs are expected; ignore.
                pass
        try:
            connection.execute(projects.insert().values(datasource_id = DATASOURCE, unixname = unixname.strip(), description = description.strip(), last_updated = func.now()))
        except Exception as e:
            pass
def getProjPage(name):
try:
temp_page = urllib.urlopen('http://'+name+'.tigris.org/')
result = temp_page.read()
temp_page.close()
return result
except Exception as e:
print e
print 'Something went wrong fetching project page for '+str(name)+'.\nWaiting 30 sec and trying again.'
time.sleep(30)
return getProjPage(name)
def getProjectIndexes():
projects_list = connection.execute("SELECT p.unixname FROM "+PROJECTS+" p LEFT OUTER JOIN "+PROJ_INDEXES+" i ON p.unixname = i.unixname AND p.datasource_id = i.datasource_id WHERE p.datasource_id = "+str(DATASOURCE)+" AND i.html is null;")
for each in projects_list:
time.sleep(1)
try:
connection.execute(project_indexes.insert().values(datasource_id = DATASOURCE, unixname = each[0], last_updated = func.now(), html = getProjPage(each[0])))
except Exception as e:
print e
def getDevPage(name):
try:
temp_page = urllib.urlopen('http://'+name+'.tigris.org/servlets/ProjectMemberList')
result = temp_page.read()
temp_page.close()
return result
except Exception as e:
print e
print 'Something went wrong fetching dev page for '+str(name)+'.\nWaiting 30 sec and trying again.'
time.sleep(30)
return getDevPage(name)
def getProjectDevs():
projects_list = connection.execute("SELECT p.unixname FROM "+PROJECTS+" p LEFT OUTER JOIN "+DEV_INDEXES+" i ON p.unixname = i.project AND p.datasource_id = i.datasource_id WHERE p.datasource_id = "+str(DATASOURCE)+" AND i.html is null;")
for each in projects_list:
time.sleep(1)
try:
connection.execute(dev_indexes.insert().values(datasource_id = DATASOURCE, last_updated = func.now(), project = each[0], html = getDevPage(each[0])))
except Exception as e:
print e
def getDiscPage(name):
try:
temp_page = urllib.urlopen('http://'+name+'.tigris.org/ds/viewForums.do')
result = temp_page.read()
temp_page.close()
return result
except Exception as e:
print e
print 'Something went wrong fetching discuss page for '+str(name)+'.\nWaiting 30 sec and trying again.'
time.sleep(30)
return getDiscPage(name)
def getProjectDiscuss():
projects_list = connection.execute("SELECT p.unixname FROM "+PROJECTS+" p LEFT OUTER JOIN "+DISC_INDEXES+" i ON p.unixname = i.project AND p.datasource_id = i.datasource_id WHERE p.datasource_id = "+str(DATASOURCE)+" AND i.html is null;")
for each in projects_list:
time.sleep(1)
try:
connection.execute(disc_indexes.insert().values(datasource_id = DATASOURCE, last_updated = func.now(), project = each[0], html = getDiscPage(each[0])))
except Exception as e:
print e
if __name__ == '__main__':
    # Collection pipeline: listing -> project rows -> per-project pages.
    # Guarded so importing this module no longer starts a collection run.
    getProjectPage()
    getProjectList()
    getProjectIndexes()
    getProjectDevs()
    getProjectDiscuss()
| Python |
# Carter Kozak
# c4kofony@gmail.com
# ckozak@elon.edu
#collector/parser for Tigris mailing list data
# flossmole.org
# Copyright (C) 2011 Carter Kozak
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sqlalchemy
from sqlalchemy import *
import urllib
import urllib2
import re
import time
import sys
from datetime import datetime
from dateutil.parser import parse
import ConfigParser
import warnings
# Silence library warnings (e.g. MySQL duplicate-key warnings on re-runs).
warnings.filterwarnings('ignore')
config = ConfigParser.RawConfigParser()
config.read('tg.conf')
# Pull the DB credentials and every table name from tg.conf; abort when the
# config file is missing or incomplete.
try:
    DB_USER = config.get('tigris','user')
    DB_PASS = config.get('tigris','pass')
    DB_ADDRESS = config.get('tigris','address')
    PROJECT_LIST_INDEXES = config.get('tigris','proj_list_indexes')
    PROJ_INDEXES = config.get('tigris','project_indexes')
    PROJECTS = config.get('tigris','projects')
    PROJ_CAT = config.get('tigris','categories')
    DISC_INDEXES = config.get('tigris','disc_indexes')
    PEOPLE = config.get('tigris','people')
    #PROJ_DEV = config.get('tigris','proj_dev')
    DEV_ROLES = config.get('tigris','dev_roles')
    DEV_INDEXES = config.get('tigris','dev_indexes')
    DATASOURCE = config.getint('tigris','datasource')
    DISCUSS = config.get('tigris','discuss')
    MESSAGES = config.get('tigris','messages')
except Exception as e:
    print e
    print 'error reading tg.conf'
    sys.exit(1)
# Connect to MySQL and reflect every table named in the config; abort if any
# table cannot be autoloaded.
db = create_engine('mysql://'+DB_USER+':'+DB_PASS+'@'+DB_ADDRESS+'?charset=utf8&use_unicode=1')
connection = db.connect()
meta = MetaData()
meta.bind = connection
try:
    proj_list_indexes = Table(PROJECT_LIST_INDEXES, meta, autoload=True)
    project_indexes = Table(PROJ_INDEXES, meta, autoload=True)
    projects = Table(PROJECTS, meta, autoload=True)
    categories = Table(PROJ_CAT, meta, autoload=True)
    people = Table(PEOPLE, meta, autoload=True)
    #proj_dev = Table(PROJ_DEV, meta, autoload=True)
    dev_roles = Table(DEV_ROLES, meta, autoload=True)
    dev_indexes = Table(DEV_INDEXES, meta, autoload=True)
    disc_indexes = Table(DISC_INDEXES, meta, autoload=True)
    discuss = Table(DISCUSS, meta, autoload=True)
    messages = Table(MESSAGES, meta, autoload=True)
except Exception as e:
    print e
    print 'bad table info in tg.conf'
    sys.exit(1)
def downloadList(project, forumid):
    """Download the RSS message feed of one forum and insert every post.

    project -- tigris project unixname (used to build the feed URL)
    forumid -- dsForumId identifying the mailing list/forum
    Feeds behind a login raise HTTPError and are skipped with a notice.
    """
    url = 'http://'+str(project)+'.tigris.org/servlets/WebFeed?artifact=messages&dsForumId='+str(forumid)
    #f = urllib.urlopen(url)
    try:
        f = urllib2.urlopen(url)
        mailing_list = f.read()
        f.close()
        #posts = re.findall('<item>.*?</item>',mailing_list, re.DOTALL)
        # One tuple per <item>: (title, link, description, pubDate, guid,
        # creator, date).
        posts = re.findall(r'<item>\s*<title>\s*(.*?)\s*</title>\s*<link>\s*(.*?)\s*</link>\s*<description>\s*(.*?)\s*</description>\s*<pubDate>\s*(.*?)\s*</pubDate>\s*<guid>\s*(.*?)\s*</guid>\s*<dc:creator>\s*(.*?)\s*</dc:creator>\s*<dc:date>\s*(.*?)\s*</dc:date>\s*</item>',mailing_list, re.DOTALL)
        for post in posts:
            #print 'title: '+post[0]
            #print 'link: '+post[1]
            #print 'description: '+post[2]
            #print 'pubDate: '+post[3]
            #print 'guid: '+post[4]
            #print 'creator: '+post[5]
            #print 'date: '+post[6]
            # Both date fields are parsed with dateutil; postDateStr keeps
            # the raw <dc:date> text. Duplicate-key failures on re-runs are
            # deliberately ignored.
            try:
                connection.execute(messages.insert().values(datasource_id = DATASOURCE, title = post[0], link = post[1], description = post[2], pubDate = parse(post[3]), guid = post[4], creator = post[5], postDate = parse(post[6]),postDateStr = post[6], project = project, forumid = forumid))
            except Exception as e:
                pass
    except urllib2.HTTPError, x:
        print 'Ignoring '+str(project)+' : '+str(forumid)+' probably requires login'
def doLists():
    '''Walk every (project, forumid) pair recorded for this datasource and
    download the matching mailing-list feed.'''
    rows = connection.execute(
        "SELECT project,forumid FROM "+DISCUSS+" WHERE datasource_id = "+str(DATASOURCE)+";")
    for row in rows:
        downloadList(row['project'], row['forumid'])
doLists()
| Python |
'''
Created on Apr 12, 2010
This method is made to clean up the jobs left In_Progress by machine error and prepare them for a second run.
@author: StevenNorris
'''
from GoogleCodeUtils import GoogleCodeUtils
import sys
import GoogleCodeHomeCleanUp
import GoogleCodeUpdatesCleanUp
import GoogleCodePeopleCleanUp
import GoogleCodeDownloadsCleanUp
import GoogleCodeIssuesCleanUp
import GoogleCodeWikiCleanUp
import GoogleCodePeopleSpecificCleanUp
import GoogleCodeIssuesSpecificCleanUp
#main method for running clean ups
def main(argv):
    """Command-line entry point: clean up In_Progress jobs for one datasource.

    argv[1] -- datasource_id whose jobs should be reset
    argv[2] -- 'T' to use the test database (dbInfoTest.txt); anything else
               uses the production settings in dbInfo.txt
    """
    # Pull the two required command-line arguments, or print usage and quit.
    try:
        datasource_id = argv[1]
        test = argv[2]
    except:
        print("""RUN INSTRUCTIONS
Run this module from command line with the following format:
[Interpreter] GoogleCodeCleanUp.py [datasource_id] [Test T/F]
Test is a string variable. Be sure to use a capital 'T' to denote test mode.
Otherwise use 'F'.""")
        sys.exit()
    # Connect to the test or production database depending on the flag.
    try:
        if (test=='T'):
            print("TEST MODE ACTIVATED")
            utils = GoogleCodeUtils('dbInfoTest.txt')
        else:
            utils = GoogleCodeUtils('dbInfo.txt')
    except:
        print("Please create the dbInfo.txt and the dbInfoTest.txt files. See ReadMe for formatting.")
        sys.exit()
    # Run each stage-specific clean-up module, in pipeline order.
    for cleaner in (GoogleCodeHomeCleanUp,
                    GoogleCodeUpdatesCleanUp,
                    GoogleCodePeopleCleanUp,
                    GoogleCodeDownloadsCleanUp,
                    GoogleCodeIssuesCleanUp,
                    GoogleCodeWikiCleanUp,
                    GoogleCodePeopleSpecificCleanUp,
                    GoogleCodeIssuesSpecificCleanUp):
        cleaner.run(utils, datasource_id)
main(sys.argv)
'''
Created on Mar 27, 2010
This module does the spidering for the code.google.com project people pages.
@author: StevenNorris
'''
BASE_LINK="http://code.google.com/p/"
import sys
import time
import traceback
def run(utils,datasource_id):
    """Process every 'gather_people' job: download each project's
    people/list page and attach it to its gc_project_indexes row.

    utils         -- GoogleCodeUtils instance (job queue + DB helpers)
    datasource_id -- id of the datasource whose jobs are processed

    Exits the process outright when the job queue reports an error.
    """
    print("\nGathering people pages.")
    job=utils.get_job(datasource_id,"gather_people")
    if(utils.error):
        sys.exit()
    while(job!=None):
        time.sleep(3)  # throttle requests to code.google.com
        try:
            unixname=job[0]
            print("Gathering for "+unixname)
            people=utils.get_page(BASE_LINK+unixname+"/people/list")
            if(people):
                insert="""UPDATE gc_project_indexes SET peoplehtml=%s, last_modified=NOW() WHERE unixname=%s AND datasource_id=%s"""
                utils.db_insert(insert,people,unixname,datasource_id)
                # Advance the job to the next pipeline stage.
                utils.change_status('gather_downloads','gather_people',datasource_id,unixname)
                job=utils.get_job(datasource_id,'gather_people')
                if(utils.error):
                    sys.exit()
            else:
                # Page missing or fetch failed: record the error, take next job.
                print("!!!!WARNING!!!! People page gathering failed for "+unixname)
                utils.post_error('gather_people: \nPeople page either did not exist or fail to collect.' ,datasource_id,unixname)
                job=utils.get_job(datasource_id,'gather_people')
                if(utils.error):
                    sys.exit()
        # BUG FIX: was a bare "except:", which also caught the SystemExit
        # raised by sys.exit() above, so error-abort never terminated the run.
        except Exception:
            print("!!!!WARNING!!! People page collection failed")
            utils.post_error('gather_people:\n'+traceback.format_exc(),datasource_id,unixname)
            job=utils.get_job(datasource_id,'gather_people')
            if(utils.error):
                sys.exit()
| Python |
'''
Created on Mar 27, 2010
This module is designed to populate the jobs database for sourceforge.net.
RUN INSTRUCTIONS
Run this module from command line with the following format:
[Interpreter] GoogleCodeJobs.py [datasource_id] [Test T/F]
Test is a string variable. Be sure to use a capital 'T' to denote test mode.
Otherwise use 'F'.
@author: StevenNorris
'''
import sys
from GoogleCodeUtils import GoogleCodeUtils
import traceback
import socket
#adds the jobs to the sf_jobs table in the selected database
def main(argv):
    """Populate gc_jobs with one 'gather_home' job per known project.

    argv[1] -- datasource_id whose projects are turned into jobs
    argv[2] -- 'T' for test mode (test DB, first 50 projects only);
               anything else processes every project against dbInfo.txt
    """
    # Pull the two required command-line arguments, or print usage and quit.
    try:
        datasource_id=argv[1]
        test=argv[2]
    except:
        print ("""RUN INSTRUCTIONS\n
Run this module from command line with the following format:\n
[Interpreter] GoogleCodeJobs.py [datasource_id] [Test T/F]\n
Test is a string variable. Be sure to use a capital 'T' to denote test mode.\n
Otherwise use 'F'.""")
        sys.exit()
    # Connect to the test or production database depending on the flag.
    if(test=='T'):
        try:
            print("TEST MODE ACTIVATED")
            utils=GoogleCodeUtils('dbInfoTest.txt')
        except:
            print("Please create the dbInfo.txt and the dbInfoTest.txt files. See ReadMe for formatting.")
            sys.exit()
    else:
        try:
            utils=GoogleCodeUtils('dbInfo.txt')
        except:
            # BUG FIX: message previously named a non-existent 'dbInfoText.txt'.
            print("Please create the dbInfo.txt and the dbInfoTest.txt files. See ReadMe for formatting.")
            sys.exit()
    # Gather project unixnames and create one queue entry per project.
    try:
        print("Gathering unixnames.")
        projects_list=utils.get_projects(datasource_id)
        # Test mode only seeds the first 50 projects.
        if(test=='T'):
            end=50
        else:
            end=len(projects_list)
        try:
            print("Creating Jobs")
            for project in projects_list[0:end]:
                project=project[0]
                print("Creating job for "+project)
                try:
                    insert='''INSERT INTO gc_jobs (unixname,datasource_id,status,last_modified,modified_by)
                    VALUES(%s,%s,'gather_home',NOW(),%s)'''
                    utils.db_insert(insert,project,datasource_id,socket.gethostname())
                except:
                    print('!!!!WARNING!!!! Job creation failed for '+project+'.')
                    print(traceback.format_exc())
        except:
            # BUG FIX: corrected 'succesfully' typo in the warning message.
            print('!!!!WARNING!!!! Jobs did not create successfully')
            print(traceback.format_exc())
    except:
        print('!!!!WARNING!!!! Projects unixnames not collected properly.')
        print(traceback.format_exc())
# Leftover debug helper: constructs a test-DB utils object and discards it.
# NOTE(review): never called anywhere visible -- candidate for removal.
def test(argv):
    utils=GoogleCodeUtils('dbInfoTest.txt')
# Runs on import: this module has no __main__ guard, so importing it
# immediately seeds the job queue.
main(sys.argv)
| Python |
'''
Created on Apr 12, 2010
This module performs the clean up for gather_issues jobs.
@author: StevenNorris
'''
import sys
import traceback
def run(utils,datasource_id):
    """Clean up interrupted 'gather_issues' jobs: delete the partially
    collected issues page and mark each job 'Clean_Up' for a re-run.

    utils         -- GoogleCodeUtils instance (job queue + DB helpers)
    datasource_id -- datasource whose jobs are cleaned

    Exits the process when the job queue reports an error.
    """
    print("\nStarting issues clean up.")
    job=utils.get_cleanup_job(datasource_id,'gather_issues')
    if(utils.error):
        sys.exit()
    while(job!=None):
        try:
            unixname=job[0]
            # Py3-compatible print call, consistent with the rest of the file.
            print("Cleaning up for "+unixname)
            utils.delete_issues(unixname,datasource_id)
            utils.change_status('gather_issues','Clean_Up',datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_issues')
            if(utils.error):
                sys.exit()
        # BUG FIX: was a bare "except:", which also swallowed the SystemExit
        # raised by sys.exit() above, defeating the error-abort mechanism.
        except Exception:
            print("!!!!WARNING!!!! Clean up issues for "+unixname+" failed.")
            utils.post_error('Clean_Up(issues):\n'+traceback.format_exc(),datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_issues')
            if(utils.error):
                sys.exit()
| Python |
'''
Created on Mar 27, 2010
This module does the spidering for the code.google.com project home pages.
@author: StevenNorris
'''
BASE_LINK="http://code.google.com/p/"
import sys
import time
import traceback
def run(utils,datasource_id):
    """Process every 'gather_home' job: download each project's home page
    and create its gc_project_indexes row (first stage of the pipeline).

    utils         -- GoogleCodeUtils instance (job queue + DB helpers)
    datasource_id -- id of the datasource whose jobs are processed

    Exits the process outright when the job queue reports an error.
    """
    print("\nGathering home pages.")
    job=utils.get_job(datasource_id,"gather_home")
    if(utils.error):
        sys.exit()
    while(job!=None):
        time.sleep(3)  # throttle requests to code.google.com
        try:
            unixname=job[0]
            print("Gathering for "+unixname)
            home=utils.get_page(BASE_LINK+unixname)
            if(home):
                # First stage INSERTs the row; later stages UPDATE it.
                insert="""INSERT INTO gc_project_indexes (unixname,homehtml,last_modified,datasource_id)
                VALUES(%s,%s,NOW(),%s)"""
                utils.db_insert(insert,unixname,home,datasource_id)
                utils.change_status('gather_updates','gather_home',datasource_id,unixname)
                job=utils.get_job(datasource_id,'gather_home')
                if(utils.error):
                    sys.exit()
            else:
                print("!!!!WARNING!!!! Home page gathering failed for "+unixname)
                utils.post_error('gather_home: \nHome page either did not exist or fail to collect.' ,datasource_id,unixname)
                job=utils.get_job(datasource_id,'gather_home')
                if(utils.error):
                    sys.exit()
        # BUG FIX: was a bare "except:" (also caught SystemExit from sys.exit()).
        except Exception:
            print("!!!!WARNING!!! Home page collection failed")
            utils.post_error('gather_home:\n'+traceback.format_exc(),datasource_id,unixname)
            # BUG FIX: previously requested status 'gather_index', a status no
            # stage ever sets, so the loop ended after any exception instead of
            # continuing with the remaining 'gather_home' jobs.
            job=utils.get_job(datasource_id,'gather_home')
            if(utils.error):
                sys.exit()
'''
Created on Apr 12, 2010
This module performs the clean up for gather_home jobs.
@author: StevenNorris
'''
import sys
import traceback
def run(utils,datasource_id):
    """Clean up interrupted 'gather_home' jobs: delete the project's index
    row and mark each job 'Clean_Up' so it can be re-run from the start.

    utils         -- GoogleCodeUtils instance (job queue + DB helpers)
    datasource_id -- datasource whose jobs are cleaned

    Exits the process when the job queue reports an error.
    """
    print("\nStarting home clean up.")
    job=utils.get_cleanup_job(datasource_id,'gather_home')
    if(utils.error):
        sys.exit()
    while(job!=None):
        try:
            unixname=job[0]
            # Py3-compatible print call, consistent with the rest of the file.
            print("Cleaning up for "+unixname)
            utils.delete_home(unixname,datasource_id)
            utils.change_status('gather_home','Clean_Up',datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_home')
            if(utils.error):
                sys.exit()
        # BUG FIX: was a bare "except:", which also swallowed the SystemExit
        # raised by sys.exit() above, defeating the error-abort mechanism.
        except Exception:
            print("!!!!WARNING!!!! Clean up home for "+unixname+" failed.")
            utils.post_error('Clean_Up(home):\n'+traceback.format_exc(),datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_home')
            if(utils.error):
                sys.exit()
| Python |
'''
Created on Mar 27, 2010
This module does the spidering for the code.google.com project wiki pages.
@author: StevenNorris
'''
BASE_LINK="http://code.google.com/p/"
import sys
import time
import traceback
def run(utils,datasource_id):
    """Process every 'gather_wiki' job: download each project's w/list page
    and attach it to its gc_project_indexes row.

    utils         -- GoogleCodeUtils instance (job queue + DB helpers)
    datasource_id -- id of the datasource whose jobs are processed

    Exits the process outright when the job queue reports an error.
    """
    print("\nGathering wiki pages.")
    job=utils.get_job(datasource_id,"gather_wiki")
    if(utils.error):
        sys.exit()
    while(job!=None):
        time.sleep(3)  # throttle requests to code.google.com
        try:
            unixname=job[0]
            print("Gathering for "+unixname)
            wiki=utils.get_page(BASE_LINK+unixname+"/w/list")
            if(wiki):
                insert="""UPDATE gc_project_indexes SET wikihtml=%s, last_modified=NOW() WHERE unixname=%s AND datasource_id=%s"""
                utils.db_insert(insert,wiki,unixname,datasource_id)
                # Advance the job to the next pipeline stage.
                utils.change_status('gather_people_specific','gather_wiki',datasource_id,unixname)
                job=utils.get_job(datasource_id,'gather_wiki')
                if(utils.error):
                    sys.exit()
            else:
                print("!!!!WARNING!!!! Wiki page gathering failed for "+unixname)
                utils.post_error('gather_wiki: \nWiki page either did not exist or fail to collect.' ,datasource_id,unixname)
                job=utils.get_job(datasource_id,'gather_wiki')
                if(utils.error):
                    sys.exit()
        # BUG FIX: was a bare "except:" (also caught SystemExit from sys.exit()).
        except Exception:
            print("!!!!WARNING!!! Wiki page collection failed")
            utils.post_error('gather_wiki:\n'+traceback.format_exc(),datasource_id,unixname)
            job=utils.get_job(datasource_id,'gather_wiki')
            if(utils.error):
                sys.exit()
| Python |
'''
Created on Apr 12, 2010
This module performs the clean up for gather_updates jobs.
@author: StevenNorris
'''
import sys
import traceback
def run(utils,datasource_id):
    """Clean up interrupted 'gather_updates' jobs: null out the partially
    collected updates page and mark each job 'Clean_Up' for a re-run.

    utils         -- GoogleCodeUtils instance (job queue + DB helpers)
    datasource_id -- datasource whose jobs are cleaned

    Exits the process when the job queue reports an error.
    """
    print("\nStarting updates clean up.")
    job=utils.get_cleanup_job(datasource_id,'gather_updates')
    if(utils.error):
        sys.exit()
    while(job!=None):
        try:
            unixname=job[0]
            # Py3-compatible print call, consistent with the rest of the file.
            print("Cleaning up for "+unixname)
            utils.delete_updates(unixname,datasource_id)
            utils.change_status('gather_updates','Clean_Up',datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_updates')
            if(utils.error):
                sys.exit()
        # BUG FIX: was a bare "except:", which also swallowed the SystemExit
        # raised by sys.exit() above, defeating the error-abort mechanism.
        except Exception:
            print("!!!!WARNING!!!! Clean up updates for "+unixname+" failed.")
            utils.post_error('Clean_Up(updates):\n'+traceback.format_exc(),datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_updates')
            if(utils.error):
                sys.exit()
| Python |
'''
Created on Mar 27, 2010
This module does the spidering for the code.google.com project individual people pages.
@author: StevenNorris
'''
BASE_LINK="http://code.google.com"
import sys
import time
import traceback
import re
#This method spiders the given page for the specific people links
def peopleSpider(html):
    """Scan a project people-list page for developer profile links.

    Returns a list of (username, href) tuples in page order, or None when
    the page contains no profile links at all.
    """
    hrefs = re.findall('<a style="white-space: nowrap" href="(/u/.+?)"', html)
    if not hrefs:
        return None
    # Each href looks like "/u/<name>/": drop the "/u/" prefix and the
    # trailing slash to recover the bare user name.
    return [(href[3:-1], href) for href in hrefs]
def run(utils,datasource_id):
    """Process every 'gather_people_specific' job: parse the stored home
    page for developer profile links and download each developer's page
    into gc_developer_indexes (plus a gc_developer_projects join row).

    utils         -- GoogleCodeUtils instance (job queue + DB helpers)
    datasource_id -- id of the datasource whose jobs are processed

    Exits the process outright when the job queue reports an error.
    """
    print("\nGathering individual people pages.")
    job=utils.get_job(datasource_id,"gather_people_specific")
    if(utils.error):
        sys.exit()
    while(job!=None):
        time.sleep(3)  # throttle requests to code.google.com
        try:
            unixname=job[0]
            print ("Gathering for "+unixname)
            home=utils.get_home(datasource_id, unixname)
            if(home):
                home=home[0]
                # The generic "Project hosting on Google Code" title marks a
                # forbidden / non-existent project page.
                if(re.search('<title>Project hosting on Google Code</title>',home)==None):
                    links=peopleSpider(home)
                    if(links):
                        for link in links:
                            time.sleep(3)
                            name=link[0]
                            link=link[1]
                            print("\tInserting developer "+name+" for "+unixname)
                            dev=utils.get_page(BASE_LINK+link)
                            insertJoin="""INSERT IGNORE INTO gc_developer_projects (unixname,dev_name,datasource_id,last_modified)
                            VALUES(%s,%s,%s,NOW())"""
                            insert="""INSERT IGNORE INTO gc_developer_indexes (dev_name,datasource_id,devhtml,last_modified)
                            VALUES(%s,%s,%s,NOW())"""
                            utils.db_insert(insertJoin,unixname,name,datasource_id)
                            utils.db_insert(insert,name,datasource_id,dev)
                        utils.change_status("gather_issues_specific","gather_people_specific",datasource_id,unixname)
                        job=utils.get_job(datasource_id,'gather_people_specific')
                        if(utils.error):
                            sys.exit()
                    else:
                        # No developer links: still advance to the next stage.
                        print("!! No links found for "+unixname)
                        utils.change_status("gather_issues_specific","gather_people_specific",datasource_id,unixname)
                        job=utils.get_job(datasource_id,'gather_people_specific')
                        if(utils.error):
                            sys.exit()
                else:
                    # Forbidden page: nothing to collect, advance the job.
                    print("!! Home page led to a forbidden page.")
                    utils.change_status("gather_issues_specific","gather_people_specific",datasource_id,unixname)
                    job=utils.get_job(datasource_id,'gather_people_specific')
                    if(utils.error):
                        sys.exit()
            else:
                print("!!!WARNING!!!! Home page gathering failed for "+unixname)
                utils.post_error('gather_people_specific: \nHome page did not collect correctly.' ,datasource_id,unixname)
                job=utils.get_job(datasource_id,'gather_people_specific')
                if(utils.error):
                    sys.exit()
        # BUG FIX: was a bare "except:" (also caught SystemExit from sys.exit()).
        except Exception:
            print("!!!!WARNING!!! Specific people pages collection failed")
            utils.post_error('gather_people_specific:\n'+traceback.format_exc(),datasource_id,unixname)
            job=utils.get_job(datasource_id,'gather_people_specific')
            if(utils.error):
                sys.exit()
'''
Created on Apr 12, 2010
This module performs the clean up for gather_downloads jobs.
@author: StevenNorris
'''
import sys
import traceback
def run(utils,datasource_id):
    """Clean up interrupted 'gather_downloads' jobs: null out the partially
    collected downloads page and mark each job 'Clean_Up' for a re-run.

    utils         -- GoogleCodeUtils instance (job queue + DB helpers)
    datasource_id -- datasource whose jobs are cleaned

    Exits the process when the job queue reports an error.
    """
    print("\nStarting downloads clean up.")
    job=utils.get_cleanup_job(datasource_id,'gather_downloads')
    if(utils.error):
        sys.exit()
    while(job!=None):
        try:
            unixname=job[0]
            # Py3-compatible print call, consistent with the rest of the file.
            print("Cleaning up for "+unixname)
            utils.delete_downloads(unixname,datasource_id)
            utils.change_status('gather_downloads','Clean_Up',datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_downloads')
            if(utils.error):
                sys.exit()
        # BUG FIX: was a bare "except:", which also swallowed the SystemExit
        # raised by sys.exit() above, defeating the error-abort mechanism.
        except Exception:
            print("!!!!WARNING!!!! Clean up downloads for "+unixname+" failed.")
            utils.post_error('Clean_Up(downloads):\n'+traceback.format_exc(),datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_downloads')
            if(utils.error):
                sys.exit()
| Python |
'''
Created on Apr 12, 2010
This module performs the clean up for gather_people jobs.
@author: StevenNorris
'''
import sys
import traceback
def run(utils,datasource_id):
    """Clean up interrupted 'gather_people' jobs: null out the partially
    collected people page and mark each job 'Clean_Up' for a re-run.

    utils         -- GoogleCodeUtils instance (job queue + DB helpers)
    datasource_id -- datasource whose jobs are cleaned

    Exits the process when the job queue reports an error.
    """
    print("\nStarting people clean up.")
    job=utils.get_cleanup_job(datasource_id,'gather_people')
    if(utils.error):
        sys.exit()
    while(job!=None):
        try:
            unixname=job[0]
            # Py3-compatible print call, consistent with the rest of the file.
            print("Cleaning up for "+unixname)
            utils.delete_people(unixname,datasource_id)
            utils.change_status('gather_people','Clean_Up',datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_people')
            if(utils.error):
                sys.exit()
        # BUG FIX: was a bare "except:", which also swallowed the SystemExit
        # raised by sys.exit() above, defeating the error-abort mechanism.
        except Exception:
            print("!!!!WARNING!!!! Clean up people for "+unixname+" failed.")
            utils.post_error('Clean_Up(people):\n'+traceback.format_exc(),datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_people')
            if(utils.error):
                sys.exit()
| Python |
'''
Created on Mar 27, 2010
This module does the spidering for the code.google.com project issues pages.
@author: StevenNorris
'''
BASE_LINK="http://code.google.com/p/"
import sys
import time
import traceback
def run(utils,datasource_id):
    """Process every 'gather_issues' job: download each project's
    issues/list page and attach it to its gc_project_indexes row.

    utils         -- GoogleCodeUtils instance (job queue + DB helpers)
    datasource_id -- id of the datasource whose jobs are processed

    Exits the process outright when the job queue reports an error.
    """
    print("\nGathering issues pages.")
    job=utils.get_job(datasource_id,"gather_issues")
    if(utils.error):
        sys.exit()
    while(job!=None):
        time.sleep(3)  # throttle requests to code.google.com
        try:
            unixname=job[0]
            print("Gathering for "+unixname)
            issues=utils.get_page(BASE_LINK+unixname+"/issues/list")
            if(issues):
                insert="""UPDATE gc_project_indexes SET issueshtml=%s, last_modified=NOW() WHERE unixname=%s AND datasource_id=%s"""
                utils.db_insert(insert,issues,unixname,datasource_id)
                # Advance the job to the next pipeline stage.
                utils.change_status('gather_wiki','gather_issues',datasource_id,unixname)
                job=utils.get_job(datasource_id,'gather_issues')
                if(utils.error):
                    sys.exit()
            else:
                print("!!!!WARNING!!!! Issues page gathering failed for "+unixname)
                utils.post_error('gather_issues: \nIssues page either did not exist or fail to collect.' ,datasource_id,unixname)
                job=utils.get_job(datasource_id,'gather_issues')
                if(utils.error):
                    sys.exit()
        # BUG FIX: was a bare "except:" (also caught SystemExit from sys.exit()).
        except Exception:
            print("!!!!WARNING!!! Issues page collection failed")
            utils.post_error('gather_issues:\n'+traceback.format_exc(),datasource_id,unixname)
            job=utils.get_job(datasource_id,'gather_issues')
            if(utils.error):
                sys.exit()
| Python |
'''
Created on Apr 12, 2010
This module performs the clean up for gather_people_specific jobs.
@author: StevenNorris
'''
import sys
import traceback
def run(utils,datasource_id):
    """Clean up interrupted 'gather_people_specific' jobs.

    Unlike the other clean-up stages there is no per-project data to delete
    (developer rows are INSERT IGNOREd), so each job is simply moved back to
    'Clean_Up' for a re-run.

    utils         -- GoogleCodeUtils instance (job queue + DB helpers)
    datasource_id -- datasource whose jobs are cleaned

    Exits the process when the job queue reports an error.
    """
    print("\nStarting people specific clean up.")
    job=utils.get_cleanup_job(datasource_id,'gather_people_specific')
    if(utils.error):
        sys.exit()
    while(job!=None):
        try:
            unixname=job[0]
            # Py3-compatible print call, consistent with the rest of the file.
            print("Cleaning up for "+unixname)
            utils.change_status('gather_people_specific','Clean_Up',datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_people_specific')
            if(utils.error):
                sys.exit()
        # BUG FIX: was a bare "except:", which also swallowed the SystemExit
        # raised by sys.exit() above, defeating the error-abort mechanism.
        except Exception:
            print("!!!!WARNING!!!! Clean up people specific for "+unixname+" failed.")
            utils.post_error('Clean_Up(people specific):\n'+traceback.format_exc(),datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_people_specific')
            if(utils.error):
                sys.exit()
| Python |
'''
Created on Apr 12, 2010
This module performs the clean up for gather_wiki jobs.
@author: StevenNorris
'''
import sys
import traceback
def run(utils,datasource_id):
    """Clean up interrupted 'gather_wiki' jobs: null out the partially
    collected wiki page and mark each job 'Clean_Up' for a re-run.

    utils         -- GoogleCodeUtils instance (job queue + DB helpers)
    datasource_id -- datasource whose jobs are cleaned

    Exits the process when the job queue reports an error.
    """
    print("\nStarting wiki clean up.")
    job=utils.get_cleanup_job(datasource_id,'gather_wiki')
    if(utils.error):
        sys.exit()
    while(job!=None):
        try:
            unixname=job[0]
            # Py3-compatible print call, consistent with the rest of the file.
            print("Cleaning up for "+unixname)
            utils.delete_wiki(unixname,datasource_id)
            utils.change_status('gather_wiki','Clean_Up',datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_wiki')
            if(utils.error):
                sys.exit()
        # BUG FIX: was a bare "except:", which also swallowed the SystemExit
        # raised by sys.exit() above, defeating the error-abort mechanism.
        except Exception:
            print("!!!!WARNING!!!! Clean up wiki for "+unixname+" failed.")
            utils.post_error('Clean_Up(wiki):\n'+traceback.format_exc(),datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'gather_wiki')
            if(utils.error):
                sys.exit()
| Python |
'''
Created on Mar 27, 2010
This module is designed to run the necessary code to spider the information from
code.google.com and add the information to the oss_mole database.
RUN INSTRUCTIONS
Run this module from command line with the following format:
[Interpreter] GoogleCodeSpider.py [datasource_id] [Test T/F]
Test is a string variable. Be sure to use a capital 'T' to denote test mode.
Otherwise use 'F'.
@author: StevenNorris
'''
from GoogleCodeUtils import GoogleCodeUtils
import sys
import GoogleCodeHome
import GoogleCodeUpdates
import GoogleCodePeople
import GoogleCodeWiki
import GoogleCodeIssues
import GoogleCodePeopleSpecific
import GoogleCodeIssuesSpecific
import GoogleCodeDownloads
#this method runs all necessary method for spidering sourceforge.net
def main(argv):
    """Command-line entry point: run the full Google Code spider pipeline.

    argv[1] -- datasource_id whose jobs should be processed
    argv[2] -- 'T' to use the test database (dbInfoTest.txt); anything else
               uses the production settings in dbInfo.txt
    """
    # Pull the two required command-line arguments, or print usage and quit.
    try:
        datasource_id = argv[1]
        test = argv[2]
    except:
        print("""RUN INSTRUCTIONS
Run this module from command line with the following format:
[Interpreter] GoogleCodeSpider.py [datasource_id] [Test T/F]
Test is a string variable. Be sure to use a capital 'T' to denote test mode.
Otherwise use 'F'.""")
        sys.exit()
    # Connect to the test or production database depending on the flag.
    try:
        if (test=='T'):
            print("TEST MODE ACTIVATED")
            utils = GoogleCodeUtils('dbInfoTest.txt')
        else:
            utils = GoogleCodeUtils('dbInfo.txt')
    except:
        print("Please create the dbInfo.txt and the dbInfoTest.txt files. See ReadMe for formatting.")
        sys.exit()
    # Run each spider stage in pipeline order.
    for stage in (GoogleCodeHome,
                  GoogleCodeUpdates,
                  GoogleCodePeople,
                  GoogleCodeDownloads,
                  GoogleCodeIssues,
                  GoogleCodeWiki,
                  GoogleCodePeopleSpecific,
                  GoogleCodeIssuesSpecific):
        stage.run(utils, datasource_id)
main(sys.argv)
| Python |
'''
Created on Mar 27, 2010
This module does the spidering for the code.google.com project individual issue pages.
@author: StevenNorris
'''
BASE_LINK="http://code.google.com"
import sys
import time
import traceback
import re
#This method spiders the given page for the individual issue links
def issuesSpider(html):
    """Scan an issue-list page for links to individual issues.

    Returns a list of (issue_id_string, href) tuples in page order, or
    None when the page contains no issue links at all.
    """
    hrefs = re.findall('<a href="(detail\?id=.+?)"', html)
    if not hrefs:
        return None
    # href is "detail?id=<n>": the id starts after the 10-char prefix.
    return [(href[10:], href) for href in hrefs]
#This method spiders the given page for the next page link
def nextSpider(html):
    """Return the href of the "Next" pagination link, or None if absent."""
    found = re.search('<a href="(?P<Link>list\?start=.+?)">Next', html)
    return found.group('Link') if found else None
def run(utils,datasource_id):
    """Process every 'gather_issues_specific' job: walk the paginated issue
    list stored for each project and download every individual issue page
    not already present in gc_issues_indexes.

    utils         -- GoogleCodeUtils instance (job queue + DB helpers)
    datasource_id -- id of the datasource whose jobs are processed

    Exits the process outright when the job queue reports an error.
    """
    print("\nGathering individual issues pages.")
    job=utils.get_job(datasource_id,"gather_issues_specific")
    if(utils.error):
        sys.exit()
    while(job!=None):
        try:
            unixname=job[0]
            print ("Gathering for "+unixname)
            issues=utils.get_issues(datasource_id, unixname)
            if(issues):
                issues=issues[0]
            else:
                issues=None
            # Issue ids already collected for this project (skip re-fetching).
            ids=utils.get_issue_ids(unixname)
            page_num=1
            has_next=True   # renamed from 'next' to avoid shadowing the builtin
            error=False
            while(has_next):
                print("\tGathering for page "+str(page_num))
                if(issues):
                    # The generic title marks a forbidden / non-existent page.
                    if(re.search('<title>Project hosting on Google Code</title>',issues)==None):
                        links=issuesSpider(issues)
                        if(links):
                            for link in links:
                                issue_id=int(link[0])   # renamed from 'id' (builtin)
                                detail_link=link[1]
                                if(not issue_id in ids):
                                    time.sleep(3)  # throttle requests
                                    print("\t\tInserting issue id "+str(issue_id)+" for "+unixname)
                                    issuePage=utils.get_page(BASE_LINK+"/p/"+unixname+"/issues/"+detail_link)
                                    insert="""INSERT IGNORE INTO gc_issues_indexes (unixname,issue_id,html,datasource_id,last_modified)
                                    VALUES(%s,%s,%s,%s,NOW())"""
                                    utils.db_insert(insert,unixname,issue_id,issuePage,datasource_id)
                                else:
                                    # was Py2 'print"..."'; normalized to a call
                                    print("\t\tIssue id "+str(issue_id)+" has already been collected for "+unixname)
                            # Follow the "Next" pagination link, if any.
                            next_link=nextSpider(issues)
                            if(next_link):
                                page_num=page_num+1
                                issues=utils.get_page(BASE_LINK+"/p/"+unixname+"/issues/"+next_link)
                            else:
                                has_next=False
                        else:
                            print("\t!! No links found for "+unixname+" at page "+str(page_num))
                            has_next=False
                    else:
                        print("\t!! Home page led to a forbidden page.")
                        has_next=False
                else:
                    # BUG FIX: page_num was concatenated without str(), raising
                    # TypeError (str + int) whenever this branch was reached.
                    print("!!!WARNING!!!! Issues page gathering failed for "+unixname+" page "+str(page_num))
                    error_msg='gather_issues_specific: \nIssues page did not collect correctly.'
                    error=True
                    has_next=False
            if(error):
                utils.post_error(error_msg,datasource_id,unixname)
                job=utils.get_job(datasource_id,'gather_issues_specific')
                if(utils.error):
                    sys.exit()
            else:
                # Last pipeline stage: mark the whole job completed.
                utils.change_status("completed","gather_issues_specific",datasource_id,unixname)
                job=utils.get_job(datasource_id,'gather_issues_specific')
                if(utils.error):
                    sys.exit()
        # BUG FIX: was a bare "except:" (also caught SystemExit from sys.exit()).
        except Exception:
            print("!!!!WARNING!!! Individual issue pages collection failed")
            utils.post_error('gather_issues_specific:\n'+traceback.format_exc(),datasource_id,unixname)
            job=utils.get_job(datasource_id,'gather_issues_specific')
            if(utils.error):
                sys.exit()
'''
Created on Mar 27, 2010
This module includes the necessary utilities for the Google Code spider.
@author: StevenNorris
'''
import MySQLdb
import traceback
import urllib2
import socket
class GoogleCodeUtils:
#this gathers the initial connection to the database
def __init__(self,file_name):
try:
dbfile = open(file_name, 'r')
except:
print(traceback.format_exc())
raise Exception("Database file error: "+file_name)
self.host = dbfile.readline().strip()
self.port = int(dbfile.readline().strip())
self.username = dbfile.readline().strip()
self.password = dbfile.readline().strip()
self.database = dbfile.readline().strip()
self.db=MySQLdb.connect(host=self.host, user=self.username, passwd=self.password, db=self.database)
self.cursor = self.db.cursor()
self.error=False
'''
This method provides the ability to gather a page
'''
def get_page(self,url):
try:
response = urllib2.urlopen(url)
html = response.read()
return html
except:
print ("The page request failed.")
'''
This method provides the ability to insert into a database
'''
def db_insert(self,query_string,*params):
try:
self.cursor.execute(query_string, params)
except:
print("!!!!WARNING!!!! Insertion into "+self.database+" failed.")
print(traceback.format_exc())
'''
This method provides the ability to get a job from the job database.
'''
def get_job(self, datasource_id, status):
lock = '''LOCK TABLE gc_jobs READ, gc_jobs AS t WRITE'''
select = '''SELECT unixname
FROM gc_jobs AS t
WHERE status = %s
AND datasource_id = %s
ORDER BY unixname
LIMIT 1'''
update='''UPDATE gc_jobs AS t SET status='In_Progress', last_modified=NOW()
WHERE datasource_id=%s
AND unixname=%s
'''
unlock = '''UNLOCK TABLES'''
try:
self.cursor.execute(lock)
self.cursor.execute(select, (status,datasource_id))
result = self.cursor.fetchone()
self.cursor.execute(update,(datasource_id, result[0]))
self.cursor.execute(unlock)
return result
except:
print ("Finding job failed.")
self.cursor.execute(unlock)
#this method allows for status changes
def change_status(self,status,previous,datasource_id,unixname):
update='''UPDATE gc_jobs
SET status=%s, last_modified=NOW(), previous_stage=%s, modified_by=%s
WHERE datasource_id=%s
AND unixname=%s
'''
try:
self.cursor.execute(update,(status,previous,socket.gethostname(),datasource_id,unixname))
except:
print('!!!!WARNING!!!! Status '+status+' did not update correctly for '+unixname+' with id '+datasource_id+'.')
print(traceback.format_exc())
self.error=True
#this method allows for error posting
def post_error(self,message,datasource_id,unixname):
update='''UPDATE gc_jobs
SET error_msg=%s, status='error', last_modified=NOW(), modified_by=%s
WHERE datasource_id=%s
AND unixname=%s'''
try:
self.cursor.execute(update,(message,socket.gethostname(),datasource_id,unixname))
except:
print('!!!!WARNING!!!! Error '+message+'could not be posted for'+unixname+' at '+datasource_id+'.')
self.error=True
#Gathers the projects list from projects_list
def get_projects(self,datasource_id):
try:
select="SELECT proj_name FROM gc_projects WHERE datasource_id=%s"
self.cursor.execute(select,(datasource_id))
projects_list=self.cursor.fetchall()
return projects_list
except:
print("!!!!WARNING!!! Collecting projects list failed.")
#Gathers the homepage for a specific project
def get_home(self,datasource_id,unixname):
try:
select='''SELECT homehtml FROM gc_project_indexes WHERE datasource_id=%s AND unixname=%s'''
self.cursor.execute(select,(datasource_id,unixname))
home=self.cursor.fetchone()
return home
except:
print("!!!!WARNING!!!! Collecting home page failed.")
print(traceback.format_exc())
#Gathers the issuespage for a specific project
def get_issues(self,datasource_id,unixname):
try:
select='''SELECT issueshtml FROM gc_project_indexes WHERE datasource_id=%s AND unixname=%s'''
self.cursor.execute(select,(datasource_id,unixname))
home=self.cursor.fetchone()
return home
except:
print("!!!!WARNING!!!! Collecting issues page failed.")
print(traceback.format_exc())
#Gathers the issues ids for a specific project
def get_issue_ids(self,unixname):
try:
select='''SELECT issue_id FROM gc_issues_indexes WHERE unixname=%s'''
self.cursor.execute(select,(unixname))
idsRaw=self.cursor.fetchall()
ids=[]
for id in idsRaw:
ids.append(id[0])
return ids
except:
print("!!!!WARNING!!!! Collecting issue ids failed.")
print(traceback.format_exc())
'''
This method provides the ability to get a clean up job from the job database.
'''
def get_cleanup_job(self, datasource_id, previousStage):
lock = '''LOCK TABLE gc_jobs READ, gc_jobs AS t WRITE'''
select = '''SELECT unixname
FROM gc_jobs AS t
WHERE status = 'In_Progress'
AND datasource_id = %s
AND previous_stage = %s
ORDER BY unixname
LIMIT 1'''
update='''UPDATE gc_jobs AS t SET status='Clean_Up', last_modified=NOW()
WHERE datasource_id=%s
AND unixname=%s
'''
unlock = '''UNLOCK TABLES'''
try:
self.cursor.execute(lock)
self.cursor.execute(select, (datasource_id,previousStage))
result = self.cursor.fetchone()
self.cursor.execute(update,(datasource_id, result[0]))
self.cursor.execute(unlock)
return result
except:
print ("Finding job failed.")
self.cursor.execute(unlock)
#This method allows for the deletion of a project from the gc_project_indexes
def delete_home(self,unixname,datasource_id):
try:
update="""DELETE FROM gc_project_indexes WHERE unixname=%s AND datasource_id=%s"""
self.cursor.execute(update,(unixname,datasource_id))
except:
print("!!!!WARNING!!!! Deletion of home failed.")
print (traceback.format_exc())
#This method allows for the deletion of a updates page for a project from the gc_project_indexes
def delete_updates(self,unixname,datasource_id):
try:
update="""UPDATE gc_project_indexes SET updateshtml=NULL WHERE unixname=%s AND datasource_id=%s"""
self.cursor.execute(update,(unixname,datasource_id))
except:
print("!!!!WARNING!!!! Deletion of updates page failed.")
print (traceback.format_exc())
#This method allows for the deletion of a people page for a project from the gc_project_indexes
def delete_people(self,unixname,datasource_id):
try:
update="""UPDATE gc_project_indexes SET peoplehtml=NULL WHERE unixname=%s AND datasource_id=%s"""
self.cursor.execute(update,(unixname,datasource_id))
except:
print("!!!!WARNING!!!! Deletion of people page failed.")
print (traceback.format_exc())
#This method allows for the deletion of a downloads page for a project from the gc_project_indexes
def delete_downloads(self,unixname,datasource_id):
try:
update="""UPDATE gc_project_indexes SET downloadshtml=NULL WHERE unixname=%s AND datasource_id=%s"""
self.cursor.execute(update,(unixname,datasource_id))
except:
print("!!!!WARNING!!!! Deletion of downloads page failed.")
print (traceback.format_exc())
#This method allows for the deletion of a issues page for a project from the gc_project_indexes
def delete_issues(self,unixname,datasource_id):
try:
update="""UPDATE gc_project_indexes SET issueshtml=NULL WHERE unixname=%s AND datasource_id=%s"""
self.cursor.execute(update,(unixname,datasource_id))
except:
print("!!!!WARNING!!!! Deletion of issues page failed.")
print (traceback.format_exc())
#This method allows for the deletion of a wiki page for a project from the gc_project_indexes
def delete_wiki(self, unixname, datasource_id):
    """Clear the stored wiki-page HTML for a project in gc_project_indexes.

    unixname      -- the project's unix name
    datasource_id -- identifier of the datasource the row belongs to
    """
    try:
        update = """UPDATE gc_project_indexes SET wikihtml=NULL WHERE unixname=%s AND datasource_id=%s"""
        self.cursor.execute(update, (unixname, datasource_id))
    # Narrowed from a bare except so SystemExit/KeyboardInterrupt still
    # propagate; failures are logged, not raised (best-effort delete).
    except Exception:
        print("!!!!WARNING!!!! Deletion of wiki page failed.")
        print (traceback.format_exc())
'''
Created on Mar 27, 2010
This module does the spidering for the code.google.com project downloads pages.
@author: StevenNorris
'''
BASE_LINK="http://code.google.com/p/"
import sys
import time
import traceback
def run(utils, datasource_id):
    """Spider the downloads page for every queued gather_downloads job.

    Pulls jobs from the queue until exhausted. On success stores the page
    HTML and advances the job to gather_issues; on failure posts an error
    and moves on to the next job.

    utils         -- helper object providing job queue, HTTP fetch and DB access
    datasource_id -- identifier of the datasource being collected
    """
    print("\nGathering downloads pages.")
    # runs jobs
    job = utils.get_job(datasource_id, "gather_downloads")
    if (utils.error):
        sys.exit()
    while (job != None):
        # Throttle so we don't hammer code.google.com.
        time.sleep(3)
        try:
            unixname = job[0]
            print("Gathering for " + unixname)
            # gets downloads page
            downloads = utils.get_page(BASE_LINK + unixname + "/downloads/list")
            # inserts downloads page
            if (downloads):
                insert = """UPDATE gc_project_indexes SET downloadshtml=%s, last_modified=NOW() WHERE unixname=%s AND datasource_id=%s"""
                utils.db_insert(insert, downloads, unixname, datasource_id)
                utils.change_status('gather_issues', 'gather_downloads', datasource_id, unixname)
                # get new job
                job = utils.get_job(datasource_id, 'gather_downloads')
                if (utils.error):
                    sys.exit()
            # if downloads page does not collect properly, post error and get new job
            else:
                print("!!!!WARNING!!!! Downloads page gathering failed for " + unixname)
                utils.post_error('gather_downloads: \nDownloads page either did not exist or fail to collect.' , datasource_id, unixname)
                job = utils.get_job(datasource_id, 'gather_downloads')
                if (utils.error):
                    sys.exit()
        # if process fails, post error and get new job.  Narrowed from a bare
        # except so SystemExit (raised above) and KeyboardInterrupt propagate.
        except Exception:
            print("!!!!WARNING!!! Downloads page collection failed")
            utils.post_error('gather_downloads:\n' + traceback.format_exc(), datasource_id, unixname)
            job = utils.get_job(datasource_id, 'gather_downloads')
            if (utils.error):
                sys.exit()
'''
Created on Apr 12, 2010
This module performs the clean up for gather_issues_specific jobs.
@author: StevenNorris
'''
import sys
import traceback
def run(utils,datasource_id):
#Cleans up gather_wiki jobs
print("\nStarting people specific clean up.")
#Gets Job
job=utils.get_cleanup_job(datasource_id,'gather_issues_specific')
if(utils.error):
sys.exit()
while(job!=None):
#Cleans up for the job
try:
unixname=job[0]
print "Cleaning up for "+unixname
utils.change_status('gather_issues_specific','Clean_Up',datasource_id,unixname)
job=utils.get_cleanup_job(datasource_id,'gather_people_specific')
if(utils.error):
sys.exit()
#If process fails, post error and get new job
except:
print("!!!!WARNING!!!! Clean up issues specific for "+unixname+" failed.")
utils.post_error('Clean_Up(issues specific):\n'+traceback.format_exc(),datasource_id,unixname)
job=utils.get_cleanup_job(datasource_id,'gather_issues_specific')
if(utils.error):
sys.exit()
| Python |
'''
Created on Mar 27, 2010
This module does the spidering for the code.google.com project updates pages.
@author: StevenNorris
'''
BASE_LINK="http://code.google.com/p/"
import sys
import time
import traceback
def run(utils, datasource_id):
    """Spider the updates page for every queued gather_updates job.

    Pulls jobs from the queue until exhausted. On success stores the page
    HTML and advances the job to gather_people; on failure posts an error
    and moves on to the next job.

    utils         -- helper object providing job queue, HTTP fetch and DB access
    datasource_id -- identifier of the datasource being collected
    """
    print("\nGathering updates pages.")
    # runs jobs
    job = utils.get_job(datasource_id, "gather_updates")
    if (utils.error):
        sys.exit()
    while (job != None):
        # Throttle so we don't hammer code.google.com.
        time.sleep(3)
        try:
            unixname = job[0]
            print("Gathering for " + unixname)
            # gets updates page
            updates = utils.get_page(BASE_LINK + unixname + "/updates/list")
            # inserts updates page
            if (updates):
                insert = """UPDATE gc_project_indexes SET updateshtml=%s, last_modified=NOW() WHERE unixname=%s AND datasource_id=%s"""
                utils.db_insert(insert, updates, unixname, datasource_id)
                utils.change_status('gather_people', 'gather_updates', datasource_id, unixname)
                # get new job
                job = utils.get_job(datasource_id, 'gather_updates')
                if (utils.error):
                    sys.exit()
            # if updates page does not collect properly, post error and get new job
            else:
                print("!!!!WARNING!!!! Updates page gathering failed for " + unixname)
                utils.post_error('gather_Updates: \nUpdates page either did not exist or fail to collect.' , datasource_id, unixname)
                job = utils.get_job(datasource_id, 'gather_updates')
                if (utils.error):
                    sys.exit()
        # if process fails, post error and get new job.  Narrowed from a bare
        # except so SystemExit (raised above) and KeyboardInterrupt propagate.
        except Exception:
            print("!!!!WARNING!!! Updates page collection failed")
            utils.post_error('gather_updates:\n' + traceback.format_exc(), datasource_id, unixname)
            job = utils.get_job(datasource_id, 'gather_updates')
            if (utils.error):
                sys.exit()
| Python |
"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup parses a (possibly invalid) XML or HTML document into a
tree representation. It provides methods and Pythonic idioms that make
it easy to navigate, search, and modify the tree.
A well-formed XML/HTML document yields a well-formed data
structure. An ill-formed XML/HTML document yields a correspondingly
ill-formed data structure. If your document is only locally
well-formed, you can use this library to find and process the
well-formed part of it.
Beautiful Soup works with Python 2.2 and up. It has no external
dependencies, but you'll have more success at converting data to UTF-8
if you also install these three packages:
* chardet, for auto-detecting character encodings
http://chardet.feedparser.org/
* cjkcodecs and iconv_codec, which add more encodings to the ones supported
by stock Python.
http://cjkpython.i18n.org/
Beautiful Soup defines classes for two main parsing strategies:
* BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
language that kind of looks like XML.
* BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
or invalid. This class has web browser-like heuristics for
obtaining a sensible parse tree in the face of common HTML errors.
Beautiful Soup also defines a class (UnicodeDammit) for autodetecting
the encoding of an HTML or XML document, and converting it to
Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/documentation.html
Here, have some legalese:
Copyright (c) 2004-2010, Leonard Richardson
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the the Beautiful Soup Consortium and All
Night Kosher Bakery nor the names of its contributors may be
used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.
"""
from __future__ import generators
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "3.0.8.1"
__copyright__ = "Copyright (c) 2004-2010 Leonard Richardson"
__license__ = "New-style BSD"
from sgmllib import SGMLParser, SGMLParseError
import codecs
import markupbase
import types
import re
import sgmllib
try:
from htmlentitydefs import name2codepoint
except ImportError:
name2codepoint = {}
try:
set
except NameError:
from sets import Set as set
#These hacks make Beautiful Soup able to parse XML with namespaces
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match
DEFAULT_OUTPUT_ENCODING = "utf-8"
def _match_css_class(str):
"""Build a RE to match the given CSS class."""
return re.compile(r"(^|.*\s)%s($|\s)" % str)
# First, the classes that represent markup elements.
class PageElement(object):
    """Contains the navigational information for some part of the page
    (either a tag or a piece of text)

    Every element carries five pointers: parent, previous/next (document
    order) and previousSibling/nextSibling (same-parent order).  The
    mutation methods below (insert, extract, replaceWith) rewire these
    pointers by hand, so statement order in them is significant.
    """

    def setup(self, parent=None, previous=None):
        """Sets up the initial relations between this element and
        other elements."""
        self.parent = parent
        self.previous = previous
        self.next = None
        self.previousSibling = None
        self.nextSibling = None
        # If the parent already has children, this element becomes the
        # next sibling of the last one.
        if self.parent and self.parent.contents:
            self.previousSibling = self.parent.contents[-1]
            self.previousSibling.nextSibling = self

    def replaceWith(self, replaceWith):
        # Swap this element for another one at the same position in the tree.
        oldParent = self.parent
        myIndex = self.parent.index(self)
        if hasattr(replaceWith, "parent")\
           and replaceWith.parent is self.parent:
            # We're replacing this element with one of its siblings.
            index = replaceWith.parent.index(replaceWith)
            if index and index < myIndex:
                # Furthermore, it comes before this element. That
                # means that when we extract it, the index of this
                # element will change.
                myIndex = myIndex - 1
        self.extract()
        oldParent.insert(myIndex, replaceWith)

    def replaceWithChildren(self):
        # Splice this element's children into its place (the element
        # itself is removed).  Children are inserted in reverse so that
        # repeated insertion at the same index preserves their order.
        myParent = self.parent
        myIndex = self.parent.index(self)
        self.extract()
        reversedChildren = list(self.contents)
        reversedChildren.reverse()
        for child in reversedChildren:
            myParent.insert(myIndex, child)

    def extract(self):
        """Destructively rips this element out of the tree."""
        if self.parent:
            try:
                del self.parent.contents[self.parent.index(self)]
            except ValueError:
                pass

        #Find the two elements that would be next to each other if
        #this element (and any children) hadn't been parsed. Connect
        #the two.
        lastChild = self._lastRecursiveChild()
        nextElement = lastChild.next

        if self.previous:
            self.previous.next = nextElement
        if nextElement:
            nextElement.previous = self.previous
        self.previous = None
        lastChild.next = None

        self.parent = None
        if self.previousSibling:
            self.previousSibling.nextSibling = self.nextSibling
        if self.nextSibling:
            self.nextSibling.previousSibling = self.previousSibling
        self.previousSibling = self.nextSibling = None
        # Returns self so callers can keep a handle on the detached subtree.
        return self

    def _lastRecursiveChild(self):
        "Finds the last element beneath this object to be parsed."
        lastChild = self
        while hasattr(lastChild, 'contents') and lastChild.contents:
            lastChild = lastChild.contents[-1]
        return lastChild

    def insert(self, position, newChild):
        # Insert newChild into self.contents at the given position and
        # rewire all five navigation pointers accordingly.
        if isinstance(newChild, basestring) \
           and not isinstance(newChild, NavigableString):
            newChild = NavigableString(newChild)

        position = min(position, len(self.contents))
        if hasattr(newChild, 'parent') and newChild.parent is not None:
            # We're 'inserting' an element that's already one
            # of this object's children.
            if newChild.parent is self:
                index = self.index(newChild)
                if index > position:
                    # Furthermore we're moving it further down the
                    # list of this object's children. That means that
                    # when we extract this element, our target index
                    # will jump down one.
                    position = position - 1
            newChild.extract()

        newChild.parent = self
        previousChild = None
        if position == 0:
            newChild.previousSibling = None
            newChild.previous = self
        else:
            previousChild = self.contents[position-1]
            newChild.previousSibling = previousChild
            newChild.previousSibling.nextSibling = newChild
            newChild.previous = previousChild._lastRecursiveChild()
        if newChild.previous:
            newChild.previous.next = newChild

        newChildsLastElement = newChild._lastRecursiveChild()

        if position >= len(self.contents):
            newChild.nextSibling = None

            # Walk up the ancestor chain to find the element that follows
            # this subtree in document order, if any.
            parent = self
            parentsNextSibling = None
            while not parentsNextSibling:
                parentsNextSibling = parent.nextSibling
                parent = parent.parent
                if not parent: # This is the last element in the document.
                    break
            if parentsNextSibling:
                newChildsLastElement.next = parentsNextSibling
            else:
                newChildsLastElement.next = None
        else:
            nextChild = self.contents[position]
            newChild.nextSibling = nextChild
            if newChild.nextSibling:
                newChild.nextSibling.previousSibling = newChild
            newChildsLastElement.next = nextChild

        if newChildsLastElement.next:
            newChildsLastElement.next.previous = newChildsLastElement
        self.contents.insert(position, newChild)

    def append(self, tag):
        """Appends the given tag to the contents of this tag."""
        self.insert(len(self.contents), tag)

    def findNext(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears after this Tag in the document."""
        return self._findOne(self.findAllNext, name, attrs, text, **kwargs)

    def findAllNext(self, name=None, attrs={}, text=None, limit=None,
                    **kwargs):
        """Returns all items that match the given criteria and appear
        after this Tag in the document."""
        return self._findAll(name, attrs, text, limit, self.nextGenerator,
                             **kwargs)

    def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears after this Tag in the document."""
        return self._findOne(self.findNextSiblings, name, attrs, text,
                             **kwargs)

    def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
                         **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear after this Tag in the document."""
        return self._findAll(name, attrs, text, limit,
                             self.nextSiblingGenerator, **kwargs)
    fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x

    def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears before this Tag in the document."""
        return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)

    def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
                        **kwargs):
        """Returns all items that match the given criteria and appear
        before this Tag in the document."""
        return self._findAll(name, attrs, text, limit, self.previousGenerator,
                             **kwargs)
    fetchPrevious = findAllPrevious # Compatibility with pre-3.x

    def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears before this Tag in the document."""
        return self._findOne(self.findPreviousSiblings, name, attrs, text,
                             **kwargs)

    def findPreviousSiblings(self, name=None, attrs={}, text=None,
                             limit=None, **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear before this Tag in the document."""
        return self._findAll(name, attrs, text, limit,
                             self.previousSiblingGenerator, **kwargs)
    fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x

    def findParent(self, name=None, attrs={}, **kwargs):
        """Returns the closest parent of this Tag that matches the given
        criteria."""
        # NOTE: We can't use _findOne because findParents takes a different
        # set of arguments.
        r = None
        l = self.findParents(name, attrs, 1)
        if l:
            r = l[0]
        return r

    def findParents(self, name=None, attrs={}, limit=None, **kwargs):
        """Returns the parents of this Tag that match the given
        criteria."""
        return self._findAll(name, attrs, None, limit, self.parentGenerator,
                             **kwargs)
    fetchParents = findParents # Compatibility with pre-3.x

    #These methods do the real heavy lifting.

    def _findOne(self, method, name, attrs, text, **kwargs):
        # Run a findAll*-style method with limit=1 and return the single hit.
        r = None
        l = method(name, attrs, text, 1, **kwargs)
        if l:
            r = l[0]
        return r

    def _findAll(self, name, attrs, text, limit, generator, **kwargs):
        "Iterates over a generator looking for things that match."

        if isinstance(name, SoupStrainer):
            strainer = name
        # (Possibly) special case some findAll*(...) searches
        elif text is None and not limit and not attrs and not kwargs:
            # findAll*(True)
            if name is True:
                return [element for element in generator()
                        if isinstance(element, Tag)]
            # findAll*('tag-name')
            elif isinstance(name, basestring):
                return [element for element in generator()
                        if isinstance(element, Tag) and
                        element.name == name]
            else:
                strainer = SoupStrainer(name, attrs, text, **kwargs)
        # Build a SoupStrainer
        else:
            strainer = SoupStrainer(name, attrs, text, **kwargs)
        results = ResultSet(strainer)
        g = generator()
        # Python 2 generator protocol: pull items with .next() until
        # exhaustion, collecting up to `limit` matches.
        while True:
            try:
                i = g.next()
            except StopIteration:
                break
            if i:
                found = strainer.search(i)
                if found:
                    results.append(found)
                    if limit and len(results) >= limit:
                        break
        return results

    #These Generators can be used to navigate starting from both
    #NavigableStrings and Tags.
    # NOTE: each generator yields after advancing, so the final yielded
    # value is None (the search loop above skips falsy items).
    def nextGenerator(self):
        i = self
        while i is not None:
            i = i.next
            yield i

    def nextSiblingGenerator(self):
        i = self
        while i is not None:
            i = i.nextSibling
            yield i

    def previousGenerator(self):
        i = self
        while i is not None:
            i = i.previous
            yield i

    def previousSiblingGenerator(self):
        i = self
        while i is not None:
            i = i.previousSibling
            yield i

    def parentGenerator(self):
        i = self
        while i is not None:
            i = i.parent
            yield i

    # Utility methods
    def substituteEncoding(self, str, encoding=None):
        # Replace the %SOUP-ENCODING% placeholder with the real encoding.
        encoding = encoding or "utf-8"
        return str.replace("%SOUP-ENCODING%", encoding)

    def toEncoding(self, s, encoding=None):
        """Encodes an object to a string in some encoding, or to Unicode."""
        if isinstance(s, unicode):
            if encoding:
                s = s.encode(encoding)
        elif isinstance(s, str):
            if encoding:
                s = s.encode(encoding)
            else:
                s = unicode(s)
        else:
            # Not a string at all: stringify first, then recurse.
            if encoding:
                s  = self.toEncoding(str(s), encoding)
            else:
                s = unicode(s)
        return s
class NavigableString(unicode, PageElement):
    # A Unicode string that also carries PageElement navigation pointers,
    # so text nodes participate in tree traversal like tags do.

    def __new__(cls, value):
        """Create a new NavigableString.

        When unpickling a NavigableString, this method is called with
        the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
        passed in to the superclass's __new__ or the superclass won't know
        how to handle non-ASCII characters.
        """
        if isinstance(value, unicode):
            return unicode.__new__(cls, value)
        return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)

    def __getnewargs__(self):
        # Pickle support: reconstruct from the encoded byte string.
        return (NavigableString.__str__(self),)

    def __getattr__(self, attr):
        """text.string gives you text. This is for backwards
        compatibility for Navigable*String, but for CData* it lets you
        get the string without the CData wrapper."""
        if attr == 'string':
            return self
        else:
            raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)

    def __unicode__(self):
        return str(self).decode(DEFAULT_OUTPUT_ENCODING)

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        # With an encoding, return encoded bytes; with encoding=None,
        # return the (unicode) string itself.
        if encoding:
            return self.encode(encoding)
        else:
            return self
class CData(NavigableString):
    """A CDATA section: rendered wrapped in the CDATA delimiters."""

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        text = NavigableString.__str__(self, encoding)
        return "<![CDATA[" + text + "]]>"
class ProcessingInstruction(NavigableString):
    """A processing instruction, rendered between <? and ?>."""

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        text = self
        # Substitute the real document encoding if the placeholder appears.
        if "%SOUP-ENCODING%" in text:
            text = self.substituteEncoding(text, encoding)
        return "<?" + self.toEncoding(text, encoding) + "?>"
class Comment(NavigableString):
    """An HTML/XML comment node, rendered between <!-- and -->."""

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        text = NavigableString.__str__(self, encoding)
        return "<!--" + text + "-->"
class Declaration(NavigableString):
    """A document declaration (e.g. DOCTYPE), rendered between <! and >."""

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        text = NavigableString.__str__(self, encoding)
        return "<!" + text + ">"
class Tag(PageElement):

    """Represents a found HTML tag with its attributes and contents."""

    def _invert(h):
        "Cheap function to invert a hash."
        # Called at class-definition time (no self) to build the
        # chars-to-entities map from its inverse below.
        i = {}
        for k,v in h.items():
            i[v] = k
        return i

    XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'",
                                      "quot" : '"',
                                      "amp" : "&",
                                      "lt" : "<",
                                      "gt" : ">" }

    XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS)

    def _convertEntities(self, match):
        """Used in a call to re.sub to replace HTML, XML, and numeric
        entities with the appropriate Unicode characters. If HTML
        entities are being converted, any unrecognized entities are
        escaped."""
        x = match.group(1)
        if self.convertHTMLEntities and x in name2codepoint:
            return unichr(name2codepoint[x])
        elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
            if self.convertXMLEntities:
                return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
            else:
                return u'&%s;' % x
        elif len(x) > 0 and x[0] == '#':
            # Handle numeric entities
            if len(x) > 1 and x[1] == 'x':
                return unichr(int(x[2:], 16))
            else:
                return unichr(int(x[1:]))
        # NOTE(review): upstream Beautiful Soup 3.0.8 returns
        # u'&amp;%s;' in the escapeUnrecognizedEntities branch; here both
        # branches are identical, which looks like extraction damage —
        # verify against upstream before relying on escaping behavior.
        elif self.escapeUnrecognizedEntities:
            return u'&%s;' % x
        else:
            return u'&%s;' % x

    def __init__(self, parser, name, attrs=None, parent=None,
                 previous=None):
        "Basic constructor."

        # We don't actually store the parser object: that lets extracted
        # chunks be garbage-collected
        self.parserClass = parser.__class__
        self.isSelfClosing = parser.isSelfClosingTag(name)
        self.name = name
        if attrs is None:
            attrs = []
        self.attrs = attrs
        self.contents = []
        self.setup(parent, previous)
        self.hidden = False
        self.containsSubstitutions = False
        self.convertHTMLEntities = parser.convertHTMLEntities
        self.convertXMLEntities = parser.convertXMLEntities
        self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities

        # Convert any HTML, XML, or numeric entities in the attribute values.
        # (Python 2 tuple-parameter lambda syntax.)
        convert = lambda(k, val): (k,
                                   re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);",
                                          self._convertEntities,
                                          val))
        self.attrs = map(convert, self.attrs)

    def getString(self):
        # Returns the sole string child, or None implicitly otherwise.
        if (len(self.contents) == 1
            and isinstance(self.contents[0], NavigableString)):
            return self.contents[0]

    def setString(self, string):
        """Replace the contents of the tag with a string"""
        self.clear()
        self.append(string)

    string = property(getString, setString)

    def getText(self, separator=u""):
        # Concatenate all descendant text, each piece stripped.
        if not len(self.contents):
            return u""
        stopNode = self._lastRecursiveChild().next
        strings = []
        current = self.contents[0]
        while current is not stopNode:
            if isinstance(current, NavigableString):
                strings.append(current.strip())
            current = current.next
        return separator.join(strings)

    text = property(getText)

    def get(self, key, default=None):
        """Returns the value of the 'key' attribute for the tag, or
        the value given for 'default' if it doesn't have that
        attribute."""
        return self._getAttrMap().get(key, default)

    def clear(self):
        """Extract all children."""
        # Iterate a copy: extract() mutates self.contents.
        for child in self.contents[:]:
            child.extract()

    def index(self, element):
        # Identity-based index (is, not ==) so equal-but-distinct tags
        # don't shadow each other.
        for i, child in enumerate(self.contents):
            if child is element:
                return i
        raise ValueError("Tag.index: element not in tag")

    def has_key(self, key):
        return self._getAttrMap().has_key(key)

    def __getitem__(self, key):
        """tag[key] returns the value of the 'key' attribute for the tag,
        and throws an exception if it's not there."""
        return self._getAttrMap()[key]

    def __iter__(self):
        "Iterating over a tag iterates over its contents."
        return iter(self.contents)

    def __len__(self):
        "The length of a tag is the length of its list of contents."
        return len(self.contents)

    def __contains__(self, x):
        return x in self.contents

    def __nonzero__(self):
        "A tag is non-None even if it has no contents."
        return True

    def __setitem__(self, key, value):
        """Setting tag[key] sets the value of the 'key' attribute for the
        tag."""
        self._getAttrMap()
        self.attrMap[key] = value
        found = False
        for i in range(0, len(self.attrs)):
            if self.attrs[i][0] == key:
                self.attrs[i] = (key, value)
                found = True
        if not found:
            self.attrs.append((key, value))
        # Redundant with the assignment above, but kept as-is.
        self._getAttrMap()[key] = value

    def __delitem__(self, key):
        "Deleting tag[key] deletes all 'key' attributes for the tag."
        # NOTE(review): removing items while iterating self.attrs can skip
        # adjacent duplicates of the same key — verify if duplicate
        # attributes must all be removed reliably.
        for item in self.attrs:
            if item[0] == key:
                self.attrs.remove(item)
                #We don't break because bad HTML can define the same
                #attribute multiple times.
            self._getAttrMap()
            if self.attrMap.has_key(key):
                del self.attrMap[key]

    def __call__(self, *args, **kwargs):
        """Calling a tag like a function is the same as calling its
        findAll() method. Eg. tag('a') returns a list of all the A tags
        found within this tag."""
        return apply(self.findAll, args, kwargs)

    def __getattr__(self, tag):
        #print "Getattr %s.%s" % (self.__class__, tag)
        # tag.fooTag -> first <foo> child; tag.foo (non-dunder) likewise.
        if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
            return self.find(tag[:-3])
        elif tag.find('__') != 0:
            return self.find(tag)
        raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag)

    def __eq__(self, other):
        """Returns true iff this tag has the same name, the same attributes,
        and the same contents (recursively) as the given tag.

        NOTE: right now this will return false if two tags have the
        same attributes in a different order. Should this be fixed?"""
        if other is self:
            return True
        if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):
            return False
        for i in range(0, len(self.contents)):
            if self.contents[i] != other.contents[i]:
                return False
        return True

    def __ne__(self, other):
        """Returns true iff this tag is not identical to the other tag,
        as defined in __eq__."""
        return not self == other

    def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        """Renders this tag as a string."""
        return self.__str__(encoding)

    def __unicode__(self):
        return self.__str__(None)

    # Matches angle brackets, and ampersands that are not already part
    # of a numeric or named entity.
    BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
                                           + "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
                                           + ")")

    def _sub_entity(self, x):
        """Used with a regular expression to substitute the
        appropriate XML entity for an XML special character."""
        return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";"

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,
                prettyPrint=False, indentLevel=0):
        """Returns a string or Unicode representation of this tag and
        its contents. To get Unicode, pass None for encoding.

        NOTE: since Python's HTML parser consumes whitespace, this
        method is not certain to reproduce the whitespace present in
        the original string."""

        encodedName = self.toEncoding(self.name, encoding)

        attrs = []
        if self.attrs:
            for key, val in self.attrs:
                fmt = '%s="%s"'
                if isinstance(val, basestring):
                    if self.containsSubstitutions and '%SOUP-ENCODING%' in val:
                        val = self.substituteEncoding(val, encoding)

                    # The attribute value either:
                    #
                    # * Contains no embedded double quotes or single quotes.
                    #   No problem: we enclose it in double quotes.
                    # * Contains embedded single quotes. No problem:
                    #   double quotes work here too.
                    # * Contains embedded double quotes. No problem:
                    #   we enclose it in single quotes.
                    # * Embeds both single _and_ double quotes. This
                    #   can't happen naturally, but it can happen if
                    #   you modify an attribute value after parsing
                    #   the document. Now we have a bit of a
                    #   problem. We solve it by enclosing the
                    #   attribute in single quotes, and escaping any
                    #   embedded single quotes to XML entities.
                    if '"' in val:
                        fmt = "%s='%s'"
                        if "'" in val:
                            # TODO: replace with apos when
                            # appropriate.
                            val = val.replace("'", "&squot;")

                    # Now we're okay w/r/t quotes. But the attribute
                    # value might also contain angle brackets, or
                    # ampersands that aren't part of entities. We need
                    # to escape those to XML entities too.
                    val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)

                attrs.append(fmt % (self.toEncoding(key, encoding),
                                    self.toEncoding(val, encoding)))
        close = ''
        closeTag = ''
        if self.isSelfClosing:
            close = ' /'
        else:
            closeTag = '</%s>' % encodedName

        indentTag, indentContents = 0, 0
        if prettyPrint:
            indentTag = indentLevel
            space = (' ' * (indentTag-1))
            indentContents = indentTag + 1
        contents = self.renderContents(encoding, prettyPrint, indentContents)
        if self.hidden:
            # Hidden tags (e.g. the soup object itself) render only contents.
            s = contents
        else:
            s = []
            attributeString = ''
            if attrs:
                attributeString = ' ' + ' '.join(attrs)
            if prettyPrint:
                s.append(space)
            s.append('<%s%s%s>' % (encodedName, attributeString, close))
            if prettyPrint:
                s.append("\n")
            s.append(contents)
            if prettyPrint and contents and contents[-1] != "\n":
                s.append("\n")
            if prettyPrint and closeTag:
                s.append(space)
            s.append(closeTag)
            if prettyPrint and closeTag and self.nextSibling:
                s.append("\n")
            s = ''.join(s)
        return s

    def decompose(self):
        """Recursively destroys the contents of this tree."""
        self.extract()
        if len(self.contents) == 0:
            return
        current = self.contents[0]
        while current is not None:
            # Save the forward pointer before severing it.
            next = current.next
            if isinstance(current, Tag):
                del current.contents[:]
            current.parent = None
            current.previous = None
            current.previousSibling = None
            current.next = None
            current.nextSibling = None
            current = next

    def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
        return self.__str__(encoding, True)

    def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
                       prettyPrint=False, indentLevel=0):
        """Renders the contents of this tag as a string in the given
        encoding. If encoding is None, returns a Unicode string.."""
        s=[]
        for c in self:
            text = None
            if isinstance(c, NavigableString):
                text = c.__str__(encoding)
            elif isinstance(c, Tag):
                s.append(c.__str__(encoding, prettyPrint, indentLevel))
            if text and prettyPrint:
                text = text.strip()
            if text:
                if prettyPrint:
                    s.append(" " * (indentLevel-1))
                s.append(text)
                if prettyPrint:
                    s.append("\n")
        return ''.join(s)

    #Soup methods

    def find(self, name=None, attrs={}, recursive=True, text=None,
             **kwargs):
        """Return only the first child of this Tag matching the given
        criteria."""
        r = None
        l = self.findAll(name, attrs, recursive, text, 1, **kwargs)
        if l:
            r = l[0]
        return r
    findChild = find

    def findAll(self, name=None, attrs={}, recursive=True, text=None,
                limit=None, **kwargs):
        """Extracts a list of Tag objects that match the given
        criteria.  You can specify the name of the Tag and any
        attributes you want the Tag to have.

        The value of a key-value pair in the 'attrs' map can be a
        string, a list of strings, a regular expression object, or a
        callable that takes a string and returns whether or not the
        string matches for some custom definition of 'matches'. The
        same is true of the tag name."""
        generator = self.recursiveChildGenerator
        if not recursive:
            generator = self.childGenerator
        return self._findAll(name, attrs, text, limit, generator, **kwargs)
    findChildren = findAll

    # Pre-3.x compatibility methods
    first = find
    fetch = findAll

    def fetchText(self, text=None, recursive=True, limit=None):
        return self.findAll(text=text, recursive=recursive, limit=limit)

    def firstText(self, text=None, recursive=True):
        return self.find(text=text, recursive=recursive)

    #Private methods

    def _getAttrMap(self):
        """Initializes a map representation of this tag's attributes,
        if not already initialized."""
        # Relies on __getattr__ above: a missing 'attrMap' attribute
        # falls through to self.find('attrMap'), which returns a falsy
        # value, so getattr() here never raises.
        if not getattr(self, 'attrMap'):
            self.attrMap = {}
            for (key, value) in self.attrs:
                self.attrMap[key] = value
        return self.attrMap

    #Generator methods
    def childGenerator(self):
        # Just use the iterator from the contents
        return iter(self.contents)

    def recursiveChildGenerator(self):
        # NOTE(review): Python 2 idiom — `raise StopIteration` inside a
        # generator; illegal under PEP 479 in Python 3.7+.
        if not len(self.contents):
            raise StopIteration
        stopNode = self._lastRecursiveChild().next
        current = self.contents[0]
        while current is not stopNode:
            yield current
            current = current.next
# Next, a couple classes to represent queries and their results.
class SoupStrainer:
    """Encapsulates a number of ways of matching a markup element (tag or
    text).

    name/attrs/text may each be a string, True, a regular expression,
    a list, or a callable; _matches() interprets them uniformly.
    """

    def __init__(self, name=None, attrs={}, text=None, **kwargs):
        self.name = name
        # A bare string for attrs is shorthand for a CSS class match.
        if isinstance(attrs, basestring):
            kwargs['class'] = _match_css_class(attrs)
            attrs = None
        if kwargs:
            if attrs:
                attrs = attrs.copy()
                attrs.update(kwargs)
            else:
                attrs = kwargs
        self.attrs = attrs
        self.text = text

    def __str__(self):
        if self.text:
            return self.text
        else:
            return "%s|%s" % (self.name, self.attrs)

    def searchTag(self, markupName=None, markupAttrs={}):
        # Match a tag by name and attributes.  markupName may be a Tag
        # object (then it doubles as the attribute source) or a plain name.
        found = None
        markup = None
        if isinstance(markupName, Tag):
            markup = markupName
            markupAttrs = markup
        # A callable name with a non-Tag argument is invoked with
        # (name, attrs) and decides the match itself.
        callFunctionWithTagData = callable(self.name) \
                                and not isinstance(markupName, Tag)

        if (not self.name) \
               or callFunctionWithTagData \
               or (markup and self._matches(markup, self.name)) \
               or (not markup and self._matches(markupName, self.name)):
            if callFunctionWithTagData:
                match = self.name(markupName, markupAttrs)
            else:
                match = True
                markupAttrMap = None
                # Every requested attribute must match.
                for attr, matchAgainst in self.attrs.items():
                    if not markupAttrMap:
                        if hasattr(markupAttrs, 'get'):
                            markupAttrMap = markupAttrs
                        else:
                            markupAttrMap = {}
                            for k,v in markupAttrs:
                                markupAttrMap[k] = v
                    attrValue = markupAttrMap.get(attr)
                    if not self._matches(attrValue, matchAgainst):
                        match = False
                        break
            if match:
                if markup:
                    found = markup
                else:
                    found = markupName
        return found

    def search(self, markup):
        #print 'looking for %s in %s' % (self, markup)
        found = None
        # If given a list of items, scan it for a text element that
        # matches.
        if hasattr(markup, "__iter__") \
                and not isinstance(markup, Tag):
            for element in markup:
                if isinstance(element, NavigableString) \
                       and self.search(element):
                    found = element
                    break
        # If it's a Tag, make sure its name or attributes match.
        # Don't bother with Tags if we're searching for text.
        elif isinstance(markup, Tag):
            if not self.text:
                found = self.searchTag(markup)
        # If it's text, make sure the text matches.
        elif isinstance(markup, NavigableString) or \
                 isinstance(markup, basestring):
            if self._matches(markup, self.text):
                found = markup
        else:
            raise Exception, "I don't know how to match against a %s" \
                  % markup.__class__
        return found

    def _matches(self, markup, matchAgainst):
        #print "Matching %s against %s" % (markup, matchAgainst)
        result = False
        if matchAgainst is True:
            # True matches any non-None markup.
            result = markup is not None
        elif callable(matchAgainst):
            result = matchAgainst(markup)
        else:
            #Custom match methods take the tag as an argument, but all
            #other ways of matching match the tag name as a string.
            if isinstance(markup, Tag):
                markup = markup.name
            if markup and not isinstance(markup, basestring):
                markup = unicode(markup)
            #Now we know that chunk is either a string, or None.
            if hasattr(matchAgainst, 'match'):
                # It's a regexp object.
                result = markup and matchAgainst.search(markup)
            elif hasattr(matchAgainst, '__iter__'): # list-like
                result = markup in matchAgainst
            elif hasattr(matchAgainst, 'items'):
                # NOTE(review): calling has_key on the markup *string* with a
                # dict as the key looks suspect (likely intended to test
                # membership of markup in matchAgainst) — verify against
                # upstream before relying on dict-valued match criteria.
                result = markup.has_key(matchAgainst)
            elif matchAgainst and isinstance(markup, basestring):
                # Compare like with like: coerce matchAgainst to the
                # markup's string type before equality.
                if isinstance(markup, unicode):
                    matchAgainst = unicode(matchAgainst)
                else:
                    matchAgainst = str(matchAgainst)

            if not result:
                result = matchAgainst == markup
        return result
class ResultSet(list):
    """A ResultSet is just a list that keeps track of the SoupStrainer
    that created it."""
    def __init__(self, source):
        # Initialize the list base on *self*.  The original code called
        # list.__init__([]), which initialized a throwaway temporary and
        # left self untouched (harmless only because lists start empty).
        list.__init__(self)
        self.source = source  # the SoupStrainer that produced this set
# Now, some helper functions.
def buildTagMap(default, *args):
    """Turns a list of maps, lists, or scalars into a single map.
    Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
    NESTING_RESET_TAGS maps out of lists and partial maps."""
    merged = {}
    for portion in args:
        if hasattr(portion, 'items'):
            # Dict-like: fold its entries in as-is.
            merged.update(portion)
        elif hasattr(portion, '__iter__'):
            # List-like: each element becomes a key mapped to the default.
            for tag_name in portion:
                merged[tag_name] = default
        else:
            # Scalar: a single key mapped to the default.
            merged[portion] = default
    return merged
# Now, the parser classes.
class BeautifulStoneSoup(Tag, SGMLParser):
    """This class contains the basic parser and search code. It defines
    a parser that knows nothing about tag behavior except for the
    following:

    You can't close a tag without closing all the tags it encloses.
    That is, "<foo><bar></foo>" actually means
    "<foo><bar></bar></foo>".

    [Another possible explanation is "<foo><bar /></foo>", but since
    this class defines no SELF_CLOSING_TAGS, it will never use that
    explanation.]

    This class is useful for parsing XML or made-up markup languages,
    or when BeautifulSoup makes an assumption counter to what you were
    expecting."""

    # Per-class parsing policy tables; subclasses (e.g. BeautifulSoup)
    # override these with markup-specific knowledge.
    SELF_CLOSING_TAGS = {}
    NESTABLE_TAGS = {}
    RESET_NESTING_TAGS = {}
    QUOTE_TAGS = {}
    PRESERVE_WHITESPACE_TAGS = []

    # Regex fixups applied before parsing (see _feed): repair "<br/>"
    # (no space before the slash) and "<! --...>" (stray whitespace in a
    # declaration), the two malformations that choke sgmllib.
    MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
                       lambda x: x.group(1) + ' />'),
                      (re.compile('<!\s+([^<>]*)>'),
                       lambda x: '<!' + x.group(1) + '>')
                      ]

    # Name given to the synthetic root tag that holds the whole document.
    ROOT_TAG_NAME = u'[document]'

    # Accepted values for the convertEntities constructor argument.
    HTML_ENTITIES = "html"
    XML_ENTITIES = "xml"
    XHTML_ENTITIES = "xhtml"
    # TODO: This only exists for backwards-compatibility
    ALL_ENTITIES = XHTML_ENTITIES

    # Used when determining whether a text node is all whitespace and
    # can be replaced with a single space. A text node that contains
    # fancy Unicode spaces (usually non-breaking) should be left
    # alone.
    STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, }

    def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
                 markupMassage=True, smartQuotesTo=XML_ENTITIES,
                 convertEntities=None, selfClosingTags=None, isHTML=False):
        """The Soup object is initialized as the 'root tag', and the
        provided markup (which can be a string or a file-like object)
        is fed into the underlying parser.

        sgmllib will process most bad HTML, and the BeautifulSoup
        class has some tricks for dealing with some HTML that kills
        sgmllib, but Beautiful Soup can nonetheless choke or lose data
        if your data uses self-closing tags or declarations
        incorrectly.

        By default, Beautiful Soup uses regexes to sanitize input,
        avoiding the vast majority of these problems. If the problems
        don't apply to you, pass in False for markupMassage, and
        you'll get better performance.

        The default parser massage techniques fix the two most common
        instances of invalid HTML that choke sgmllib:

         <br/> (No space between name of closing tag and tag close)
         <! --Comment--> (Extraneous whitespace in declaration)

        You can pass in a custom list of (RE object, replace method)
        tuples to get Beautiful Soup to scrub your input the way you
        want."""
        self.parseOnlyThese = parseOnlyThese
        self.fromEncoding = fromEncoding
        self.smartQuotesTo = smartQuotesTo
        self.convertEntities = convertEntities
        # Set the rules for how we'll deal with the entities we
        # encounter
        if self.convertEntities:
            # It doesn't make sense to convert encoded characters to
            # entities even while you're converting entities to Unicode.
            # Just convert it all to Unicode.
            self.smartQuotesTo = None
            if convertEntities == self.HTML_ENTITIES:
                self.convertXMLEntities = False
                self.convertHTMLEntities = True
                self.escapeUnrecognizedEntities = True
            elif convertEntities == self.XHTML_ENTITIES:
                self.convertXMLEntities = True
                self.convertHTMLEntities = True
                self.escapeUnrecognizedEntities = False
            elif convertEntities == self.XML_ENTITIES:
                self.convertXMLEntities = True
                self.convertHTMLEntities = False
                self.escapeUnrecognizedEntities = False
        else:
            self.convertXMLEntities = False
            self.convertHTMLEntities = False
            self.escapeUnrecognizedEntities = False

        self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
        SGMLParser.__init__(self)

        if hasattr(markup, 'read'):        # It's a file-type object.
            markup = markup.read()
        self.markup = markup
        self.markupMassage = markupMassage
        try:
            self._feed(isHTML=isHTML)
        except StopParsing:
            # Raised when a META-declared charset triggers a re-parse;
            # the re-parse has already completed by the time we get here.
            pass
        self.markup = None                 # The markup can now be GCed

    def convert_charref(self, name):
        """This method fixes a bug in Python's SGMLParser."""
        try:
            n = int(name)
        except ValueError:
            return
        if not 0 <= n <= 127: # ASCII ends at 127, not 255
            return
        return self.convert_codepoint(n)

    def _feed(self, inDocumentEncoding=None, isHTML=False):
        """Decode the stored markup, optionally massage it, and run it
        through the SGML parser, building the tree."""
        # Convert the document to Unicode.
        markup = self.markup
        if isinstance(markup, unicode):
            if not hasattr(self, 'originalEncoding'):
                self.originalEncoding = None
        else:
            dammit = UnicodeDammit\
                     (markup, [self.fromEncoding, inDocumentEncoding],
                      smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)
            markup = dammit.unicode
            self.originalEncoding = dammit.originalEncoding
            self.declaredHTMLEncoding = dammit.declaredHTMLEncoding
        if markup:
            if self.markupMassage:
                # markupMassage=True selects the default fixups; a custom
                # iterable of (regex, replacement) pairs is used as-is.
                if not hasattr(self.markupMassage, "__iter__"):
                    self.markupMassage = self.MARKUP_MASSAGE
                for fix, m in self.markupMassage:
                    markup = fix.sub(m, markup)
                # TODO: We get rid of markupMassage so that the
                # soup object can be deepcopied later on. Some
                # Python installations can't copy regexes. If anyone
                # was relying on the existence of markupMassage, this
                # might cause problems.
                del(self.markupMassage)
        self.reset()

        SGMLParser.feed(self, markup)
        # Close out any unfinished strings and close all the open tags.
        self.endData()
        while self.currentTag.name != self.ROOT_TAG_NAME:
            self.popTag()

    def __getattr__(self, methodName):
        """This method routes method call requests to either the SGMLParser
        superclass or the Tag superclass, depending on the method name."""
        #print "__getattr__ called on %s.%s" % (self.__class__, methodName)

        if methodName.startswith('start_') or methodName.startswith('end_') \
               or methodName.startswith('do_'):
            return SGMLParser.__getattr__(self, methodName)
        elif not methodName.startswith('__'):
            return Tag.__getattr__(self, methodName)
        else:
            raise AttributeError

    def isSelfClosingTag(self, name):
        """Returns true iff the given string is the name of a
        self-closing tag according to this parser."""
        return self.SELF_CLOSING_TAGS.has_key(name) \
               or self.instanceSelfClosingTags.has_key(name)

    def reset(self):
        """Reset both parser and tree state so the instance can absorb a
        fresh document (also called mid-parse when re-feeding after a
        charset discovery)."""
        Tag.__init__(self, self, self.ROOT_TAG_NAME)
        self.hidden = 1
        SGMLParser.reset(self)
        self.currentData = []
        self.currentTag = None
        self.tagStack = []
        self.quoteStack = []
        # The soup object itself is the root of the tag stack.
        self.pushTag(self)

    def popTag(self):
        """Close the tag on top of the stack; the one below it becomes
        current."""
        tag = self.tagStack.pop()

        #print "Pop", tag.name
        if self.tagStack:
            self.currentTag = self.tagStack[-1]
        return self.currentTag

    def pushTag(self, tag):
        """Open a tag: attach it to the current tag and make it current."""
        #print "Push", tag.name
        if self.currentTag:
            self.currentTag.contents.append(tag)
        self.tagStack.append(tag)
        self.currentTag = self.tagStack[-1]

    def endData(self, containerClass=NavigableString):
        """Flush accumulated character data into the tree as a
        containerClass node (NavigableString by default)."""
        if self.currentData:
            currentData = u''.join(self.currentData)
            # Collapse all-ASCII-whitespace runs to a single space (or
            # newline), unless we are inside a whitespace-preserving tag.
            if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
                not set([tag.name for tag in self.tagStack]).intersection(
                    self.PRESERVE_WHITESPACE_TAGS)):
                if '\n' in currentData:
                    currentData = '\n'
                else:
                    currentData = ' '
            self.currentData = []
            # When parsing selectively, top-level text that the strainer
            # doesn't want is simply dropped.
            if self.parseOnlyThese and len(self.tagStack) <= 1 and \
                   (not self.parseOnlyThese.text or \
                    not self.parseOnlyThese.search(currentData)):
                return
            o = containerClass(currentData)
            o.setup(self.currentTag, self.previous)
            if self.previous:
                self.previous.next = o
            self.previous = o
            self.currentTag.contents.append(o)

    def _popToTag(self, name, inclusivePop=True):
        """Pops the tag stack up to and including the most recent
        instance of the given tag. If inclusivePop is false, pops the tag
        stack up to but *not* including the most recent instqance of
        the given tag."""
        #print "Popping to %s" % name
        if name == self.ROOT_TAG_NAME:
            # Never pop the synthetic document root.
            return

        numPops = 0
        mostRecentTag = None
        for i in range(len(self.tagStack)-1, 0, -1):
            if name == self.tagStack[i].name:
                numPops = len(self.tagStack)-i
                break
        if not inclusivePop:
            numPops = numPops - 1

        for i in range(0, numPops):
            mostRecentTag = self.popTag()
        return mostRecentTag

    def _smartPop(self, name):
        """We need to pop up to the previous tag of this type, unless
        one of this tag's nesting reset triggers comes between this
        tag and the previous tag of this type, OR unless this tag is a
        generic nesting trigger and another generic nesting trigger
        comes between this tag and the previous tag of this type.

        Examples:
         <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
         <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
         <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.

         <li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
         <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
         <td><tr><td> *<td>* should pop to 'tr', not the first 'td'
        """
        nestingResetTriggers = self.NESTABLE_TAGS.get(name)
        isNestable = nestingResetTriggers != None
        isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
        popTo = None
        inclusive = True
        # Walk the open-tag stack from the top down looking for the
        # nearest tag that forces a pop.
        for i in range(len(self.tagStack)-1, 0, -1):
            p = self.tagStack[i]
            if (not p or p.name == name) and not isNestable:
                #Non-nestable tags get popped to the top or to their
                #last occurance.
                popTo = name
                break
            if (nestingResetTriggers is not None
                and p.name in nestingResetTriggers) \
                or (nestingResetTriggers is None and isResetNesting
                    and self.RESET_NESTING_TAGS.has_key(p.name)):

                #If we encounter one of the nesting reset triggers
                #peculiar to this tag, or we encounter another tag
                #that causes nesting to reset, pop up to but not
                #including that tag.
                popTo = p.name
                inclusive = False
                break
            p = p.parent
        if popTo:
            self._popToTag(popTo, inclusive)

    def unknown_starttag(self, name, attrs, selfClosing=0):
        """sgmllib callback: open a tag, applying self-closing, nesting
        and strainer policy.  Returns the new Tag (or None if skipped)."""
        #print "Start tag %s: %s" % (name, attrs)
        if self.quoteStack:
            #This is not a real tag.
            #print "<%s> is not real!" % name
            attrs = ''.join([' %s="%s"' % (x, y) for x, y in attrs])
            self.handle_data('<%s%s>' % (name, attrs))
            return
        self.endData()

        if not self.isSelfClosingTag(name) and not selfClosing:
            self._smartPop(name)

        # When parsing selectively, ignore tags at the top level that the
        # strainer doesn't want.
        if self.parseOnlyThese and len(self.tagStack) <= 1 \
               and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
            return

        tag = Tag(self, name, attrs, self.currentTag, self.previous)
        if self.previous:
            self.previous.next = tag
        self.previous = tag
        self.pushTag(tag)
        if selfClosing or self.isSelfClosingTag(name):
            self.popTag()
        if name in self.QUOTE_TAGS:
            #print "Beginning quote (%s)" % name
            # Inside e.g. <script>, everything is literal text until the
            # matching end tag.
            self.quoteStack.append(name)
            self.literal = 1
        return tag

    def unknown_endtag(self, name):
        """sgmllib callback: close a tag, honoring literal (quoted)
        sections."""
        #print "End tag %s" % name
        if self.quoteStack and self.quoteStack[-1] != name:
            #This is not a real end tag.
            #print "</%s> is not real!" % name
            self.handle_data('</%s>' % name)
            return
        self.endData()
        self._popToTag(name)
        if self.quoteStack and self.quoteStack[-1] == name:
            self.quoteStack.pop()
            self.literal = (len(self.quoteStack) > 0)

    def handle_data(self, data):
        # Accumulate character data; it is flushed to the tree by endData.
        self.currentData.append(data)

    def _toStringSubclass(self, text, subclass):
        """Adds a certain piece of text to the tree as a NavigableString
        subclass."""
        self.endData()
        self.handle_data(text)
        self.endData(subclass)

    def handle_pi(self, text):
        """Handle a processing instruction as a ProcessingInstruction
        object, possibly one with a %SOUP-ENCODING% slot into which an
        encoding will be plugged later."""
        if text[:3] == "xml":
            text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
        self._toStringSubclass(text, ProcessingInstruction)

    def handle_comment(self, text):
        "Handle comments as Comment objects."
        self._toStringSubclass(text, Comment)

    def handle_charref(self, ref):
        "Handle character references as data."
        if self.convertEntities:
            data = unichr(int(ref))
        else:
            data = '&#%s;' % ref
        self.handle_data(data)

    def handle_entityref(self, ref):
        """Handle entity references as data, possibly converting known
        HTML and/or XML entity references to the corresponding Unicode
        characters."""
        data = None
        if self.convertHTMLEntities:
            try:
                data = unichr(name2codepoint[ref])
            except KeyError:
                pass

        if not data and self.convertXMLEntities:
            data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)

        if not data and self.convertHTMLEntities and \
            not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):
            # TODO: We've got a problem here. We're told this is
            # an entity reference, but it's not an XML entity
            # reference or an HTML entity reference. Nonetheless,
            # the logical thing to do is to pass it through as an
            # unrecognized entity reference.
            #
            # Except: when the input is "&carol;" this function
            # will be called with input "carol". When the input is
            # "AT&T", this function will be called with input
            # "T". We have no way of knowing whether a semicolon
            # was present originally, so we don't know whether
            # this is an unknown entity or just a misplaced
            # ampersand.
            #
            # The more common case is a misplaced ampersand, so I
            # escape the ampersand and omit the trailing semicolon.
            data = "&%s" % ref
        if not data:
            # This case is different from the one above, because we
            # haven't already gone through a supposedly comprehensive
            # mapping of entities to Unicode characters. We might not
            # have gone through any mapping at all. So the chances are
            # very high that this is a real entity, and not a
            # misplaced ampersand.
            data = "&%s;" % ref
        self.handle_data(data)

    def handle_decl(self, data):
        "Handle DOCTYPEs and the like as Declaration objects."
        self._toStringSubclass(data, Declaration)

    def parse_declaration(self, i):
        """Treat a bogus SGML declaration as raw data. Treat a CDATA
        declaration as a CData object."""
        j = None
        if self.rawdata[i:i+9] == '<![CDATA[':
            k = self.rawdata.find(']]>', i)
            if k == -1:
                # Unterminated CDATA: consume to end of input.
                k = len(self.rawdata)
            data = self.rawdata[i+9:k]
            j = k+3
            self._toStringSubclass(data, CData)
        else:
            try:
                j = SGMLParser.parse_declaration(self, i)
            except SGMLParseError:
                # Malformed declaration: keep it as literal text.
                toHandle = self.rawdata[i:]
                self.handle_data(toHandle)
                j = i + len(toHandle)
        return j
class BeautifulSoup(BeautifulStoneSoup):
    """This parser knows the following facts about HTML:

    * Some tags have no closing tag and should be interpreted as being
      closed as soon as they are encountered.

    * The text inside some tags (ie. 'script') may contain tags which
      are not really part of the document and which should be parsed
      as text, not tags. If you want to parse the text as tags, you can
      always fetch it and parse it explicitly.

    * Tag nesting rules:

      Most tags can't be nested at all. For instance, the occurance of
      a <p> tag should implicitly close the previous <p> tag.

       <p>Para1<p>Para2
        should be transformed into:
       <p>Para1</p><p>Para2

      Some tags can be nested arbitrarily. For instance, the occurance
      of a <blockquote> tag should _not_ implicitly close the previous
      <blockquote> tag.

       Alice said: <blockquote>Bob said: <blockquote>Blah
        should NOT be transformed into:
       Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah

      Some tags can be nested, but the nesting is reset by the
      interposition of other tags. For instance, a <tr> tag should
      implicitly close the previous <tr> tag within the same <table>,
      but not close a <tr> tag in another table.

       <table><tr>Blah<tr>Blah
        should be transformed into:
       <table><tr>Blah</tr><tr>Blah
        but,
       <tr>Blah<table><tr>Blah
        should NOT be transformed into
       <tr>Blah<table></tr><tr>Blah

    Differing assumptions about tag nesting rules are a major source
    of problems with the BeautifulSoup class. If BeautifulSoup is not
    treating as nestable a tag your page author treats as nestable,
    try ICantBelieveItsBeautifulSoup, MinimalSoup, or
    BeautifulStoneSoup before writing your own subclass."""

    def __init__(self, *args, **kwargs):
        # Default smart quotes to HTML entities and mark the markup as
        # HTML (enables META charset sniffing in UnicodeDammit).
        if not kwargs.has_key('smartQuotesTo'):
            kwargs['smartQuotesTo'] = self.HTML_ENTITIES
        kwargs['isHTML'] = True
        BeautifulStoneSoup.__init__(self, *args, **kwargs)

    SELF_CLOSING_TAGS = buildTagMap(None,
                                    ('br' , 'hr', 'input', 'img', 'meta',
                                    'spacer', 'link', 'frame', 'base', 'col'))

    PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])

    # Text inside these tags is treated as literal data, not markup.
    QUOTE_TAGS = {'script' : None, 'textarea' : None}

    #According to the HTML standard, each of these inline tags can
    #contain another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_INLINE_TAGS = ('span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
                            'center')

    #According to the HTML standard, these block tags can contain
    #another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_BLOCK_TAGS = ('blockquote', 'div', 'fieldset', 'ins', 'del')

    #Lists can contain other lists, but there are restrictions.
    NESTABLE_LIST_TAGS = { 'ol' : [],
                           'ul' : [],
                           'li' : ['ul', 'ol'],
                           'dl' : [],
                           'dd' : ['dl'],
                           'dt' : ['dl'] }

    #Tables can contain other tables, but there are restrictions.
    NESTABLE_TABLE_TAGS = {'table' : [],
                           'tr' : ['table', 'tbody', 'tfoot', 'thead'],
                           'td' : ['tr'],
                           'th' : ['tr'],
                           'thead' : ['table'],
                           'tbody' : ['table'],
                           'tfoot' : ['table'],
                           }

    NON_NESTABLE_BLOCK_TAGS = ('address', 'form', 'p', 'pre')

    #If one of these tags is encountered, all tags up to the next tag of
    #this type are popped.
    RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
                                     NON_NESTABLE_BLOCK_TAGS,
                                     NESTABLE_LIST_TAGS,
                                     NESTABLE_TABLE_TAGS)

    NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
                                NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)

    # Used to detect the charset in a META tag; see start_meta
    CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)

    def start_meta(self, attrs):
        """Beautiful Soup can detect a charset included in a META tag,
        try to convert the document to that charset, and re-parse the
        document from the beginning."""
        httpEquiv = None
        contentType = None
        contentTypeIndex = None
        tagNeedsEncodingSubstitution = False

        for i in range(0, len(attrs)):
            key, value = attrs[i]
            key = key.lower()
            if key == 'http-equiv':
                httpEquiv = value
            elif key == 'content':
                contentType = value
                contentTypeIndex = i

        if httpEquiv and contentType: # It's an interesting meta tag.
            match = self.CHARSET_RE.search(contentType)
            if match:
                if (self.declaredHTMLEncoding is not None or
                    self.originalEncoding == self.fromEncoding):
                    # An HTML encoding was sniffed while converting
                    # the document to Unicode, or an HTML encoding was
                    # sniffed during a previous pass through the
                    # document, or an encoding was specified
                    # explicitly and it worked. Rewrite the meta tag.
                    def rewrite(match):
                        return match.group(1) + "%SOUP-ENCODING%"
                    newAttr = self.CHARSET_RE.sub(rewrite, contentType)
                    attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
                                               newAttr)
                    tagNeedsEncodingSubstitution = True
                else:
                    # This is our first pass through the document.
                    # Go through it again with the encoding information.
                    newCharset = match.group(3)
                    if newCharset and newCharset != self.originalEncoding:
                        self.declaredHTMLEncoding = newCharset
                        # Re-feed with the discovered encoding and abort
                        # this pass.
                        self._feed(self.declaredHTMLEncoding)
                        raise StopParsing
                    pass
        tag = self.unknown_starttag("meta", attrs)
        if tag and tagNeedsEncodingSubstitution:
            tag.containsSubstitutions = True
class StopParsing(Exception):
    """Raised internally to abort a parse pass early -- notably after a
    META-declared charset triggers a re-feed of the document."""
class ICantBelieveItsBeautifulSoup(BeautifulSoup):
    """The BeautifulSoup class is oriented towards skipping over
    common HTML errors like unclosed tags. However, sometimes it makes
    errors of its own. For instance, consider this fragment:

     <b>Foo<b>Bar</b></b>

    This is perfectly valid (if bizarre) HTML. However, the
    BeautifulSoup class will implicitly close the first b tag when it
    encounters the second 'b'. It will think the author wrote
    "<b>Foo<b>Bar", and didn't close the first 'b' tag, because
    there's no real-world reason to bold something that's already
    bold. When it encounters '</b></b>' it will close two more 'b'
    tags, for a grand total of three tags closed instead of two. This
    can throw off the rest of your document structure. The same is
    true of a number of other tags, listed below.

    It's much more common for someone to forget to close a 'b' tag
    than to actually use nested 'b' tags, and the BeautifulSoup class
    handles the common case. This class handles the not-co-common
    case: where you can't believe someone wrote what they did, but
    it's valid HTML and BeautifulSoup screwed up by assuming it
    wouldn't be."""

    # NOTE(review): 'strong' and 'big' appear twice in this tuple;
    # harmless, since buildTagMap deduplicates via dict keys.
    I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
     ('em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
      'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',
      'big')

    I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ('noscript',)

    # Extend BeautifulSoup's nesting map with the tags above, each
    # mapped to [] (nestable, no reset triggers).
    NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
class MinimalSoup(BeautifulSoup):
    """The MinimalSoup class is for parsing HTML that contains
    pathologically bad markup. It makes no assumptions about tag
    nesting, but it does know which tags are self-closing, that
    <script> tags contain Javascript and should not be parsed, that
    META tags may contain encoding information, and so on.

    This also makes it better for subclassing than BeautifulStoneSoup
    or BeautifulSoup."""

    # NOTE(review): buildTagMap's signature is (default, *args), so this
    # call passes 'noscript' as the *default* with no portions at all,
    # and the result is an empty map -- 'noscript' never becomes a
    # reset-nesting tag.  Possibly intended: buildTagMap(None, 'noscript').
    # TODO confirm before changing; an empty map matches the docstring's
    # "no assumptions about tag nesting".
    RESET_NESTING_TAGS = buildTagMap('noscript')
    NESTABLE_TAGS = {}
class BeautifulSOAP(BeautifulStoneSoup):
    """This class will push a tag with only a single string child into
    the tag's parent as an attribute. The attribute's name is the tag
    name, and the value is the string child. An example should give
    the flavor of the change:

    <foo><bar>baz</bar></foo>
     =>
    <foo bar="baz"><bar>baz</bar></foo>

    You can then access fooTag['bar'] instead of fooTag.barTag.string.

    This is, of course, useful for scraping structures that tend to
    use subelements instead of attributes, such as SOAP messages. Note
    that it modifies its input, so don't print the modified version
    out.

    I'm not sure how many people really want to use this class; let me
    know if you do. Mainly I like the name."""

    def popTag(self):
        """On every tag close, copy a single-string child up into the
        parent as an attribute (unless the parent already has one by
        that name), then delegate to the normal popTag."""
        if len(self.tagStack) > 1:
            tag = self.tagStack[-1]
            parent = self.tagStack[-2]
            # Ensure parent.attrMap exists before the has_key check below.
            parent._getAttrMap()
            if (isinstance(tag, Tag) and len(tag.contents) == 1 and
                isinstance(tag.contents[0], NavigableString) and
                not parent.attrMap.has_key(tag.name)):
                parent[tag.name] = tag.contents[0]
        BeautifulStoneSoup.popTag(self)
#Enterprise class names! It has come to our attention that some people
#think the names of the Beautiful Soup parser classes are too silly
#and "unprofessional" for use in enterprise screen-scraping. We feel
#your pain! For such-minded folk, the Beautiful Soup Consortium And
#All-Night Kosher Bakery recommends renaming this file to
#"RobustParser.py" (or, in cases of extreme enterprisiness,
#"RobustParserBeanInterface.class") and using the following
#enterprise-friendly class aliases:
class RobustXMLParser(BeautifulStoneSoup):
    """Enterprise-friendly alias for BeautifulStoneSoup."""
class RobustHTMLParser(BeautifulSoup):
    """Enterprise-friendly alias for BeautifulSoup."""
class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
    """Enterprise-friendly alias for ICantBelieveItsBeautifulSoup."""
class RobustInsanelyWackAssHTMLParser(MinimalSoup):
    """Enterprise-friendly alias for MinimalSoup."""
class SimplifyingSOAPParser(BeautifulSOAP):
    """Enterprise-friendly alias for BeautifulSOAP."""
######################################################
#
# Bonus library: Unicode, Dammit
#
# This class forces XML data into a standard format (usually to UTF-8
# or Unicode). It is heavily based on code from Mark Pilgrim's
# Universal Feed Parser. It does not rewrite the XML or HTML to
# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi
# (XML) and BeautifulSoup.start_meta (HTML).
# Autodetects character encodings.
# Download from http://chardet.feedparser.org/
try:
import chardet
# import chardet.constants
# chardet.constants._debug = 1
except ImportError:
chardet = None
# cjkcodecs and iconv_codec make Python know about more character encodings.
# Both are available from http://cjkpython.i18n.org/
# They're built in if you use Python 2.4.
try:
import cjkcodecs.aliases
except ImportError:
pass
try:
import iconv_codec
except ImportError:
pass
class UnicodeDammit:
"""A class for detecting the encoding of a *ML document and
converting it to a Unicode string. If the source encoding is
windows-1252, can replace MS smart quotes with their HTML or XML
equivalents."""
# This dictionary maps commonly seen values for "charset" in HTML
# meta tags to the corresponding Python codec names. It only covers
# values that aren't in Python's aliases and can't be determined
# by the heuristics in find_codec.
CHARSET_ALIASES = { "macintosh" : "mac-roman",
"x-sjis" : "shift-jis" }
def __init__(self, markup, overrideEncodings=[],
smartQuotesTo='xml', isHTML=False):
self.declaredHTMLEncoding = None
self.markup, documentEncoding, sniffedEncoding = \
self._detectEncoding(markup, isHTML)
self.smartQuotesTo = smartQuotesTo
self.triedEncodings = []
if markup == '' or isinstance(markup, unicode):
self.originalEncoding = None
self.unicode = unicode(markup)
return
u = None
for proposedEncoding in overrideEncodings:
u = self._convertFrom(proposedEncoding)
if u: break
if not u:
for proposedEncoding in (documentEncoding, sniffedEncoding):
u = self._convertFrom(proposedEncoding)
if u: break
# If no luck and we have auto-detection library, try that:
if not u and chardet and not isinstance(self.markup, unicode):
u = self._convertFrom(chardet.detect(self.markup)['encoding'])
# As a last resort, try utf-8 and windows-1252:
if not u:
for proposed_encoding in ("utf-8", "windows-1252"):
u = self._convertFrom(proposed_encoding)
if u: break
self.unicode = u
if not u: self.originalEncoding = None
def _subMSChar(self, orig):
"""Changes a MS smart quote character to an XML or HTML
entity."""
sub = self.MS_CHARS.get(orig)
if isinstance(sub, tuple):
if self.smartQuotesTo == 'xml':
sub = '&#x%s;' % sub[1]
else:
sub = '&%s;' % sub[0]
return sub
def _convertFrom(self, proposed):
proposed = self.find_codec(proposed)
if not proposed or proposed in self.triedEncodings:
return None
self.triedEncodings.append(proposed)
markup = self.markup
# Convert smart quotes to HTML if coming from an encoding
# that might have them.
if self.smartQuotesTo and proposed.lower() in("windows-1252",
"iso-8859-1",
"iso-8859-2"):
markup = re.compile("([\x80-\x9f])").sub \
(lambda(x): self._subMSChar(x.group(1)),
markup)
try:
# print "Trying to convert document to %s" % proposed
u = self._toUnicode(markup, proposed)
self.markup = u
self.originalEncoding = proposed
except Exception, e:
# print "That didn't work!"
# print e
return None
#print "Correct encoding: %s" % proposed
return self.markup
def _toUnicode(self, data, encoding):
'''Given a string and its encoding, decodes the string into Unicode.
%encoding is a string recognized by encodings.aliases'''
# strip Byte Order Mark (if present)
if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
and (data[2:4] != '\x00\x00'):
encoding = 'utf-16be'
data = data[2:]
elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
and (data[2:4] != '\x00\x00'):
encoding = 'utf-16le'
data = data[2:]
elif data[:3] == '\xef\xbb\xbf':
encoding = 'utf-8'
data = data[3:]
elif data[:4] == '\x00\x00\xfe\xff':
encoding = 'utf-32be'
data = data[4:]
elif data[:4] == '\xff\xfe\x00\x00':
encoding = 'utf-32le'
data = data[4:]
newdata = unicode(data, encoding)
return newdata
def _detectEncoding(self, xml_data, isHTML=False):
"""Given a document, tries to detect its XML encoding."""
xml_encoding = sniffed_xml_encoding = None
try:
if xml_data[:4] == '\x4c\x6f\xa7\x94':
# EBCDIC
xml_data = self._ebcdic_to_ascii(xml_data)
elif xml_data[:4] == '\x00\x3c\x00\x3f':
# UTF-16BE
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
and (xml_data[2:4] != '\x00\x00'):
# UTF-16BE with BOM
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x3f\x00':
# UTF-16LE
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
(xml_data[2:4] != '\x00\x00'):
# UTF-16LE with BOM
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\x00\x3c':
# UTF-32BE
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x00\x00':
# UTF-32LE
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\xfe\xff':
# UTF-32BE with BOM
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\xff\xfe\x00\x00':
# UTF-32LE with BOM
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
elif xml_data[:3] == '\xef\xbb\xbf':
# UTF-8 with BOM
sniffed_xml_encoding = 'utf-8'
xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
else:
sniffed_xml_encoding = 'ascii'
pass
except:
xml_encoding_match = None
xml_encoding_match = re.compile(
'^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
if not xml_encoding_match and isHTML:
regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I)
xml_encoding_match = regexp.search(xml_data)
if xml_encoding_match is not None:
xml_encoding = xml_encoding_match.groups()[0].lower()
if isHTML:
self.declaredHTMLEncoding = xml_encoding
if sniffed_xml_encoding and \
(xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
'iso-10646-ucs-4', 'ucs-4', 'csucs4',
'utf-16', 'utf-32', 'utf_16', 'utf_32',
'utf16', 'u16')):
xml_encoding = sniffed_xml_encoding
return xml_data, xml_encoding, sniffed_xml_encoding
def find_codec(self, charset):
return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
or (charset and self._codec(charset.replace("-", ""))) \
or (charset and self._codec(charset.replace("-", "_"))) \
or charset
def _codec(self, charset):
if not charset: return charset
codec = None
try:
codecs.lookup(charset)
codec = charset
except (LookupError, ValueError):
pass
return codec
    # Lazily built 256-byte translation table (EBCDIC byte -> ASCII byte),
    # cached on the class so it is only constructed once per process.
    EBCDIC_TO_ASCII_MAP = None
    def _ebcdic_to_ascii(self, s):
        """Translate the EBCDIC-encoded byte string *s* to its ASCII form.

        Builds the class-level translation table on first use from the
        hard-coded EBCDIC->ASCII code mapping below (Python 2
        ``string.maketrans`` works on byte strings).
        """
        c = self.__class__
        if not c.EBCDIC_TO_ASCII_MAP:
            # emap[i] is the ASCII byte value for EBCDIC byte value i.
            emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
                    16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
                    128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
                    144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
                    32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
                    38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
                    45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
                    186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
                    195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
                    201,202,106,107,108,109,110,111,112,113,114,203,204,205,
                    206,207,208,209,126,115,116,117,118,119,120,121,122,210,
                    211,212,213,214,215,216,217,218,219,220,221,222,223,224,
                    225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
                    73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
                    82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
                    90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
                    250,251,252,253,254,255)
            import string
            # Python 2 byte-for-byte table; removed from the string module
            # in Python 3 (str.maketrans/bytes.maketrans replace it).
            c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
                ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
        return s.translate(c.EBCDIC_TO_ASCII_MAP)
MS_CHARS = { '\x80' : ('euro', '20AC'),
'\x81' : ' ',
'\x82' : ('sbquo', '201A'),
'\x83' : ('fnof', '192'),
'\x84' : ('bdquo', '201E'),
'\x85' : ('hellip', '2026'),
'\x86' : ('dagger', '2020'),
'\x87' : ('Dagger', '2021'),
'\x88' : ('circ', '2C6'),
'\x89' : ('permil', '2030'),
'\x8A' : ('Scaron', '160'),
'\x8B' : ('lsaquo', '2039'),
'\x8C' : ('OElig', '152'),
'\x8D' : '?',
'\x8E' : ('#x17D', '17D'),
'\x8F' : '?',
'\x90' : '?',
'\x91' : ('lsquo', '2018'),
'\x92' : ('rsquo', '2019'),
'\x93' : ('ldquo', '201C'),
'\x94' : ('rdquo', '201D'),
'\x95' : ('bull', '2022'),
'\x96' : ('ndash', '2013'),
'\x97' : ('mdash', '2014'),
'\x98' : ('tilde', '2DC'),
'\x99' : ('trade', '2122'),
'\x9a' : ('scaron', '161'),
'\x9b' : ('rsaquo', '203A'),
'\x9c' : ('oelig', '153'),
'\x9d' : '?',
'\x9e' : ('#x17E', '17E'),
'\x9f' : ('Yuml', ''),}
#######################################################################
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
import sys
soup = BeautifulSoup(sys.stdin)
print soup.prettify()
| Python |
'''
Created on May 2, 2010
@author: StevenNorris
'''
import SavannahParsers
import sys
import traceback
def run(utils,data_source_id):
    """Parse the stored HTML for every queued project of *data_source_id*.

    Phase 1 -- 'indexparsing' jobs: run the SavannahParsers extractors over
    the saved project index page and UPDATE the matching sv_projects row,
    then advance the job to 'skillsparsing'.
    Phase 2 -- 'skillsparsing' jobs: for every member of the project, parse
    the saved resume/info pages into sv_developers and sv_dev_skills, then
    mark the job 'completed' (unless any member failed).

    Jobs are (target_name, GNU_NON, link) tuples from utils.get_job.
    Parse failures are recorded via utils.post_error and processing moves
    on to the next job; utils.error True aborts the process via sys.exit().
    """
    #Parses indexes for projects
    print("\nParsing Indexes")
    job=utils.get_job(data_source_id,'indexparsing')
    if (utils.error):
        sys.exit()
    while (job!=None):
        try:
            #parses and updates database
            print("Parsing for "+job[0])
            # get_index_html returns a one-column DB row; [0] unwraps the HTML
            index_html=utils.get_index_html(job[0],data_source_id)
            index_html=index_html[0]
            description=SavannahParsers.parse_index(index_html)
            id_num=SavannahParsers.parse_project_id(index_html)
            dev_count=SavannahParsers.parse_member_num(index_html)
            long_name=SavannahParsers.parse_project_longname(index_html)
            group=SavannahParsers.parse_group_type(index_html)
            mail=SavannahParsers.parse_mailing_lists(index_html)
            bugs=SavannahParsers.parse_bugs(index_html)
            tech=SavannahParsers.parse_tech(index_html)
            looking=SavannahParsers.parse_looking(index_html)
            task=SavannahParsers.parse_task(index_html)
            patch=SavannahParsers.parse_patch(index_html)
            license=SavannahParsers.parse_license(index_html)
            status=SavannahParsers.parse_dev_status(index_html)
            update='''UPDATE sv_projects
                SET description=%s,
                id_num=%s,
                project_dev_count=%s,
                project_long_name=%s,
                project_group_type=%s,
                number_of_mailing_lists=%s,
                bugs_open=%s,
                bugs_total=%s,
                techsupp_open=%s,
                techsupp_total=%s,
                looking_for_number=%s,
                taskmgr_open=%s,
                taskmgr_total=%s,
                patchmgr_open=%s,
                patchmgr_total=%s,
                license=%s,
                development_status=%s,
                date_collected=NOW()
                WHERE datasource_id=%s
                AND project_name=%s
                AND gnu_or_non=%s'''
            utils.db_insert(update,description,id_num,dev_count,long_name,group,mail,bugs[0],bugs[1],tech[0],tech[1],looking,
                            task[0],task[1],patch[0],patch[1],license,status,data_source_id,job[0],job[1])
            #change status, get new job, and check for errors
            utils.change_status('skillsparsing','indexparsing',data_source_id,job[0])
            job=utils.get_job(data_source_id,'indexparsing')
            if (utils.error):
                sys.exit()
        #posts error in case of faulty index parsing
        except:
            print('!!!!WARNING!!!! Index pages did not parse correctly.')
            utils.post_error(traceback.format_exc(),job[0],data_source_id,job[1])
            job=utils.get_job(data_source_id,'indexparsing')
            if (utils.error):
                sys.exit()
    #parses skills pages for projects
    print("\nParsing Skills")
    job=utils.get_job(data_source_id,'skillsparsing')
    if (utils.error):
        sys.exit()
    while (job!=None):
        try:
            #gathers a list of members
            members=utils.get_members(job[0],data_source_id)
            error=False
            for member in members:
                error=False
                try:
                    #parses name and description for each member, then updates database
                    # each member row is a one-column tuple; [0] is the login name
                    member=member[0]
                    print("Parsing for "+member+" for project "+job[0])
                    skillshtml=utils.get_skills_html(member,data_source_id)
                    skillshtml=skillshtml[0]
                    name=SavannahParsers.parse_member_name(skillshtml)
                    description=SavannahParsers.parse_member_description(skillshtml)
                    infohtml=utils.get_info_html(member,data_source_id)
                    infohtml=infohtml[0]
                    member_since=SavannahParsers.parse_time(infohtml)
                    update='''UPDATE sv_developers SET real_name=%s, description=%s, member_since=%s, date_collected=NOW()
                        WHERE dev_loginname=%s
                        AND datasource_id=%s'''
                    utils.db_insert(update,name,description,member_since,member,data_source_id)
                    #parses skills for each member then updates database
                    skill_sets=SavannahParsers.parse_skills(skillshtml)
                    for skillset in skill_sets:
                        insert='''INSERT IGNORE INTO sv_dev_skills (datasource_id,dev_loginname,skill,level,experience,date_collected)
                            VALUES(%s,%s,%s,%s,%s,NOW())'''
                        utils.db_insert(insert,data_source_id,member,skillset[0],skillset[1],skillset[2])
                #posts error in case of faulty member parsing
                except:
                    print('!!!!WARNING!!!! Skills pages did not parse correctly.')
                    utils.post_error(traceback.format_exc(),job[0],data_source_id,job[1])
                    error=True
                    if (utils.error):
                        sys.exit()
            #checks for faulty parsing on one member, so as not to change error status if it exists
            if(not error):
                utils.change_status('completed','skillsparsing',data_source_id,job[0])
            #gathers new job and checks for errors
            job=utils.get_job(data_source_id,'skillsparsing')
            if (utils.error):
                sys.exit()
        #posts error in case of faulty skills parsing
        except:
            print('!!!!WARNING!!!! Skills pages did not parse correctly.')
            utils.post_error(traceback.format_exc(),job[0],data_source_id,job[1])
            job=utils.get_job(data_source_id,'skillsparsing')
            if (utils.error):
                sys.exit()
'''
Created on May 2, 2010
This module performs the clean up for skills jobs.
@author: StevenNorris
'''
import sys
import traceback
def run(utils,datasource_id):
    """Clean up failed 'skillsHTML' jobs for *datasource_id*.

    For each job whose previous stage was 'skillsHTML': delete the
    partially collected developer-project rows and put the job back to
    the 'skillsHTML' stage so it can be re-collected.  Failures are
    recorded with utils.post_error; utils.error True aborts the process.
    """
    #Cleans up skills jobs
    print("\nStarting skills clean up.")
    #Gets Job
    job=utils.get_cleanup_job(datasource_id,'skillsHTML')
    if utils.error:
        sys.exit()
    while job is not None:
        #Cleans up for the job
        try:
            unixname=job[0]
            print("Cleaning up for "+unixname)
            utils.delete_skills(unixname,datasource_id)
            utils.change_status('skillsHTML','Clean_Up',datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'skillsHTML')
            if utils.error:
                sys.exit()
        #If process fails, post error and get new job.
        # BUG FIX: was a bare 'except:', which also swallowed the
        # SystemExit raised by sys.exit() above.
        except Exception:
            print("!!!!WARNING!!!! Clean up skills for "+unixname+" failed.")
            # BUG FIX: post_error takes (message, target, datasource_id, type);
            # the old call passed only three arguments and raised TypeError.
            utils.post_error('Clean_Up(skills):\n'+traceback.format_exc(),unixname,datasource_id,job[1])
            job=utils.get_cleanup_job(datasource_id,'skillsHTML')
            if utils.error:
                sys.exit()
| Python |
'''
Created on Jun 5, 2009
@author: Steven Norris
This module provides basic utilities for the FLOSS mole spiders.
'''
import MySQLdb
#import httplib
import traceback
import urllib2
class SavannahUtils:
    """DB + HTTP helpers shared by the Savannah spider/parse/cleanup scripts.

    The constructor reads a five-line credentials file (host, port, user,
    password, database) and opens a MySQL connection.  ``error`` is the
    fatal-error flag the caller scripts poll after every operation; most
    helpers print a warning and return None on failure instead of raising.
    """
    #forms initial connection with database
    def __init__(self,file_name):
        """Connect using the credentials stored in *file_name*.

        Raises Exception when the file cannot be opened.  A failed
        database connection is reported on stdout and sets ``error``.
        """
        try:
            dbfile = open(file_name, 'r')
        except Exception:
            raise Exception("Database file error: dbInfo.txt")
        try:
            self.host = dbfile.readline().strip()
            self.port = int(dbfile.readline().strip())
            self.username = dbfile.readline().strip()
            self.password = dbfile.readline().strip()
            self.database = dbfile.readline().strip()
            # BUG FIX: the config file was never closed.
            dbfile.close()
            # NOTE(review): self.port is read from the file but never passed
            # to MySQLdb.connect -- confirm whether that is intended.
            self.db=MySQLdb.connect(host=self.host, user=self.username, passwd=self.password, db=self.database)
            self.cursor = self.db.cursor()
            self.error=False
        except Exception:
            print("!!!WARNINGS!!! Database connection failed.")
            # BUG FIX: the flag must exist on the failure path too; without
            # it every later ``utils.error`` check raised AttributeError
            # instead of stopping cleanly.
            self.error=True
    def get_page(self,url):
        """Fetch *url* and return the response body, or None on failure."""
        try:
            response = urllib2.urlopen(url)
            html = response.read()
            return html
        except Exception:
            print ("The page request failed.")
    def db_insert(self,query_string,*params):
        """Execute a parameterized INSERT/UPDATE/DELETE statement.

        NOTE(review): no explicit commit is issued, so this relies on the
        connection's autocommit behaviour -- confirm; MySQLdb connections
        default to autocommit off.
        """
        try:
            self.cursor.execute(query_string, params)
        except Exception:
            print("!!!!WARNING!!!! Insertion into "+self.database+" failed.\n")
    def get_job(self, datasource_id, status):
        """Atomically claim one job with *status* for *datasource_id*.

        Locks sv_jobs, selects a (target_name, GNU_NON, link) row, flips
        its status to 'in_progress', unlocks and returns the row tuple.
        Returns None when no job matched (the failing UPDATE on the None
        row is caught below).
        """
        lock = '''LOCK TABLE sv_jobs READ, sv_jobs AS t WRITE'''
        select = '''SELECT target_name,GNU_NON,link
            FROM sv_jobs AS t
            WHERE status = %s
            AND datasource_id = %s
            LIMIT 1'''
        update='''UPDATE sv_jobs AS t
            SET status='in_progress', last_modified=NOW()
            WHERE datasource_id=%s
            AND target_name=%s
            AND GNU_NON=%s'''
        unlock = '''UNLOCK TABLES'''
        try:
            self.cursor.execute(lock)
            self.cursor.execute(select, (status,datasource_id))
            result = self.cursor.fetchone()
            self.cursor.execute(update,(datasource_id,result[0],result[1]))
            self.cursor.execute(unlock)
            return result
        except Exception:
            print ("Finding job failed.")
            # always release the table locks, even when no row matched
            self.cursor.execute(unlock)
    #this method allows for status changes
    def change_status(self,status,previous_stage,datasource_id,target):
        """Move job *target* to *status*, recording *previous_stage*."""
        update='''UPDATE sv_jobs
            SET status=%s, previous_stage=%s, last_modified=NOW()
            WHERE datasource_id=%s
            AND target_name=%s'''
        try:
            self.cursor.execute(update,(status,previous_stage,datasource_id,target))
        except Exception:
            print('!!!!WARNING!!!! Status '+status+' did not update correctly for '+target+' '+datasource_id+'.')
            self.error=True
    #this method allows for the retrieval of a list of members for a project
    def get_members(self,project_name,datasource_id):
        """Return all dev_loginname rows for *project_name*, or None on failure."""
        try:
            gather='''SELECT dev_loginname FROM sv_developer_projects
                WHERE project_name=%s
                AND datasource_id=%s'''
            self.cursor.execute(gather,(project_name,datasource_id))
            return self.cursor.fetchall()
        except Exception:
            print("!!!WARNING!!! Retrieving members failed.")
    #this method allows for the retrieval of the membershtml
    def get_member_html(self,project,datasource_id):
        """Return the stored member-list HTML row for *project*, or None."""
        try:
            gather='''SELECT memberhtml FROM sv_project_indexes
                WHERE project_name=%s AND datasource_id=%s
                LIMIT 1'''
            self.cursor.execute(gather,(project,datasource_id))
            return self.cursor.fetchone()
        except Exception:
            print("!!!WARNING!!! Retrieving memberhtml failed.")
    #this method allows for the retrieval of the indexhtml
    def get_index_html(self,project,datasource_id):
        """Return the stored index-page HTML row for *project*, or None."""
        try:
            gather='''SELECT indexhtml FROM sv_project_indexes
                WHERE project_name=%s AND datasource_id=%s
                LIMIT 1'''
            self.cursor.execute(gather,(project,datasource_id))
            return self.cursor.fetchone()
        except Exception:
            print("!!!WARNING!!! Retrieving indexhtml failed.")
    #this method allows for the retrieval of the skillshtml
    def get_skills_html(self,username,datasource_id):
        """Return the stored resume-page HTML row for *username*, or None."""
        try:
            gather='''SELECT skillshtml FROM sv_developers
                WHERE dev_loginname=%s AND datasource_id=%s
                LIMIT 1'''
            self.cursor.execute(gather,(username,datasource_id))
            return self.cursor.fetchone()
        except Exception:
            print("!!!Warning!!! Retrieving skillshtml failed.")
    #this method allows for the retrieval of the infohtml
    def get_info_html(self,username,datasource_id):
        """Return the stored user-info HTML row for *username*, or None."""
        try:
            gather='''SELECT infohtml FROM sv_developers
                WHERE dev_loginname=%s AND datasource_id=%s
                LIMIT 1'''
            self.cursor.execute(gather,(username,datasource_id))
            return self.cursor.fetchone()
        except Exception:
            # BUG FIX: the old message said "skillshtml" (copy-paste).
            print("!!!Warning!!! Retrieving infohtml failed.")
    #this method allows for error posting
    def post_error(self,message,target,datasource_id,type):
        """Record *message* on job *target*, prefixed with the stage it failed in.

        Sets the job's status to 'error'; *type* is the GNU_NON column value.
        """
        update='''UPDATE sv_jobs
            SET error_msg=%s, status='error', last_modified=NOW()
            WHERE datasource_id=%s
            AND target_name=%s
            AND GNU_NON=%s'''
        gather='''SELECT status FROM sv_jobs
            WHERE datasource_id=%s
            AND target_name=%s
            AND GNU_NON=%s
            LIMIT 1'''
        try:
            self.cursor.execute(gather,(datasource_id,target,type))
            fail_stage=self.cursor.fetchone()
            fail_stage=fail_stage[0]
            message=fail_stage+":\n"+message
            self.cursor.execute(update,(message,datasource_id,target,type))
        except Exception:
            print('!!!!WARNING!!!! Error '+message+'could not be posted to '+target+'.')
            self.error=True
    def get_cleanup_job(self, datasource_id, status):
        """Claim one stuck job ('In_Progress' whose previous stage is *status*).

        Flips it to 'Clean_Up' and returns the (target_name, GNU_NON, link)
        row, or None when nothing needs cleaning.
        """
        lock = '''LOCK TABLE sv_jobs READ, sv_jobs AS t WRITE'''
        select = '''SELECT target_name,GNU_NON,link
            FROM sv_jobs AS t
            WHERE status = "In_Progress"
            AND previous_stage = %s
            AND datasource_id = %s
            LIMIT 1'''
        update='''UPDATE sv_jobs AS t
            SET status='Clean_Up', last_modified=NOW()
            WHERE datasource_id=%s
            AND target_name=%s'''
        unlock = '''UNLOCK TABLES'''
        try:
            self.cursor.execute(lock)
            self.cursor.execute(select, (status,datasource_id))
            result = self.cursor.fetchone()
            self.cursor.execute(update,(datasource_id,result[0]))
            self.cursor.execute(unlock)
            return result
        except Exception:
            print ("Finding job failed.")
            self.cursor.execute(unlock)
    #This method allows for the deletion of a project and it's indexes
    def delete_index(self,unixname,datasource_id):
        """Delete the stored index rows and project row for *unixname*."""
        try:
            update="""DELETE FROM sv_project_indexes WHERE project_name=%s AND datasource_id=%s"""
            update2="""DELETE FROM sv_projects WHERE project_name=%s AND datasource_id=%s"""
            self.cursor.execute(update,(unixname,datasource_id))
            self.cursor.execute(update2,(unixname,datasource_id))
        except Exception:
            print("!!!!WARNING!!!! Deletion of index failed.")
            print (traceback.format_exc())
    #This method allows for the deletion of a project's skills indexes
    def delete_skills(self,unixname,datasource_id):
        """Delete the developer-project rows collected for *unixname*."""
        try:
            update="""DELETE FROM sv_developer_projects WHERE project_name=%s AND datasource_id=%s"""
            self.cursor.execute(update,(unixname,datasource_id))
        except Exception:
            # BUG FIX: the old message said "index" (copy-paste).
            print("!!!!WARNING!!!! Deletion of skills failed.")
            print (traceback.format_exc())
'''
Created on May 26, 2009
@author: Steven Norris
This program runs as a spider for the the savannah.gnu.org to add information about
both the GNU projects and non-GNU projects to a database for further investigation.
RUN INSTRUCTIONS
Run from command line using this format
[Interpret] SavannahSpider.py [DatasourceID] [Test mode True/False]
Test mode is based on string comparison so make sure capitalization and spelling are exact.
'''
from SavannahUtils import SavannahUtils
import sys
import SavannahIndex
import SavannahSkills
import SavannahParsing
'''
Runs the spiders for savannah.gnu.org
'''
def main(argv):
    """Entry point: run the index, skills and parsing spiders in order.

    argv: [script, datasource_id, 'True'|'False' (test mode)].  Test mode
    only switches the DB credentials file to dbInfoTest.txt; the string
    comparison is exact, so capitalization matters.  Exits with a usage
    message when the arguments are missing.
    """
    try:
        data_source_id=argv[1]
        test=argv[2]
    except IndexError:
        print("Format arguments thusly: [program] [datasource_id] [True/False(TestMode)]")
        sys.exit()
    #checks for test mode
    if test=='True':
        utils=SavannahUtils("dbInfoTest.txt")
    else:
        utils=SavannahUtils("dbInfo.txt")
    #does the spidering
    SavannahIndex.run(utils,data_source_id)
    SavannahSkills.run(utils,data_source_id)
    SavannahParsing.run(utils,data_source_id)

# BUG FIX: guard the entry point so importing this module does not
# immediately start a crawl.
if __name__ == '__main__':
    main(sys.argv)
| Python |
'''
Created on May 2, 2010
This module performs the clean up for parsing jobs.
@author: StevenNorris
'''
import sys
import traceback
def run(utils,datasource_id):
    """Clean up failed 'indexparsing' and 'skillsparsing' jobs.

    Each stuck job is simply put back to its parsing stage so it can be
    retried (parsing leaves nothing to delete).  Failures are recorded
    via utils.post_error; utils.error True aborts the process.
    """
    #Cleans up parsing jobs
    print("\nStarting parsing clean up.")
    print("\nIndex parsing clean up.")
    _cleanup_stage(utils, datasource_id, 'indexparsing', 'Clean_Up(indexParsing)')
    print("\nSkills parsing clean up.")
    # BUG FIX: the skills phase used to label its errors
    # 'Clean_Up(indexParsing)' (copy-paste).
    _cleanup_stage(utils, datasource_id, 'skillsparsing', 'Clean_Up(skillsparsing)')

def _cleanup_stage(utils, datasource_id, stage, error_label):
    """Re-queue every stuck job whose previous stage was *stage*."""
    job = utils.get_cleanup_job(datasource_id, stage)
    if utils.error:
        sys.exit()
    while job is not None:
        try:
            unixname = job[0]
            print("Cleaning up for "+unixname)
            utils.change_status(stage, 'Clean_Up', datasource_id, unixname)
            job = utils.get_cleanup_job(datasource_id, stage)
            if utils.error:
                sys.exit()
        # BUG FIX: was a bare 'except:', which also swallowed the
        # SystemExit raised by sys.exit() above.
        except Exception:
            print("!!!!WARNING!!!! Clean up "+stage+" for "+unixname+" failed.")
            # BUG FIX: post_error takes (message, target, datasource_id, type);
            # the old call passed only three arguments and raised TypeError.
            utils.post_error(error_label+':\n'+traceback.format_exc(), unixname, datasource_id, job[1])
            job = utils.get_cleanup_job(datasource_id, stage)
            if utils.error:
                sys.exit()
| Python |
'''
Created on Jul 16, 2009
@author: Steven Norris
This program creates the jobs for savannah.gnu.org
RUN INSTRUCTIONS
Run from command line using this format
[Interpret] SavannahJobs.py [DatasourceID] [Test mode True/False]
Test mode is based on string comparison so make sure capitalization and spelling are exact.
'''
from HTMLParser import HTMLParser
import re
from SavannahUtils import SavannahUtils
import sys
BASE_SITE='savannah.gnu.org'
BASE_SITE2='savannah.nongnu.org'
'''
This spider searches the projects page for a list of projects.
'''
class SpiderSavannahProjectsList(HTMLParser):
    """Collects '../projects/...' hrefs from a Savannah search-results page.

    Every matching link is stored, with the leading '..' stripped, in the
    instance's ``check_links`` list.
    """
    def __init__(self):
        # BUG FIX: check_links used to be a class attribute, so every
        # instance shared (and kept appending to) the same list.
        HTMLParser.__init__(self)
        self.check_links = []
    #allows for the links to be cleared
    def clear_check_links(self):
        self.check_links = []
    #handles the start tag for the projects page link and adds the links to check_links
    def handle_starttag(self,tag,attrs):
        if tag=='a':
            # assumes href is the first attribute, as on the scraped pages
            link=attrs[0][1]
            if re.search('\.\./projects/',link)!=None:
                self.check_links.append(link[2:len(link)])
def spiderNumbers(html):
    """Extract the (GNU, non-GNU) project counts from the front page.

    Returns a tuple of digit strings taken from the two search links,
    or None when either count is missing from *html*.
    """
    gnu_pattern = """<a href="/search/\?type_of_search=soft&words=%%%&type=1" class="center">(?P<GNU>\d+?)\D+?</a>"""
    non_pattern = """<a href="/search/\?type_of_search=soft&words=%%%&type=2" class="center">(?P<NON>\d+?)\D+?</a>"""
    gnu_match = re.search(gnu_pattern, html)
    if gnu_match is None:
        return None
    non_match = re.search(non_pattern, html)
    if non_match is None:
        return None
    return (gnu_match.group('GNU'), non_match.group('NON'))
def main(argv):
    """Queue an 'indexHTML' job in sv_jobs for every Savannah project.

    argv: [script, datasource_id, 'True'|'False' (test mode)].  Test mode
    reads dbInfoTest.txt and only queues the first 20 projects per group.
    The front page is scraped for the GNU / non-GNU project counts, which
    are then used as max_rows for the two full search listings.
    """
    #sets arguments
    try:
        data_source_id=argv[1]
        test=argv[2]
    except IndexError:
        print ("""RUN INSTRUCTIONS\n
Run this module from command line with the following format:\n
[Interpreter] SavannahJobs.py [datasource_id] [Test True/False]\n
Test is a string variable. Be sure to use a capital 'T' to denote test mode.\n
Otherwise use 'F'.""")
        sys.exit()
    print ("Creating jobs for "+str(data_source_id)+":\n")
    #creates spider
    spider=SpiderSavannahProjectsList()
    #checks for test mode and acts accordingly
    if(test=='True'):
        print("TEST MODE ACTIVATED")
        utils=SavannahUtils("dbInfoTest.txt")
    else:
        utils=SavannahUtils("dbInfo.txt")
    #Getting project numbers
    print("Getting project numbers.")
    main_page=utils.get_page("http://savannah.gnu.org/")
    if(main_page):
        numbers=spiderNumbers(main_page)
        GNUnum=numbers[0]
        NONnum=numbers[1]
        #Creates jobs for GNU projects
        print("Creating GNU jobs.")
        page=utils.get_page('http://'+BASE_SITE+'/search/?type_of_search=soft&words=%2A&type=1&offset=0&max_rows='+GNUnum+'#results')
        if(page):
            page_string=str(page)
            spider.feed(page_string)
            # test mode caps the crawl at the first 20 projects
            if(test=='True'):
                end=20
            else:
                end=len(spider.check_links)
            for link in spider.check_links[0:end]:
                insert='''INSERT INTO sv_jobs (
                    target_name,status,datasource_id,GNU_NON,link,last_modified)
                    VALUES(%s,'indexHTML',%s,'GNU',%s,NOW())'''
                try:
                    # link looks like '/projects/<name>'; [10:] is the name
                    utils.db_insert(insert,link[10:len(link)],data_source_id,link)
                    if (utils.error):
                        sys.exit()
                except Exception:
                    print("!!!!WARNING!!!! Project "+link+" did not insert correctly into sv_jobs.")
        else:
            print("!!!!WARNING!!!! Pages not collected properly for GNU projects.")
            sys.exit()
        #Creates jobs for NON-GNU projects
        print("\nCreating NON-GNU jobs.")
        spider.clear_check_links()
        page=utils.get_page('http://'+BASE_SITE+'/search/?type_of_search=soft&words=%2A&type=2&offset=0&max_rows='+NONnum+'#results')
        if(page):
            page_string=str(page)
            spider.feed(page_string)
            if(test=='True'):
                end=20
            else:
                end=len(spider.check_links)
            for link in spider.check_links[0:end]:
                insert='''INSERT INTO sv_jobs (
                    target_name,status,datasource_id,GNU_NON,link,last_modified)
                    VALUES(%s,'indexHTML',%s,'NONGNU',%s,NOW())'''
                try:
                    utils.db_insert(insert,link[10:len(link)],data_source_id,link)
                    if (utils.error):
                        sys.exit()
                except Exception:
                    print("!!!!WARNING!!!! Project "+link+" did not insert correctly into sv_jobs.")
        else:
            print("!!!WARNING!!!! Pages did not collect properly for NON-GNU projects.")
            sys.exit()
    else:
        print("!!!!WARNING!!!! Numbers for projects did not collect properly.")
        sys.exit()

# BUG FIX: guard the entry point so importing this module does not
# immediately start creating jobs.
if __name__ == '__main__':
    main(sys.argv)
| Python |
'''
Created on Jun 14, 2009
@author: Steven Norris
This module parses the html documents found on savannah.
'''
import re
from BeautifulSoup import BeautifulSoup
def parse_index(html):
    """Return the project description from the 'indexcenter' div, or None.

    Strips simple markup (<p>, <br>, one known GPL anchor) from the text.
    """
    matches = re.findall('<div class="indexcenter\">.+<!-- end indexcenter -->', html, re.DOTALL)
    if not matches:
        return None
    description = matches[0][25:-30]
    junk = ('<p>', '</p>', '<br/>', '<br />',
            '<a href="http://www.gnu.org/licenses/old-licenses/gpl-2.0.html">',
            '</a>')
    for fragment in junk:
        description = description.replace(fragment, '')
    return description
def parse_project_id(html):
    """Return the project id shown as 'Id: <strong>#NNN</strong>', or None."""
    match = re.search('Id: <strong>#(.+?)</strong>', html)
    if match is None:
        return None
    return match.group(1)
def parse_project_longname(html):
    """Return the project's display name from the index page, or None.

    HTML entities are decoded via BeautifulSoup and the result is
    UTF-8 encoded; an encoding failure yields a literal warning string.
    """
    pattern = '>Name: <strong>.+?</strong></span></div><div class="boxitemalt">'
    found = re.findall(pattern, html)
    if not found:
        return None
    name = found[0][15:-46]
    if name == '':
        return None
    soup = BeautifulSoup(name, convertEntities=BeautifulSoup.HTML_ENTITIES)
    name = soup.contents[0]
    try:
        return name.encode("utf-8")
    except:
        return '!!!Warning!!! Encoding Error on Longname!'
def parse_member_num(html):
    """Return the active-member count from the index page, or None."""
    match = re.search('<strong>(.+?)</strong> active member', html)
    return match.group(1) if match else None
def parse_group_type(html):
    """Return the project's group type from the index page, or None."""
    match = re.search('Group Type: <strong>(.+?)</strong>', html)
    return match.group(1) if match else None
def parse_mailing_lists(html):
    """Return the public mailing-list count from the index page, or None."""
    match = re.search('<strong>(.+?)</strong> public mailing-list', html)
    return match.group(1) if match else None
def parse_bugs(html):
    """Return the (open, total) bug-tracker counts as strings.

    (None, None) when the Bug Tracker section is absent; an IndexError
    propagates if the section exists but lacks the counts (callers catch).
    """
    section = re.findall('Bug Tracker</a> (.+?total)', html)
    if not section:
        return (None, None)
    text = section[0]
    open_item = re.findall('<strong>.+?</strong> open item', text)[0]
    total_item = re.findall(', <strong>.+?</strong> total', text)[0]
    return (open_item[8:-19], total_item[10:-15])
def parse_tech(html):
    """Return the (open, total) tech-support counts as strings.

    (None, None) when the Tech Support Manager section is absent.
    """
    section = re.findall('Tech Support Manager</a> (.+?total)', html)
    if not section:
        return (None, None)
    text = section[0]
    open_item = re.findall('<strong>.+?</strong> open item', text)[0]
    total_item = re.findall(', <strong>.+?</strong> total', text)[0]
    return (open_item[8:-19], total_item[10:-15])
def parse_task(html):
    """Return the (open, total) task-manager counts as strings.

    (None, None) when the Task Manager section is absent.
    """
    section = re.findall('Task Manager</a> (.+?total)', html)
    if not section:
        return (None, None)
    text = section[0]
    open_item = re.findall('<strong>.+?</strong> open item', text)[0]
    total_item = re.findall(', <strong>.+?</strong> total', text)[0]
    return (open_item[8:-19], total_item[10:-15])
def parse_patch(html):
    """Return the (open, total) patch-manager counts as strings.

    (None, None) when the Patch Manager section is absent.
    """
    section = re.findall('Patch Manager</a> (.+?total)', html)
    if not section:
        return (None, None)
    text = section[0]
    open_item = re.findall('<strong>.+?</strong> open item', text)[0]
    total_item = re.findall(', <strong>.+?</strong> total', text)[0]
    return (open_item[8:-19], total_item[10:-15])
def parse_looking(html):
    """Return the number of contributors the project is looking for, or None."""
    match = re.search('<strong>(.+?)</strong> contributor', html)
    return match.group(1) if match else None
def parse_license(html):
    """Return the license name from the 'License:' line, or None.

    If the license is an anchor, the anchor text is returned; otherwise
    the plain text between 'License: ' and '<br />'.
    """
    line = re.findall('License: .+?<br />', html)
    if not line:
        return None
    text = line[0]
    anchored = re.findall('>.+?</a>', text)
    if anchored:
        return anchored[0][1:-4]
    return text[9:-6]
def parse_dev_status(html):
    """Return the development-status text from the index page, or None.

    The slice drops the label plus the closing '</p>' and the one
    character (typically a newline) preceding it.
    """
    found = re.findall('Development Status: .+?</p>', html, re.DOTALL)
    if not found:
        return None
    return found[0][20:-5]
def parse_time(html):
    """Return the 'Site Member Since' date string from an info page, or None."""
    section = re.findall('Site Member Since:.+?</strong>', html, re.DOTALL)
    if not section:
        return None
    stamp = re.findall('<strong>.+?</strong>', section[0])[0]
    return stamp[8:-9]
def parse_member_name(html):
    """Return the member's real name from the resume page <title>, or None."""
    match = re.search('<title>People at Savannah: (.+) Resume', html)
    return match.group(1) if match else None
def parse_member_description(html):
    """Return the member's resume text between '<h3>Resume' and the next <h3>.

    None when absent; simple <p>/<br> markup is stripped.
    """
    found = re.findall('<h3>Resume.+?<h3>', html, re.DOTALL)
    if not found:
        return None
    description = found[0][15:-4]
    for fragment in ('<p>', '</p>', '<br/>', '<br />'):
        description = description.replace(fragment, '')
    return description
def parse_skills(html):
    """Parse the skills table into a list of [skill, level, experience] lists.

    Each <tr> row contributes one list of its <td> cell texts.  Raises
    IndexError when the page has no skills table (callers catch this).
    """
    table = re.findall('<th class="boxtitle">Experience.+</table>', html, re.DOTALL)[0]
    rows = re.findall('<tr .+?</tr>', table, re.DOTALL)
    cell_re = re.compile('<td>.+?</td>')
    skills = []
    for row in rows:
        skills.append([cell[4:-5] for cell in cell_re.findall(row)])
    return skills
'''
Created on May 2, 2010
This module spiders the index page of each job and prepares for skills spidering.
@author: Steven Norris
'''
from HTMLParser import HTMLParser
import re
import time
import sys
import traceback
BASE_SITE1='savannah.gnu.org'
BASE_SITE2='savannah.nongnu.org'
'''
This spider gathers the members list for each projects page
'''
class SpiderSavannahMembersList(HTMLParser):
    """Scans a project home page for its member-list link.

    The last href containing 'memberlist.php' ends up in ``check_link``.
    """
    check_link = ''
    def handle_starttag(self, tag, attrs):
        if tag != 'a':
            return
        # assumes href is the first attribute, as on the scraped pages
        href = attrs[0][1]
        if re.search('memberlist.php', href) is not None:
            self.check_link = href
def run(utils,data_source_id):
    """Collect the home page and member-list page for every 'indexHTML' job.

    For each job (target_name, GNU_NON, link): fetch the project home
    page, locate its memberlist.php link, fetch that page too, store both
    in sv_project_indexes, create the sv_projects row, then advance the
    job to 'skillsHTML'.  Failures are recorded with utils.post_error;
    utils.error True aborts the process via sys.exit().
    """
    #Does the index collection jobs for projects
    print("\nGathering indexes")
    spiderMembers=SpiderSavannahMembersList()
    job=utils.get_job(data_source_id,'indexHTML')
    if utils.error:
        sys.exit()
    while job is not None:
        try:
            link=job[2]
            gnu_non=job[1]
            project_name=job[0]
            print("Gathering data for "+link+".")
            #gets the home page for each project
            print("Gathering home page.")
            # BUG FIX: the GNU_NON column holds 'GNU'/'NONGNU' (see the job
            # creator), so the old comparison gnu_non=='gnu' never matched
            # and every project was fetched from the nongnu host.
            if gnu_non=='GNU':
                BASE_SITE=BASE_SITE1
            else:
                BASE_SITE=BASE_SITE2
            print('http://'+BASE_SITE+link)
            page=utils.get_page('http://'+BASE_SITE+link)
            if page:
                home_page=str(page)
                #finds the members page for the project
                print("Gathering members page.")
                spiderMembers.feed(home_page)
                members_page=utils.get_page('http://'+BASE_SITE+spiderMembers.check_link)
                # BUG FIX: this used to re-test 'page' (always truthy here)
                # instead of the members page that was just fetched.
                if members_page:
                    members_page=str(members_page)
                    #Insert the homepage and members page into sv_project_indexes
                    print("Inserting into sv_project_indexes.")
                    insert='''INSERT INTO sv_project_indexes (
                        project_name,datasource_id,indexhtml,memberhtml,date_collected)
                        VALUES(%s,%s,%s,%s,NOW())'''
                    utils.db_insert(insert,project_name,data_source_id,home_page,members_page)
                    #Insert the type into sv_projects
                    print("Inserting into sv_projects.")
                    insert='''INSERT INTO sv_projects (
                        project_name,datasource_id,gnu_or_non,date_collected)
                        VALUES(%s,%s,%s,NOW())'''
                    utils.db_insert(insert,project_name,data_source_id,gnu_non)
                    #sleeps then status change and select new job while checking for fatal errors
                    time.sleep(3)
                    utils.change_status('skillsHTML','indexHTML',data_source_id,project_name)
                    job=utils.get_job(data_source_id,'indexHTML')
                    if utils.error:
                        sys.exit()
                else:
                    print("!!!!WARNING!!!! Members page either did not exits or failed to collect for "+project_name)
                    utils.post_error(traceback.format_exc(),project_name,data_source_id,gnu_non)
                    job=utils.get_job(data_source_id,'indexHTML')
                    if utils.error:
                        sys.exit()
            else:
                print("!!!!WARNING!!!! Index page either did not exist or failed to collect for "+project_name)
                utils.post_error(traceback.format_exc(),project_name,data_source_id,gnu_non)
                job=utils.get_job(data_source_id,'indexHTML')
                if utils.error:
                    sys.exit()
        #posting error for faulty gathering
        except Exception:
            print("!!!!WARNING!!!! Index gathering failed for "+project_name+".")
            utils.post_error(traceback.format_exc(),project_name,data_source_id,gnu_non)
            job=utils.get_job(data_source_id,'indexHTML')
            if utils.error:
                sys.exit()
| Python |
'''
Created on May 2, 2010
This module performs the clean up for index jobs.
@author: StevenNorris
'''
import sys
import traceback
def run(utils,datasource_id):
    """Clean up failed 'indexHTML' jobs for *datasource_id*.

    For each job whose previous stage was 'indexHTML': delete the
    partially collected rows (sv_project_indexes / sv_projects) and put
    the job back to the 'indexHTML' stage so it can be re-collected.
    """
    #Cleans up index jobs
    print("\nStarting index clean up.")
    #Gets Job
    job=utils.get_cleanup_job(datasource_id,'indexHTML')
    if utils.error:
        sys.exit()
    while job is not None:
        #Cleans up for the job
        try:
            unixname=job[0]
            print("Cleaning up for "+unixname)
            utils.delete_index(unixname,datasource_id)
            utils.change_status('indexHTML','Clean_Up',datasource_id,unixname)
            job=utils.get_cleanup_job(datasource_id,'indexHTML')
            if utils.error:
                sys.exit()
        #If process fails, post error and get new job.
        # BUG FIX: was a bare 'except:', which also swallowed the
        # SystemExit raised by sys.exit() above.
        except Exception:
            print("!!!!WARNING!!!! Clean up indexes for "+unixname+" failed.")
            # BUG FIX: post_error takes (message, target, datasource_id, type);
            # the old call passed only three arguments and raised TypeError.
            utils.post_error('Clean_Up(index):\n'+traceback.format_exc(),unixname,datasource_id,job[1])
            job=utils.get_cleanup_job(datasource_id,'indexHTML')
            if utils.error:
                sys.exit()
| Python |
'''
Created on May 2, 2010
This module spiders the index page of each job and prepares for skills spidering.
@author: Steven Norris
'''
from HTMLParser import HTMLParser
import re
import time
import sys
import traceback
BASE_SITE1='savannah.gnu.org'
BASE_SITE2='savannah.nongnu.org'
'''
This spider handles the skills pages in each members page
'''
class SpiderSavannahSkills(HTMLParser):
    """Collects every href containing 'resume.php' from a members page.

    NOTE(review): ``check_links`` is a class-level list shared by all
    instances -- behavior preserved from the original.
    """
    check_links = []
    def handle_starttag(self, tag, attrs):
        if tag != 'a':
            return
        # assumes href is the first attribute, as on the scraped pages
        href = attrs[0][1]
        if re.search('resume.php', href) is not None:
            self.check_links.append(href)
'''
This class allows for the collection of usernames from the skills pages
'''
class User_Name_Spider(HTMLParser):
    """HTML parser that collects anchor hrefs pointing at user pages.

    Any anchor whose first attribute value contains ``/users`` is
    appended to the class-level ``check_links`` list.
    """
    check_links = []
    def handle_starttag(self, tag, attrs):
        # Record hrefs that look like links to /users pages.
        if tag == 'a' and re.search("/users", attrs[0][1]) is not None:
            self.check_links.append(attrs[0][1])
def run(utils,data_source_id):
    """Collect skills and info pages for members of each pending
    'skillsHTML' job and store them in sv_developers and
    sv_developer_projects.

    utils          -- Savannah utilities helper (DB access, HTTP fetches)
    data_source_id -- id of the data source being collected
    Exits the process whenever utils reports a database error.
    """
    #creates needed spiders
    spiderSkills=SpiderSavannahSkills()
    spiderUserName=User_Name_Spider()
    #Does the skills page collection for the projects
    print("\nGathering skills pages")
    job=utils.get_job(data_source_id,'skillsHTML')
    if (utils.error):
        sys.exit()
    while (job!=None):
        try:
            error=False
            # job[0] is the project unixname; its stored member-list HTML is
            # fed to the spider, which accumulates resume.php links.
            member_html=utils.get_member_html(job[0],data_source_id)
            member_html=member_html[0]
            spiderSkills.feed(member_html)
            #collects the skill pages for each member
            print("Gathering skills pages for "+job[0])
            # NOTE(review): 'type' here is the *builtin* (never == 'gnu'), so
            # BASE_SITE2 (nongnu) is always chosen — a gnu/nongnu flag was
            # probably intended; confirm.
            if(type=='gnu'):
                BASE_SITE=BASE_SITE1
            else:
                BASE_SITE=BASE_SITE2
            for link in spiderSkills.check_links:
                print("finding skills page at "+link)
                # Developer id is the tail of the link past a fixed 27-char
                # prefix — assumes a stable URL layout; TODO confirm.
                dev_id=link[27:]
                skills_page=utils.get_page('http://'+BASE_SITE+link)
                if(skills_page):
                    skills_page=str(skills_page)
                    spiderUserName.feed(skills_page)
                    # First /users link on the skills page is the login name.
                    user_name=spiderUserName.check_links[0]
                    info_page=utils.get_page('http://'+BASE_SITE+user_name)
                    if(info_page):
                        info_page=str(info_page)
                        # Strip the leading '/users/' prefix.
                        user_name=user_name[7:]
                        spiderUserName.check_links=[]
                        print("Inserting for "+user_name+" on project "+job[0])
                        #Insert the developer into sv_developers
                        print("Inserting into sv_developers.")
                        insert='''INSERT IGNORE INTO sv_developers (datasource_id,dev_loginname,developer_id,skillshtml,infohtml,date_collected)
                        VALUES(%s,%s,%s,%s,%s,NOW())'''
                        utils.db_insert(insert,data_source_id,user_name,dev_id,skills_page,info_page)
                        #Insert the developer into sv_developers_projects
                        print("Inserting into sv_developer_projects.")
                        insert='''INSERT INTO sv_developer_projects (datasource_id,dev_loginname,project_name,date_collected)
                        VALUES(%s,%s,%s,NOW())'''
                        utils.db_insert(insert,data_source_id,user_name,job[0])
                    else:
                        print('!!!!WARNING Skills pages did not collect correctly!!!!')
                        utils.post_error(traceback.format_exc(),job[0],data_source_id,job[1])
                        if(utils.error):
                            sys.exit()
                        error=True
                else:
                    print('!!!!WARNING Skills pages did not collect correctly!!!!')
                    utils.post_error(traceback.format_exc(),job[0],data_source_id,job[1])
                    if(utils.error):
                        sys.exit()
                    error=True
            #refresh links, sleep, change status, get new job, and check for errors
            spiderSkills.check_links=[]
            time.sleep(3)
            print (error)
            if(not error):
                utils.change_status('indexparsing','skillsHTML',data_source_id,job[0])
            job=utils.get_job(data_source_id,'skillsHTML')
            if (utils.error):
                sys.exit()
        #posts errors in case of faulty skills gathering
        except:
            print('!!!!WARNING!!!! Skills pages did not collect correctly.')
            utils.post_error(traceback.format_exc(),job[0],data_source_id,job[1])
            job=utils.get_job(data_source_id,'skillsHTML')
            if (utils.error):
                sys.exit()
| Python |
'''
Created on May 21, 2010
Performs the clean up for projects left In_Progress for Savannah.
@author: StevenNorris
'''
import sys
from SavannahUtils import SavannahUtils
import SavannahIndexCleanUp
import SavannahSkillsCleanUp
import SavannahParsingCleanUp
def main(argv):
    """Run all three Savannah clean-up stages (index, skills, parsing) for
    one data source.

    argv[1] -- datasource id
    argv[2] -- 'T' to use the test database config, anything else for live
    """
    #set variables
    try:
        datasource_id=argv[1]
        test=argv[2]
    except:
        print("""RUN INSTRUCTIONS
        Run this module from command line with the following format:
        [Interpreter] SavannahCleanUp.py [datasource_id] [Test True/False]
        Test is a string variable. Be sure to use a capital 'T' to denote test mode.
        Otherwise use 'F'.""")
        sys.exit()
    #Checks for test mode
    try:
        if (test=='T'):
            print("TEST MODE ACTIVATED")
            utils=SavannahUtils('dbInfoTest.txt')
        else:
            utils=SavannahUtils('dbInfo.txt')
    except:
        # SavannahUtils construction fails when the db config file is absent.
        print("Please create the dbInfo.txt and the dbInfoTest.txt files. See ReadMe for formatting.")
        sys.exit()
    #running clean up
    SavannahIndexCleanUp.run(utils,datasource_id)
    SavannahSkillsCleanUp.run(utils,datasource_id)
    SavannahParsingCleanUp.run(utils,datasource_id)
main(sys.argv)
| Python |
import urllib
import re
import time
import sys
import gzip
import sqlalchemy
from sqlalchemy import *
import codecs
import warnings
warnings.filterwarnings('ignore')
import ConfigParser
config = ConfigParser.RawConfigParser()
config.read('al.conf')
try:
#config.get('alioth','username')
DB_USER = config.get('alioth','user')
DB_PASS = config.get('alioth','pass')
DB_ADDRESS = config.get('alioth','address')
MESSAGES = config.get('alioth','messages')
MSG_REFS = config.get('alioth','msg_refs')
MAILING_LISTS = config.get('alioth','mailing_lists')
PROJECTS = config.get('alioth','projects')
PROJECT_INDEXES = config.get('alioth','project_indexes')
MAILING_LIST_INDEXES = config.get('alioth','mailing_list_indexes')
MESSAGES_INDEXES = config.get('alioth','messages_indexes')
DATASOURCE = config.getint('alioth','datasource')
except exception as e:
print e
print 'error reading al.conf'
sys.exit(1)
db = create_engine('mysql://'+DB_USER+':'+DB_PASS+'@'+DB_ADDRESS+'?charset=utf8&use_unicode=1')
connection = db.connect()
meta = MetaData()
meta.bind = connection
messages = Table(MESSAGES, meta, autoload=True)
msg_refs = Table(MSG_REFS, meta, autoload=True)
mailing_lists = Table(MAILING_LISTS, meta, autoload=True)
#projects = Table(MESSAGES, meta, autoload=True)
project_indexes = Table(PROJECT_INDEXES, meta, autoload=True)
mailing_list_indexes = Table(MAILING_LIST_INDEXES, meta, autoload=True)
message_indexes = Table(MESSAGES_INDEXES, meta, autoload=True)
#
#more tables
#
'''def uploadMsg(project,mlist,url,msg):
ins = {}
fr = re.search(r'From: (.*?)\n',msg)
date = re.search(r'Date: (.*?)\n',msg)
msg_id = re.search(r'Message-ID: (.*?)\n',msg)
sub = re.search(r'Subject: (.*?)\n',msg)
body = re.search(r'Message-ID: .*?\n(.*)',msg,re.DOTALL)
reply = re.search(r'In-Reply-To: (.*?)\n',msg)
ref = re.findall(r'References: (.*?)\n',msg)
for reference in ref:
########meh
try:
connection.execute(msg_refs.insert().values(datasource_id = DATASOURCE, mailing_list = mlist, message_id = msg_id.group(1).strip(), reference = reference))
except Exception as e:
print e
time.sleep(5)
if reply:
ins['reply_to'] = reply.group(1).strip()
if fr:
ins['sender'] = fr.group(1).strip()
else:
return #no point in continuing if it's not a real message
#ins['mailing_list'] = mlist
#ins['datasource_id'] = DATASOURCE
#ins['url'] = url
if date:
ins['date_sent'] = date.group(1).strip()
if msg_id:
ins['message_id'] = msg_id.group(1).strip()
if sub:
ins['subject'] = sub.group(1).strip()
if body:
ins['body'] = body.group(1).strip()
ins['mailing_list'] = mlist
ins['datasource_id'] = DATASOURCE
ins['url'] = url
try:
connection.execute(messages.insert().values(ins))
except Exception as e:
print e
time.sleep(5)
'''
def downloadTar(mlist,project,url):
#### select distinct url from al_messages where mlist = mlist and datasource_id = datasource;
### if url isn't in the select, we're all good yo.
url_list = connection.execute("SELECT count(url) FROM "+MESSAGES_INDEXES+" WHERE mailing_list = '"+mlist+"' AND datasource_id = "+str(DATASOURCE)+" AND url = '"+url+"';")
if url_list.fetchone()['count(url)'] == 0:
try:
f = urllib.urlopen(url)
g = open('tmp.txt.gz','w')
g.write(f.read())
f.close()
g.close()
gz = gzip.open('tmp.txt.gz','rb')
mail = gz.read()
gz.close()
except Exception as e:
print e
print 'sleeping for 30 seconds before another attempt'
time.sleep(30)
downloadTar(mlist,project,url)
try:
connection.execute(message_indexes.insert().values(datasource_id = DATASOURCE, mailing_list = mlist, list_index = mail, url = url))
except Exception as e:
#print e
#print 'sleeping for 30 seconds before another attempt'
pass
#time.sleep(30)
######got it!
# m = re.compile(r'\nFrom\s+\S+\s+at\s+\S+\s+\S+\s+\S+\s+\d+\s+\d{2}:\d{2}:\d{2} \d+')
# for msg in m.split(mail.strip()):
# uploadMsg(project,mlist,url,msg.strip())
url_list.close()
############
def parseListUrl(mlist,project,url):
    """Fetch a pipermail archive listing and download every monthly
    .txt.gz archive except the first.

    The first listed archive is the current month and is still growing,
    so it is deliberately skipped to avoid storing an incomplete file.
    """
    f = urllib.urlopen(url)
    page = f.read()
    f.close()
    # Dots escaped (was '.*?.txt.gz'): only literal '.txt.gz' names match now.
    tars = re.findall('<td><A href="(.*?\.txt\.gz)">\[ Gzip\'d Text .*? \]</a></td>',page)
    count = 0
    for tar in tars:
        #this way we never pull the latest (current month)
        #it is incomplete.
        if count > 0:
            downloadTar(mlist,project,url+tar)
        count += 1
def getMailingLists(project):
    """Fetch the mailing-list overview for an Alioth project, archive its
    HTML, then crawl each pipermail archive it links to.

    Retries forever on any fetch failure (30s backoff via recursion).
    """
    try:
        #gets project page
        f = urllib.urlopen('https://alioth.debian.org/projects/'+str(project))
        page = f.read()
        f.close()
        #picks out mailing list link
        groupid = re.search(r'<a href="/mail/\?group\_id=(.+?)"',page)
        if not groupid:
            return
        #gets list of mailing lists
        f = urllib.urlopen('https://alioth.debian.org/mail/?group_id='+str(groupid.group(1)))
        page = f.read()
        f.close()
        try:
            # Store the raw overview HTML for later offline parsing.
            connection.execute(mailing_list_indexes.insert().values(datasource_id = DATASOURCE, html = page, project = project))
        except Exception as e:
            print e
            print 'something happened, sleeping 5 seconds'
            time.sleep(5)
        lists = re.findall(r'<a href="(http://lists.alioth.debian.org/pipermail/(.+?)/)">',page)
        for mail in lists:
            # mail = (full archive URL, list name)
            parseListUrl(mail[1],project,mail[0])
    except Exception as e:
        print e
        print 'sleeping 30 seconds and attempting again'
        time.sleep(30)
        # NOTE(review): unbounded recursive retry — a permanently failing
        # project will recurse until the interpreter's stack limit.
        getMailingLists(project)
# Page through the full Alioth software map; each listing page's HTML is
# archived and every project on it is crawled for mailing lists.
current = 1
total = 1
while int(current) <= int(total):
    print 'on page '+str(current)+' of '+str(total)
    try:
        f = urllib.urlopen('https://alioth.debian.org/softwaremap/full_list.php?page='+str(current))
        page = f.read()
        f.close()
    except Exception as e:
        print e
        print 'something failed, sleeping 30 sec'
        time.sleep(30)
        continue
    try:
        # Archive the raw listing page for offline re-parsing.
        connection.execute(project_indexes.insert().values(datasource_id = DATASOURCE, page = current, html = page))
    except Exception as e:
        print e
        print 'something failed'
        time.sleep(5)
    # The last pagination link gives the total page count.
    # NOTE(review): '<(\d+)></a>' expects markup of the form <N></a> —
    # confirm against the live page structure.
    maximum = re.search('<(\d+)></a> </span><hr />',page)
    try:
        total = int(maximum.group(1))
    except:
        pass
    projects = re.findall('about="https://alioth.debian.org/projects/(.+?)/"',page)
    #print len(projects)
    for project in projects:
        getMailingLists(project)
    #otherwise we want to try again
    print 'num projects: '+str(len(projects))
    # Only advance when the page actually listed projects; an empty page is
    # treated as a transient failure and fetched again.
    if len(projects) > 0:
        current += 1
    #print str(current) + ' / ' + str(total)
| Python |
import urllib
import re
import time
import sys
import gzip
import sqlalchemy
from sqlalchemy import *
from sqlalchemy import exc
import codecs
from datetime import datetime
from dateutil.parser import parse
import warnings
warnings.filterwarnings('ignore')
import ConfigParser
config = ConfigParser.RawConfigParser()
config.read('al.conf')
try:
#config.get('alioth','username')
DB_USER = config.get('alioth','user')
DB_PASS = config.get('alioth','pass')
DB_ADDRESS = config.get('alioth','address')
MESSAGES = config.get('alioth','messages')
MSG_REFS = config.get('alioth','msg_refs')
MAILING_LISTS = config.get('alioth','mailing_lists')
PROJECTS = config.get('alioth','projects')
PROJECT_INDEXES = config.get('alioth','project_indexes')
MAILING_LIST_INDEXES = config.get('alioth','mailing_list_indexes')
MESSAGES_INDEXES = config.get('alioth','messages_indexes')
DATASOURCE = config.getint('alioth','datasource')
AUDIENCE = config.get('alioth','audience')
OS = config.get('alioth','os')
STATUS = config.get('alioth','status')
LICENSE = config.get('alioth','license')
ENVIRONMENT = config.get('alioth','environment')
TOPIC = config.get('alioth','topic')
LANGUAGE = config.get('alioth','language')
except exception as e:
print e
print 'error reading al.conf'
sys.exit(1)
def uploadMsg(mlist, url,msg):
    """Parse one raw mail message and insert it (and its References) into
    the messages / msg_refs tables.

    mlist -- mailing-list name the message belongs to
    url   -- archive URL the message was extracted from
    msg   -- raw message text (headers + body)
    Messages without a From: header are silently skipped.
    """
    ins = {}
    # Header fields are pulled with regexes rather than a real MIME parser.
    fr = re.search(r'From: (.*?)\n',msg)
    date = re.search(r'Date: (.*?)\n',msg)
    msg_id = re.search(r'Message-ID: <(.*?)>',msg)
    sub = re.search(r'Subject: (.*?)\n\S+?:',msg,re.DOTALL)
    body = re.search(r'\nMessage-ID: .*?\n(.*)',msg,re.DOTALL)
    reply = re.search(r'\nIn-Reply-To: <(.*?)>',msg)
    ref = re.findall(r'\nReferences: (.*?)\n\S+?:',msg,re.DOTALL)
    for reference in ref:
        # A References: header may hold several <message-id> tokens.
        really = re.findall('<(.*?)>',reference)
        for each in really:
            ########meh
            try:
                #pass
                if msg_id:
                    connection.execute(msg_refs.insert().values(datasource_id = DATASOURCE, mailing_list = mlist, message_id = msg_id.group(1).strip(), reference = each))
            except exc.IntegrityError as i:
                # Duplicate reference row: expected on re-runs.
                #print i
                pass
            except Exception as e:
                print msg[:1000]
                print ' '
                print 'something happened with references, sleepint for 5 sec'
                print '\n'
                print e
                time.sleep(5)
    if reply:
        ins['reply_to'] = reply.group(1).strip()
    if fr:
        ins['sender'] = fr.group(1).strip()
    else:
        return #no point in continuing if it's not a real message
    #ins['mailing_list'] = mlist
    #ins['datasource_id'] = DATASOURCE
    #ins['url'] = url
    if date:
        try:
            # dateutil copes with the many date formats seen in the wild.
            ins['date_sent'] = parse(date.group(1).strip())
        except:
            pass
    if msg_id:
        ins['message_id'] = msg_id.group(1).strip()
    if sub:
        ins['subject'] = sub.group(1).strip()
    if body:
        ins['body'] = body.group(1).strip()
    try:
        ins['mailing_list'] = mlist
        ins['datasource_id'] = DATASOURCE
        ins['url'] = url
    except:
        return
    try:
        #pass
        connection.execute(messages.insert().values(ins))
    except exc.IntegrityError as i:
        # Duplicate message: expected on re-runs.
        #print i
        pass
    except Exception as e:
        # NOTE(review): unlike the references handler this never prints e,
        # so the actual failure is lost — consider logging it.
        print msg[:1000]
        print ' '
        print 'something happened with messages, sleeping for 5'
        print '\n'
        time.sleep(5)
def parseMessages():
    """Split every stored mailing-list archive that has not yet been
    parsed into individual messages and upload each one.

    The outer query selects archive URLs with no corresponding rows in
    the messages table, so a re-run resumes where it left off.
    """
    # NOTE(review): URLs are interpolated directly into SQL; they originate
    # from scraped pages, so parameterized queries would be safer.
    msgs_list = connection.execute('SELECT distinct i.url FROM `'+MESSAGES_INDEXES+'` i left outer join `'+MESSAGES+'` m on i.url = m.url WHERE m.url IS NULL;')
    for url in msgs_list:
        current = connection.execute('SELECT url,mailing_list,list_index FROM `'+MESSAGES_INDEXES+'` WHERE url = "'+url[0]+'";').fetchone()
        #print current['list_index']
        ##r'From treina at styllusconsultoria.com.br Wed Feb 23 19:40:37 2011'
        # mbox-style 'From ' separator line; splitting on it yields messages.
        m = re.compile(r'From.*?\d{2}:\d{2}:\d{2} \d{4}')
        #m = re.compile(r'From\s+\S+\s+at\s+\S+\s+\S+\s+\S+\s+\d+\s+\d{2}:\d{2}:\d{2} \d+')
        for msg in m.split(current['list_index']):
            #print msg
            #time.sleep(3)
            uploadMsg(current['mailing_list'],current['url'],msg.strip())
def parseMailingLists():
    """Extract list name/description pairs from each project's archived
    mailing-list overview HTML and insert them into the mailing-lists
    table."""
    projects = connection.execute('SELECT project FROM '+MAILING_LIST_INDEXES+' WHERE datasource_id = '+str(DATASOURCE)+';')
    for each in projects:
        current = connection.execute('SELECT project,html FROM '+MAILING_LIST_INDEXES+' WHERE datasource_id = '+str(DATASOURCE)+' AND project = "'+each['project']+'";').fetchone()
        project = current['project']
        # Only the table body of the overview page contains the lists.
        html = current['html'][current['html'].find('<tbody>'):current['html'].find('</tbody>')]
        lists = re.findall('<a href="http://lists.alioth.debian.org/pipermail/.*?/">(.*?)</a></strong></td><td>(.*?)</td>',html)
        for l in lists:
            #list name (teh word 'Archives' is parsed out), list desc
            try:
                connection.execute(mailing_lists.insert().values(datasource_id = DATASOURCE, mailing_list = l[0][:len(l[0])-9], description = l[1], project = project))
            except Exception as e:
                print e
                time.sleep(5)
            #print l[0][:len(l[0])-9]+' :: '+l[1]
def parseProject(html):
d = re.search(r'<span property="doap:name">(.*?)</span>',html)
if d:
display_name = d.group(1)
else:
diplay_name = None
s = re.search(r'<span property="doap:short_desc">(.*?)</span>',html)
if s:
short_desc = s.group(1)
else:
short_desc = None
r = re.search(r'<br />Register Date: <strong>(.*?)</strong>',html)
if r:
try:
registered = parse(r.group(1))
except:
registered = None
else:
registered = None
u = re.search(r'<div typeof="doap:Project sioc:Space" about="https://alioth.debian.org/projects/(\S+)/">',html)
if u:
unixname = u.group(1)
else:
unixname = None
try:
connection.execute(projects.insert().values(datasource_id = DATASOURCE, display_name = display_name, unixname = unixname, short_desc = short_desc, registered = registered))
except exc.IntegrityError as i:
pass
except Exception as e:
print e
time.sleep(5)
lists = re.findall('<li>.*?</li>',html)
for li in lists:
items = re.findall(r'[0-9]">(.*?)</a>', li)
ins = {}
table = None
ins['datasource_id'] = DATASOURCE
ins['unixname'] = unixname
if items[0] == 'Intended Audience':
ins['audience'] = items[len(items)-1]
table = al_audience
elif items[0] == 'Operating System':
ins['os'] = items[len(items)-1]
table = al_os
elif items[0] == 'Development Status':
ins['status'] = items[len(items)-1]
table = al_status
elif items[0] == 'License':
ins['license'] = items[len(items)-1]
table = al_license
elif items[0] == 'Environment':
ins['environment'] = items[len(items)-1]
table = al_environment
elif items[0] == 'Topic':
ins['topic'] = items[len(items)-1]
table = al_topic
elif items[0] == 'Programming Language':
ins['language'] = items[len(items)-1]
table = al_language
try:
connection.execute(table.insert().values(ins))
except Exception as e:
#print e
pass
#time.sleep(5)
#print items[0] + ' ' + items[len(items)-1]
#for item in items:
#string += item+'::'
#print string[:len(string)-2]
#print 'name '+str(display_name)
#print 'unixname '+str(unixname)
#print 'reg: '+str(registered)
#print 'desc '+str(short_desc)
#print '\n'
# Connect to MySQL and reflect every configured table; the parse functions
# above use these module-level handles.
db = create_engine('mysql://'+DB_USER+':'+DB_PASS+'@'+DB_ADDRESS+'?charset=utf8&use_unicode=1')
connection = db.connect()
meta = MetaData()
meta.bind = connection
messages = Table(MESSAGES, meta, autoload=True)
msg_refs = Table(MSG_REFS, meta, autoload=True)
mailing_lists = Table(MAILING_LISTS, meta, autoload=True)
projects = Table(PROJECTS, meta, autoload=True)
project_indexes = Table(PROJECT_INDEXES, meta, autoload=True)
mailing_list_indexes = Table(MAILING_LIST_INDEXES, meta, autoload=True)
message_indexes = Table(MESSAGES_INDEXES, meta, autoload=True)
al_audience = Table(AUDIENCE, meta, autoload=True)
al_os = Table(OS, meta, autoload=True)
al_status = Table(STATUS, meta, autoload=True)
al_license = Table(LICENSE, meta, autoload=True)
al_environment = Table(ENVIRONMENT, meta, autoload=True)
al_topic = Table(TOPIC, meta, autoload=True)
al_language = Table(LANGUAGE, meta, autoload=True)
#
#more tables
#
print 'Parsing mailing list messages, this will take a while.'
parseMessages()
print 'Parsing mailing lists, shouldn\'t take long'
parseMailingLists()
print 'Parsing projects (final step)'
# Walk every archived project-index page and parse each embedded project block.
proj_list = connection.execute('SELECT id FROM '+PROJECT_INDEXES+' WHERE datasource_id = '+str(DATASOURCE)+';')
for proj in proj_list:
    h = connection.execute('SELECT html FROM '+PROJECT_INDEXES+' WHERE id = '+str(proj['id'])+';')
    html = h.fetchone()['html']
    h.close()
    #proj_page = proj['html']
    # Each doap:Project <div> is one project's metadata fragment.
    x = re.findall(r'<div typeof="doap:Project sioc:Space".*?</strong></td></tr></table></div>',html,re.DOTALL)
    for each in x:
        parseProject(each)
| Python |
import re
import sys
import os
import sqlalchemy
from sqlalchemy import *
import ConfigParser
import warnings
config = ConfigParser.RawConfigParser()
config.read('udd.conf')
#disables warnings
warnings.filterwarnings('ignore', '.*')
try:
DB_USER = config.get('udd','user')
DB_PASS = config.get('udd','pass')
DB_ADDRESS = config.get('udd','database')
except:
print 'error reading the config file, please make sure it is set up properly, and is in the working directory'
sys.exit(1)
try:
DATASOURCE = int(sys.argv[1])
FILENAME = sys.argv[2]
if not os.path.exists(FILENAME):
raise Error('bad filename')
except:
print 'error reading command line input, should be datasource followed by path the sql file'
sys.exit(1)
mysql_db = create_engine('mysql://'+DB_USER+':'+DB_PASS+'@'+DB_ADDRESS+'?charset=utf8&use_unicode=0')
connection = mysql_db.connect()
meta = MetaData()
meta.bind = connection
f = open(FILENAME,'r')
cols = []
name= null
cur=null
for line in f:
if line[:2] == '\\.':
name = null
table = re.match('COPY\s+(\w+)\s+\((.*?)\)\s+FROM\s+stdin;',line)
if name != null:
values = line.split('\t')
ins = cur.insert()
statements = {'datasource_id': str(DATASOURCE)}
for i in range(len(cols)):
if values[i].strip() == '\\N':
statements[cols[i].strip()] = None
elif values[i].strip() == 't':
statements[cols[i].strip()] = '1'
elif values[i].strip() == 'f':
statements[cols[i].strip()] = '0'
else:
statements[cols[i].strip()] = values[i].strip()
try:
ins.execute(statements)
except:
print statements
del(statements)
if table:
name = 'udd_'+table.group(1)
print name
cur = Table('udd_'+table.group(1), meta, autoload=True)
cols = table.group(2).split(',')
for i in range(len(cols)):
if cols[i].strip() == 'file':
cols[i] = 'filename'
elif cols[i].strip() == '"time"':
cols[i] = 'time'
elif cols[i].strip() == 'release':
cols[i] = 'released'
elif cols[i].strip() == 'key':
cols[i] = 'key_info'
connection.close()
| Python |
#!/usr/bin/env python
import MySQLdb
import sys
import re
import traceback
import signal
import threading
from debian_Utilities import Debian_Utilities
#Global variables (not sure if we need this)
#isError = False
#Class for main routine
class Debian_ParseDevelopers:
    """Job-queue driver: repeatedly pulls 'devParse' jobs and runs a
    Parse_Thread for each, marking completion or posting errors via
    Debian_Utilities.  Constructing the class starts the loop and never
    returns; SIGINT triggers a clean shutdown."""
    #Signal handler for clean exiting (SIGINT)
    def sigHandler(self, signum, frame):
        """Wait for the in-flight parse thread, mark the current job
        complete, then exit the process."""
        try:
            self.parse_Thread.join()
            self.util.updateStatus('complete', self.job[0])
            print 'Clean exit'
        except:
            self.util.postError(traceback.format_exc(), self.job[0])
        raise SystemExit
    #Main routine
    def __init__(self):
        self.util = Debian_Utilities()
        signal.signal(signal.SIGINT, self.sigHandler)
        while(True):
            # isError is shared with Parse_Thread.run, which sets it on
            # failure so the job is not marked complete.
            global isError
            isError = False
            self.job = None
            self.job = self.util.findJob('devParse')
            if (self.job):
                try:
                    print 'Starting ' + self.job[1]
                    self.util.updateStatus('in progress', self.job[0])
                    # job fields: (id, project name, debian type, datasource)
                    self.parse_Thread = Parse_Thread(self.util, self.job[0], self.job[1], self.job[2], self.job[3])
                    self.parse_Thread.start()
                    self.parse_Thread.join()
                    if not isError:
                        self.util.updateStatus('complete', self.job[0])
                    print 'Finishing ' + self.job[1]
                except SystemExit:
                    sys.exit()
                except:
                    self.util.postError(traceback.format_exc(), self.job[0])
#Threaded class that does the actual work
#Threaded class that does the actual work
class Parse_Thread(threading.Thread):
    """Worker thread that parses maintainer information out of a project's
    stored developers HTML and inserts rows into
    debian_project_developers."""
    # One maintainer entry: profile link, display name, and an optional
    # trailing mail anchor.
    _DEV_RE = re.compile(r'<a href="(.*?)">(.*?)</a>\s*(?:<a class="email" href="mailto:(.*?)">mail</a>)?')
    def __init__(self, util, job_id, projName, debianType, datasource):
        threading.Thread.__init__(self)
        self.util = util
        self.job_id = job_id
        self.projName = projName
        self.debianType = debianType
        self.datasource = datasource
    @staticmethod
    def _extract_comaintainers(devhtml):
        """Return [(url, name, email-or-None), ...] for each co-maintainer.

        Fixes two bugs in the original: str.split had its arguments
        reversed (it split the literal '<br>' instead of the HTML, so no
        co-maintainer was ever found), and the mail-anchor pattern was
        written as a [...] character class where an optional group was
        intended, so group(3) always raised.
        """
        result = re.search(r'Co-Maintainers</a></td>\s*<td class="contentcell">(.*?)</td>', devhtml)
        if not result:
            return []
        found = []
        for comaint in result.group(1).split('<br>'):
            m = Parse_Thread._DEV_RE.search(comaint)
            if m:
                found.append((m.group(1), m.group(2), m.group(3)))
        return found
    #Parse routine that populates the developers table
    def run(self):
        """Insert the Maintainer and any Co-Maintainers for the project;
        any failure is posted via util and flags the shared isError."""
        try:
            select = '''SELECT devshtml
                FROM debian_project_indexes_stable
                WHERE proj_unixname = %s
                AND datasource_id = %s'''
            devhtml = self.util.execQuery(select, self.projName, self.datasource)[0]
            result = re.search(r'<td class="labelcell">Maintainer</td>\s*<td class="contentcell">\s*<a href="(.*?)">(.*?)</a>\s*<a class="email" href="mailto:(.*?)">', devhtml, re.DOTALL)
            insert = """INSERT INTO debian_project_developers(
                proj_unixname,
                datasource_id,
                name,
                email,
                url,
                role,
                date_collected)
                VALUES(%s, %s, %s, %s, %s, 'Maintainer', NOW())"""
            self.util.execQuery(insert, self.projName, self.datasource, result.group(2), result.group(3), result.group(1))
            insert = """INSERT INTO debian_project_developers(
                proj_unixname,
                datasource_id,
                name,
                email,
                url,
                role,
                date_collected)
                VALUES(%s, %s, %s, %s, %s, 'Co-Maintainer', NOW())"""
            for url, name, email in self._extract_comaintainers(devhtml):
                self.util.execQuery(insert, self.projName, self.datasource, name, email, url)
        except:
            global isError
            isError = True
            self.util.postError(traceback.format_exc(), self.job_id)
#Runs the main class
Debian_ParseDevelopers() | Python |
#!/usr/bin/env python
import MySQLdb
import sys
import re
import traceback
import signal
import threading
from debian_Utilities import Debian_Utilities
#Global variables (not sure if we need this)
#isError = False
#Class for main routine
class Debian_ParseDescriptions:
    """Job-queue driver: repeatedly pulls 'descParse' jobs and runs a
    Parse_Thread for each, marking completion or posting errors via
    Debian_Utilities.  Constructing the class starts the loop and never
    returns; SIGINT triggers a clean shutdown."""
    #Signal handler for clean exiting (SIGINT)
    def sigHandler(self, signum, frame):
        """Wait for the in-flight parse thread, mark the current job
        complete, then exit the process."""
        try:
            self.parse_Thread.join()
            self.util.updateStatus('complete', self.job[0])
            print 'Clean exit'
        except:
            self.util.postError(traceback.format_exc(), self.job[0])
        raise SystemExit
    #Main routine
    def __init__(self):
        self.util = Debian_Utilities()
        signal.signal(signal.SIGINT, self.sigHandler)
        while(True):
            # isError is shared with Parse_Thread.run, which sets it on
            # failure so the job is not marked complete.
            global isError
            isError = False
            self.job = None
            self.job = self.util.findJob('descParse')
            if (self.job):
                try:
                    print 'Starting ' + self.job[1]
                    self.util.updateStatus('in progress', self.job[0])
                    # job fields: (id, project name, debian type, datasource)
                    self.parse_Thread = Parse_Thread(self.util, self.job[0], self.job[1], self.job[2], self.job[3])
                    self.parse_Thread.start()
                    self.parse_Thread.join()
                    if not isError:
                        self.util.updateStatus('complete', self.job[0])
                    print 'Finishing ' + self.job[1]
                except SystemExit:
                    sys.exit()
                except:
                    self.util.postError(traceback.format_exc(), self.job[0])
#Threaded class that does the actual work
class Parse_Thread(threading.Thread):
def __init__(self, util, job_id, projName, debianType, datasource):
threading.Thread.__init__(self)
self.util = util
self.job_id = job_id
self.projName = projName
self.debianType = debianType
self.datasource = datasource
#Parse routine that gets the project description, long name, version, and any
#URLs included in the description
def run(self):
try:
if self.debianType == 'stable':
table = 'debian_project_indexes_stable'
elif self.debianType == 'testing':
table = 'debian_project_indexes_testing'
elif self.debianType == 'unstable':
table = 'debian_project_indexes_unstable'
select = 'SELECT indexhtml FROM ' + table + '''
WHERE proj_unixname = %s
AND datasource_id = %s'''
indexhtml = self.util.execQuery(select, self.projName, self.datasource)[0]
version = re.search(r'<h1>Package:.*?\((.*?)\)\s*</h1>', indexhtml, re.DOTALL).group(1)
result = re.search(r'<div id="pdesc"\s*>\s*<h2>(.*?)</h2>\s*<p>(.*?)</div>', indexhtml, re.DOTALL)
longname = result.group(1)
description = result.group(2)
result1 = re.search(r'Homepage: <a href="(.*?)"', description)
result2 = re.search(r'http://(.*?)"', description)
result3 = re.search(r'http://(.*?)\s+', description)
result4 = re.search(r'<a href="(.*?)"', description)
if (result1):
homepage = result1.group(1)
elif (result2):
homepage = r'http://' + result2.group(1)
elif (result3):
homepage = r'http://' + result3.group(1)
elif (result4):
homepage = result4.group(1)
else:
homepage = ''
insert = '''INSERT INTO debian_projects
(proj_unixname,
datasource_id,
type,
proj_longname,
description,
descr_homepage,
parentpath,
version,
date_collected)
VALUES(%s, %s, %s, %s, %s, %s, '', %s, NOW())''' #parentpath no longer in use
self.util.execQuery(insert, self.projName, self.datasource, self.debianType, longname, description, homepage, version)
except:
global isError
isError = True
print traceback.format_exc()
self.util.postError(traceback.format_exc(), self.job_id)
#Runs the main class
Debian_ParseDescriptions() | Python |
#!/usr/bin/env python
import MySQLdb
import sys
import traceback
import signal
import threading
import re
from debian_Utilities import Debian_Utilities
#Global variables (not sure if we need this)
#isError = False
#Class for main routine
class Debian_ParseCopyrights:
    """Job-queue driver: repeatedly pulls 'copyrightParse' jobs and runs a
    Parse_Thread for each, marking completion or posting errors via
    Debian_Utilities.  Constructing the class starts the loop and never
    returns; SIGINT triggers a clean shutdown."""
    #Signal handler for clean exiting (SIGINT)
    def sigHandler(self, signum, frame):
        """Wait for the in-flight parse thread, mark the current job
        complete, then exit the process."""
        try:
            self.parse_Thread.join()
            self.util.updateStatus('complete', self.job[0])
            print 'Clean exit'
        except:
            self.util.postError(traceback.format_exc(), self.job[0])
        raise SystemExit
    #Main routine
    def __init__(self):
        self.util = Debian_Utilities()
        signal.signal(signal.SIGINT, self.sigHandler)
        while(True):
            # isError is shared with Parse_Thread.run, which sets it on
            # failure so the job is not marked complete.
            global isError
            isError = False
            self.job = None
            self.job = self.util.findJob('copyrightParse')
            if (self.job):
                try:
                    print 'Starting ' + self.job[1]
                    # NOTE(review): unlike the sibling drivers, this one never
                    # calls updateStatus('in progress', ...) — confirm whether
                    # that is intentional.
                    self.parse_Thread = Parse_Thread(self.util, self.job[0], self.job[1], self.job[2], self.job[3])
                    self.parse_Thread.start()
                    self.parse_Thread.join()
                    if not isError:
                        self.util.updateStatus('complete', self.job[0])
                    print 'Finishing ' + self.job[1]
                except SystemExit:
                    sys.exit()
                except:
                    self.util.postError(traceback.format_exc(), self.job[0])
#Threaded class that does the actual work
class Parse_Thread(threading.Thread):
def __init__(self, util, job_id, projName, debianType, dataSource):
threading.Thread.__init__(self)
self.util = util
self.job_id = job_id
self.projName = projName
self.debianType = debianType
self.datasource = dataSource
#Parse routine that finds the homepage URLs in the copyright HTML and inserts
#it in the copyright table
def run(self):
try:
select = '''SELECT copyrighthtml
FROM debian_project_indexes_stable
WHERE proj_unixname = %s
AND datasource_id = %s'''
cphtml = self.util.execQuery(select, self.projName, self.datasource)[0]
result1 = re.search(r'http://(.*?)>', cphtml)
result2 = re.search(r'http://(.*?)<', cphtml)
result3 = re.search(r'http://(.*?)\)', cphtml)
result4 = re.search(r'http://(.*?),', cphtml)
result5 = re.search(r'http://(.*?)"', cphtml)
result6 = re.search(r'http://(.*?)\'', cphtml)
result7 = re.search(r'http://(.*?)/\.', cphtml)
result8 = re.search(r'http://(.*?).$', cphtml)
result9 = re.search(r'http://(.*?);', cphtml)
result10 = re.search(r'http://(.*?)\s+', cphtml)
result11 = re.search(r'http://(.*?)$', cphtml)
if (result1):
homepage = 'http://' + result1.group(1)
elif (result2):
homepage = 'http://' + result2.group(1)
elif (result3):
homepage = 'http://' + result3.group(1)
elif (result4):
homepage = 'http://' + result4.group(1)
elif (result5):
homepage = 'http://' + result5.group(1)
elif (result6):
homepage = 'http://' + result6.group(1)
elif (result7):
homepage = 'http://' + result7.group(1)
elif (result8):
homepage = 'http://' + result8.group(1)
elif (result9):
homepage = 'http://' + result9.group(1)
elif (result10):
homepage = 'http://' + result10.group(1)
elif (result11):
homepage = 'http://' + result11.group(1)
else:
homepage = ''
homepage = homepage.rstrip(' ,)><\'";')
insert = """INSERT INTO debian_copyright_urls(
proj_unixname,
datasource_id,
url,
date_collected)
VALUES(%s, %s, %s, NOW())"""
self.util.execQuery(insert, self.projName, self.datasource, homepage)
except:
global isError
isError = True
self.util.postError(traceback.format_exc(), self.job_id)
#Runs the main class
Debian_ParseCopyrights() | Python |
#!/usr/bin/env python
import MySQLdb
import sys
import re
import traceback
import signal
import threading
from debian_Utilities import Debian_Utilities
#Global variables (not sure if we need this)
#isError = False
#Class for main routine
class Debian_ParseDescriptions:
    """Job-queue driver: repeatedly pulls 'descParse' jobs and runs a
    Parse_Thread for each, marking completion or posting errors via
    Debian_Utilities.  Constructing the class starts the loop and never
    returns; SIGINT triggers a clean shutdown."""
    #Signal handler for clean exiting (SIGINT)
    def sigHandler(self, signum, frame):
        """Wait for the in-flight parse thread, mark the current job
        complete, then exit the process."""
        try:
            self.parse_Thread.join()
            self.util.updateStatus('complete', self.job[0])
            print 'Clean exit'
        except:
            self.util.postError(traceback.format_exc(), self.job[0])
        raise SystemExit
    #Main routine
    def __init__(self):
        self.util = Debian_Utilities()
        signal.signal(signal.SIGINT, self.sigHandler)
        while(True):
            # isError is shared with Parse_Thread.run, which sets it on
            # failure so the job is not marked complete.
            global isError
            isError = False
            self.job = None
            self.job = self.util.findJob('descParse')
            if (self.job):
                try:
                    print 'Starting ' + self.job[1]
                    self.util.updateStatus('in progress', self.job[0])
                    # job fields: (id, project name, debian type, datasource)
                    self.parse_Thread = Parse_Thread(self.util, self.job[0], self.job[1], self.job[2], self.job[3])
                    self.parse_Thread.start()
                    self.parse_Thread.join()
                    if not isError:
                        self.util.updateStatus('complete', self.job[0])
                    print 'Finishing ' + self.job[1]
                except SystemExit:
                    sys.exit()
                except:
                    self.util.postError(traceback.format_exc(), self.job[0])
#Threaded class that does the actual work
class Parse_Thread(threading.Thread):
def __init__(self, util, job_id, projName, debianType, datasource):
threading.Thread.__init__(self)
self.util = util
self.job_id = job_id
self.projName = projName
self.debianType = debianType
self.datasource = datasource
#Parse routine that gets the project description, long name, version, and any
#URLs included in the description
def run(self):
try:
if self.debianType == 'stable':
table = 'debian_project_indexes_stable'
elif self.debianType == 'testing':
table = 'debian_project_indexes_testing'
elif self.debianType == 'unstable':
table = 'debian_project_indexes_unstable'
select = 'SELECT indexhtml FROM ' + table + '''
WHERE proj_unixname = %s
AND datasource_id = %s'''
indexhtml = self.util.execQuery(select, self.projName, self.datasource)[0]
version = re.search(r'<h1>Package:.*?\((.*?)\)\s*</h1>', indexhtml, re.DOTALL).group(1)
result = re.search(r'<div id="pdesc"\s*>\s*<h2>(.*?)</h2>\s*<p>(.*?)</div>', indexhtml, re.DOTALL)
longname = result.group(1)
description = result.group(2)
result1 = re.search(r'Homepage: <a href="(.*?)"', description)
result2 = re.search(r'http://(.*?)"', description)
result3 = re.search(r'http://(.*?)\s+', description)
result4 = re.search(r'<a href="(.*?)"', description)
if (result1):
homepage = result1.group(1)
elif (result2):
homepage = r'http://' + result2.group(1)
elif (result3):
homepage = r'http://' + result3.group(1)
elif (result4):
homepage = result4.group(1)
else:
homepage = ''
insert = '''INSERT INTO debian_projects
(proj_unixname,
datasource_id,
type,
proj_longname,
description,
descr_homepage,
parentpath,
version,
date_collected)
VALUES(%s, %s, %s, %s, %s, %s, '', %s, NOW())''' #parentpath no longer in use
self.util.execQuery(insert, self.projName, self.datasource, self.debianType, longname, description, homepage, version)
except:
global isError
isError = True
print traceback.format_exc()
self.util.postError(traceback.format_exc(), self.job_id)
#Instantiate the main class; its __init__ runs the job-polling loop until
#interrupted, so importing/executing this script starts the worker.
Debian_ParseDescriptions()
#!/usr/bin/env python
#This script will fill the job queue with the desired set of Debian projects
#for parsing.
import MySQLdb
import urllib2
import sys
import re
from optparse import OptionParser
from debian_Utilities import Debian_Utilities
#Debian "all packages" listing pages, one per distribution.
STABLEPACKAGESURL = "http://packages.debian.org/stable/allpackages"
TESTINGPACKAGESURL = "http://packages.debian.org/testing/allpackages"
UNSTABLEPACKAGESURL = "http://packages.debian.org/unstable/allpackages"
#Plain-text DB config file: host, port, user, password, database (one per line).
DBFILENAME = "dbInfo.txt"
class Debian_MakeJobs:
    """Fill the debian_jobs queue with the desired set of Debian projects.

    Command-line options:
        -g/--group       distribution to queue ('stable', 'testing',
                         'unstable'); default 'all' queues every distribution
        -d/--datasource  integer datasource_id stamped on each job (default 0)
    """
    #Main routine
    def __init__(self):
        self.util = Debian_Utilities()
        parser = OptionParser()
        parser.add_option('-g', '--group', action='store', type='string', dest='GROUP')
        parser.add_option('-d', '--datasource', action='store', type='int', dest='DATASOURCE')
        # BUG FIX: set_default() bypasses optparse's type conversion, so the
        # previous default of '0' left options.DATASOURCE as the *string* '0'
        # whenever -d was omitted.  Use the integer 0 to match type='int'.
        parser.set_default('DATASOURCE', 0)
        parser.set_default('GROUP', 'all')
        (options, args) = parser.parse_args()
        if options.GROUP != 'all':
            for project in self.getProjectGroup(options.GROUP):
                self.createJob(self.util, project, options.GROUP, options.DATASOURCE)
        else:
            # 'all': queue every distribution in turn.
            for group in ('stable', 'testing', 'unstable'):
                for project in self.getProjectGroup(group):
                    self.createJob(self.util, project, group, options.DATASOURCE)
    #Returns the list of projects of type "type" where each project is a tuple of
    #the form (project name, project version, project description)
    def getProjectGroup(self, type):
        """Scrape the packages.debian.org listing for the given group and
        return a list of (project name, version, description) tuples."""
        urls = {'stable': STABLEPACKAGESURL,
                'testing': TESTINGPACKAGESURL,
                'unstable': UNSTABLEPACKAGESURL}
        try:
            html = urllib2.urlopen(urls[type]).read()
        except:
            print('Error retreiving ' + type + ' project page HTML - exiting')
            sys.exit()
        try:
            projects = re.finditer(r"<dt><a href='(.*?)'\s+id='.*?'>.*?</a>\s+\((.*?)\)\s*</dt>\s*<dd>(.*?)</dd>", html)
        except:
            print('String matching error when retreiving ' + type + ' projects - exiting')
            sys.exit()
        #Tuple: (projectname, version, description)
        return [(m.group(1), m.group(2), m.group(3)) for m in projects]
    #Creates the jobs for a specific project
    def createJob(self, util, project, deb_type, dataSourceID):
        """Insert the job rows for one project: htmlRetreival and descParse
        for every distribution, plus devParse and copyrightParse for stable.
        (The 'htmlRetreival' spelling is a DB value matched by the workers -
        do not correct it.)"""
        insert = '''INSERT INTO debian_jobs(
        proj_name,
        debian_type,
        job_type,
        status,
        datasource_id,
        last_modified)
        VALUES(%s, %s, %s, %s, %s, NOW())'''
        try:
            util.execQuery(insert, project[0], deb_type, 'htmlRetreival', 'pending', dataSourceID)
            util.execQuery(insert, project[0], deb_type, 'descParse', 'pending', dataSourceID)
            if deb_type == 'stable':
                util.execQuery(insert, project[0], deb_type, 'devParse', 'pending', dataSourceID)
                util.execQuery(insert, project[0], deb_type, 'copyrightParse', 'pending', dataSourceID)
        except:
            print('Error creating jobs for project ' + project[0])
#Instantiate the main class; all work happens in Debian_MakeJobs.__init__.
Debian_MakeJobs()
| Python |
#!/usr/bin/env python
import MySQLdb
import sys
import re
import traceback
import signal
import threading
from debian_Utilities import Debian_Utilities
#Global variables (not sure if we need this)
#isError = False
#Class for main routine
class Debian_ParseDevelopers:
    """Worker daemon for 'devParse' jobs.

    Polls the debian_jobs queue forever; each pending job is handed to a
    Parse_Thread that extracts maintainer/co-maintainer information.  On
    SIGINT the in-flight thread is allowed to finish before the process exits.
    """
    #Signal handler for clean exiting (SIGINT)
    def sigHandler(self, signum, frame):
        try:
            # Wait for the in-flight worker, then mark its job complete so it
            # is not re-run on the next start.
            self.parse_Thread.join()
            self.util.updateStatus('complete', self.job[0])
            print 'Clean exit'
        except:
            self.util.postError(traceback.format_exc(), self.job[0])
        # Propagates into the main loop, whose `except SystemExit` exits.
        raise SystemExit
    #Main routine
    def __init__(self):
        self.util = Debian_Utilities()
        signal.signal(signal.SIGINT, self.sigHandler)
        # Poll the queue forever.  NOTE(review): busy-spins when no job is
        # pending - there is no sleep between findJob() calls.
        while(True):
            # isError is set by Parse_Thread on failure; reset per job.
            global isError
            isError = False
            self.job = None
            self.job = self.util.findJob('devParse')
            if (self.job):
                try:
                    print 'Starting ' + self.job[1]
                    self.util.updateStatus('in progress', self.job[0])
                    # job tuple: (job_id, proj_name, debian_type, datasource_id)
                    self.parse_Thread = Parse_Thread(self.util, self.job[0], self.job[1], self.job[2], self.job[3])
                    self.parse_Thread.start()
                    self.parse_Thread.join()
                    if not isError:
                        self.util.updateStatus('complete', self.job[0])
                        print 'Finishing ' + self.job[1]
                except SystemExit:
                    sys.exit()
                except:
                    self.util.postError(traceback.format_exc(), self.job[0])
#Threaded class that does the actual work
class Parse_Thread(threading.Thread):
    """Worker thread for one 'devParse' job.

    Reads the stored developer-info HTML for a project from
    debian_project_indexes_stable and inserts Maintainer / Co-Maintainer rows
    into debian_project_developers.
    """
    def __init__(self, util, job_id, projName, debianType, datasource):
        threading.Thread.__init__(self)
        self.util = util              # Debian_Utilities instance (DB helper)
        self.job_id = job_id          # job-queue row id, used for error reports
        self.projName = projName      # package unix name
        self.debianType = debianType  # distribution (devParse jobs are stable-only)
        self.datasource = datasource  # datasource_id for all queries
    #Parse routine that populates the developers table
    def run(self):
        try:
            select = '''SELECT devshtml
            FROM debian_project_indexes_stable
            WHERE proj_unixname = %s
            AND datasource_id = %s'''
            devhtml = self.util.execQuery(select, self.projName, self.datasource)[0]
            result = re.search(r'<td class="labelcell">Maintainer</td>\s*<td class="contentcell">\s*<a href="(.*?)">(.*?)</a>\s*<a class="email" href="mailto:(.*?)">', devhtml, re.DOTALL)
            url = result.group(1)
            name = result.group(2)
            email = result.group(3)
            insert = """INSERT INTO debian_project_developers(
            proj_unixname,
            datasource_id,
            name,
            email,
            url,
            role,
            date_collected)
            VALUES(%s, %s, %s, %s, %s, 'Maintainer', NOW())"""
            self.util.execQuery(insert, self.projName, self.datasource, name, email, url)
            result = re.search(r'Co-Maintainers</a></td>\s*<td class="contentcell">(.*?)</td>', devhtml)
            if (result):
                comaintlist = result.group(1)
                # BUG FIX: the old code called str.split(r'<br>', comaintlist),
                # i.e. it split the literal string '<br>' using the HTML as the
                # separator, so co-maintainers were never actually iterated.
                # Split the HTML on '<br>' instead.
                comaints = comaintlist.split('<br>')
                for comaint in comaints:
                    # BUG FIX: the old pattern wrapped the e-mail anchor in
                    # [...], which regex treats as a character class, so the
                    # pattern compiled with only two groups and group(3)
                    # raised.  Use an optional non-capturing group instead.
                    result = re.search(r'<a href="(.*?)">(.*?)</a>\s*(?:<a class="email" href="mailto:(.*?)">mail</a>)?', comaint)
                    if (result):
                        url = result.group(1)
                        name = result.group(2)
                        # group(3) is None when the co-maintainer has no e-mail link.
                        email = result.group(3) or ''
                        insert = """INSERT INTO debian_project_developers(
                        proj_unixname,
                        datasource_id,
                        name,
                        email,
                        url,
                        role,
                        date_collected)
                        VALUES(%s, %s, %s, %s, %s, 'Co-Maintainer', NOW())"""
                        self.util.execQuery(insert, self.projName, self.datasource, name, email, url)
        except:
            # Flag the failure for the main loop and record the traceback.
            global isError
            isError = True
            self.util.postError(traceback.format_exc(), self.job_id)
#Instantiate the main class; its __init__ loops over devParse jobs forever.
Debian_ParseDevelopers()
#!/usr/bin/env python
import MySQLdb
import sys
import traceback
import signal
import threading
import re
from debian_Utilities import Debian_Utilities
#Global variables (not sure if we need this)
#isError = False
#Class for main routine
class Debian_ParseCopyrights:
    """Worker daemon for 'copyrightParse' jobs.

    Polls the debian_jobs queue forever; each pending job is handed to a
    Parse_Thread that extracts a homepage URL from the stored copyright HTML.
    On SIGINT the in-flight thread is allowed to finish before exiting.
    """
    #Signal handler for clean exiting (SIGINT)
    def sigHandler(self, signum, frame):
        try:
            # Wait for the in-flight worker, then mark its job complete.
            self.parse_Thread.join()
            self.util.updateStatus('complete', self.job[0])
            print 'Clean exit'
        except:
            self.util.postError(traceback.format_exc(), self.job[0])
        # Propagates into the main loop, whose `except SystemExit` exits.
        raise SystemExit
    #Main routine
    def __init__(self):
        self.util = Debian_Utilities()
        signal.signal(signal.SIGINT, self.sigHandler)
        while(True):
            # isError is set by Parse_Thread on failure; reset per job.
            global isError
            isError = False
            self.job = None
            self.job = self.util.findJob('copyrightParse')
            if (self.job):
                try:
                    print 'Starting ' + self.job[1]
                    # NOTE(review): unlike the devParse daemon, this loop does
                    # not call updateStatus('in progress', ...) here; findJob()
                    # already marks the row in progress, so this may be
                    # intentional - confirm.
                    self.parse_Thread = Parse_Thread(self.util, self.job[0], self.job[1], self.job[2], self.job[3])
                    self.parse_Thread.start()
                    self.parse_Thread.join()
                    if not isError:
                        self.util.updateStatus('complete', self.job[0])
                        print 'Finishing ' + self.job[1]
                except SystemExit:
                    sys.exit()
                except:
                    self.util.postError(traceback.format_exc(), self.job[0])
#Threaded class that does the actual work
class Parse_Thread(threading.Thread):
    """Worker thread for one 'copyrightParse' job.

    Loads the stored copyright-file HTML for a project from
    debian_project_indexes_stable, pulls out the first http:// URL it can
    find, trims trailing punctuation, and records it in debian_copyright_urls.
    """
    def __init__(self, util, job_id, projName, debianType, dataSource):
        threading.Thread.__init__(self)
        self.util = util              # Debian_Utilities instance (DB helper)
        self.job_id = job_id          # job-queue row id, used for error reports
        self.projName = projName      # package unix name
        self.debianType = debianType  # distribution name
        self.datasource = dataSource  # datasource_id for all queries
    #Parse routine that finds the homepage URLs in the copyright HTML and inserts
    #it in the copyright table
    def run(self):
        try:
            query = '''SELECT copyrighthtml
            FROM debian_project_indexes_stable
            WHERE proj_unixname = %s
            AND datasource_id = %s'''
            page = self.util.execQuery(query, self.projName, self.datasource)[0]
            # Candidate URL terminators, tried in priority order; the first
            # pattern that matches anywhere in the page wins.
            terminators = (r'>', r'<', r'\)', r',', r'"', r"\'",
                           r'/\.', r'.$', r';', r'\s+', r'$')
            homepage = ''
            for tail in terminators:
                found = re.search(r'http://(.*?)' + tail, page)
                if found:
                    homepage = 'http://' + found.group(1)
                    break
            # Drop any trailing punctuation the patterns let through.
            homepage = homepage.rstrip(' ,)><\'";')
            record = """INSERT INTO debian_copyright_urls(
            proj_unixname,
            datasource_id,
            url,
            date_collected)
            VALUES(%s, %s, %s, NOW())"""
            self.util.execQuery(record, self.projName, self.datasource, homepage)
        except:
            # Flag the failure for the main loop and record the traceback.
            global isError
            isError = True
            self.util.postError(traceback.format_exc(), self.job_id)
#Instantiate the main class; its __init__ loops over copyrightParse jobs forever.
Debian_ParseCopyrights()
#!/usr/bin/env python
import sys
import MySQLdb
import traceback
#Constants
#Plain-text DB config file: host, port, user, password, database (one per line).
DBFILENAME = 'dbInfo.txt'
class Debian_Utilities:
    """Shared MySQL helpers for the Debian collection scripts.

    Connection parameters are read from DBFILENAME (host, port, user,
    password, database - one per line).  All debian_jobs queue manipulation
    lives here.
    """
    #Executes the supplied query string using the supplied parameters and returns
    #the first row returned by the query
    def execQuery(self, queryString, *params):
        try:
            cursor = self.dbh.cursor()
            cursor.execute(queryString, params)
        except:
            raise Exception(traceback.format_exc())
        row = cursor.fetchone()
        cursor.close()  # release the cursor; row is already materialized
        return row
    #Creates a database connection using the information contained in the DBFILENAME
    #file and returns the database connection handle
    def connect(self):
        """Read DBFILENAME and open the MySQL connection."""
        try:
            dbFile = open(DBFILENAME, 'r')
        except:
            print('Cannot open database information file - exiting')
            sys.exit()
        host = dbFile.readline().strip()
        port = int(dbFile.readline().strip())
        username = dbFile.readline().strip()
        password = dbFile.readline().strip()
        database = dbFile.readline().strip()
        dbFile.close()
        try:
            # BUG FIX: `port` was read from the config file but never passed
            # to MySQLdb.connect, so a non-default port was silently ignored.
            dbh = MySQLdb.connect(host=host, port=port, user=username, passwd=password, db=database)
        except:
            print('Error connecting to database - exiting\nTraceback:\n' + traceback.format_exc())
            sys.exit()
        # NOTE(review): these scripts never call commit(); confirm the
        # server/connection autocommit setting before relying on inserts
        # persisting.
        return dbh
    #Returns a tuple containing the information of a job to be completed in the form
    #of (job ID, project name, debian type, datasource id)
    def findJob(self, job_type):
        """Atomically claim one pending job of `job_type`.

        Returns (job_id, proj_name, debian_type, datasource_id), or None when
        no pending job of that type exists (callers test `if (self.job):`).
        """
        lock = '''LOCK TABLE debian_jobs READ, debian_jobs as t WRITE'''
        select = '''SELECT job_id, proj_name, debian_type, datasource_id
        FROM debian_jobs AS t
        WHERE job_type = %s
        AND status = 'pending'
        LIMIT 1;'''
        update = '''UPDATE debian_jobs AS t
        SET status = 'in progress',
        last_modified = NOW()
        WHERE job_id = %s'''
        unlock = '''UNLOCK TABLES'''
        try:
            cursor = self.dbh.cursor()
            cursor.execute(lock)
            # BUG FIX: parameters must be a real tuple - (x) is just x
            # parenthesized, (x,) is a 1-tuple.
            cursor.execute(select, (job_type,))
            result = cursor.fetchone()
            # BUG FIX: an empty queue used to raise (result[0] on None) and
            # crash the polling daemons; return None instead, as the callers
            # expect.
            if result:
                cursor.execute(update, (result[0],))
            cursor.execute(unlock)
        except:
            raise Exception('Error in job selection')
        return result
    #Updates the status of the specified job with the supplied status message
    def updateStatus(self, status, id):
        """Set the status (and last_modified) of job `id`."""
        try:
            cursor = self.dbh.cursor()
            # BUG FIX: the old code sent 'SELECT ... FOR UPDATE; UPDATE ...'
            # as a single execute() call - MySQLdb rejects multi-statement
            # queries by default - and concatenated the id into the SQL
            # instead of parameterizing it.  Run the statements separately.
            cursor.execute('SELECT status FROM debian_jobs WHERE job_id = %s FOR UPDATE', (id,))
            cursor.execute('''UPDATE debian_jobs
            SET status = %s, last_modified = NOW()
            WHERE job_id = %s''', (status, id))
            cursor.close()
        except:
            print('Error updating status to ' + status + ' on job ' + str(id) + ' - exiting')
            sys.exit()
    #Posts the supplied error message to the specified job in the job queue
    def postError(self, message, id):
        """Mark job `id` as errored and store `message` (a traceback) on it."""
        try:
            cursor = self.dbh.cursor()
            # Same multi-statement/parameterization fix as updateStatus().
            cursor.execute('SELECT error_msg FROM debian_jobs WHERE job_id = %s FOR UPDATE', (id,))
            cursor.execute('''UPDATE debian_jobs
            SET status = 'error', error_msg = %s, last_modified = NOW()
            WHERE job_id = %s''', (message, id))
            cursor.close()
        except:
            print('Error writing error message to job ' + str(id) + ': ' + message + ' - exiting')
            sys.exit()
    #Creates the database connection
    def __init__(self):
        self.dbh = self.connect()
| Python |
#!/usr/bin/env python
import MySQLdb
import sys
import traceback
import signal
import threading
import urllib2
import re
from debian_Utilities import Debian_Utilities
#Debian URL constants
#Base package-page URLs; the package unix name is appended to fetch its page.
STABLEURLBASE = 'http://packages.debian.org/stable/'
TESTINGURLBASE = 'http://packages.debian.org/testing/'
UNSTABLEURLBASE = 'http://packages.debian.org/unstable/'
#Class for main routine
class Debian_RetreiveHTML:
    """Worker daemon for 'htmlRetreival' jobs.

    Polls the debian_jobs queue forever; each pending job is handed to a
    Parse_Thread that downloads the project's packages.debian.org pages and
    stores the raw HTML.  On SIGINT the in-flight thread finishes first.
    """
    #Signal handler for clean exiting (SIGINT)
    def sigHandler(self, signum, frame):
        try:
            # Wait for the in-flight worker, then mark its job complete.
            self.parse_Thread.join()
            self.util.updateStatus('complete', self.job[0])
            print('Clean exit')
        except:
            self.util.postError(traceback.format_exc(), self.job[0])
        # Propagates into the main loop, whose `except SystemExit` exits.
        raise SystemExit
    #Main routine
    def __init__(self):
        self.util = Debian_Utilities()
        signal.signal(signal.SIGINT, self.sigHandler)
        while(True):
            # isError is set by Parse_Thread on failure; reset per job.
            global isError
            isError = False
            self.job = None
            self.job = self.util.findJob('htmlRetreival')
            if (self.job):
                try:
                    print('Starting ' + self.job[1])
                    # job tuple: (job_id, proj_name, debian_type, datasource_id)
                    self.parse_Thread = Parse_Thread(self.util, self.job[0], self.job[1], self.job[2], self.job[3])
                    self.parse_Thread.start()
                    self.parse_Thread.join()
                    # Idiom/consistency fix: was `if isError == False`; the
                    # sibling daemons use `if not isError`.
                    if not isError:
                        self.util.updateStatus('complete', self.job[0])
                        print('Finishing ' + self.job[1])
                except SystemExit:
                    sys.exit()
                except:
                    self.util.postError(traceback.format_exc(), self.job[0])
#Threaded class that does the actual work
class Parse_Thread(threading.Thread):
    """Worker thread for one 'htmlRetreival' job.

    Downloads the packages.debian.org index page for the project and, for the
    stable distribution, the changelog / copyright / bugs / developer pages as
    well, then stores the raw HTML in the per-distribution index table.
    """
    def __init__(self, util, job_id, projName, debianType, datasource):
        threading.Thread.__init__(self)
        self.util = util              # Debian_Utilities instance (DB helper)
        self.job_id = job_id          # job-queue row id, used for error reports
        self.projName = projName      # package unix name
        self.debianType = debianType  # 'stable', 'testing' or 'unstable'
        self.datasource = datasource  # datasource_id for all queries
    #Parse routine that populates the html fields in the proper
    #debian_project_indexes_<group> table
    def run(self):
        try:
            # `table` is only bound for testing/unstable; the stable branch
            # below uses the hard-coded stable table name instead.
            if self.debianType == 'stable':
                indexhtml = urllib2.urlopen(STABLEURLBASE + self.projName).read()
            elif self.debianType == 'testing':
                table = 'debian_project_indexes_testing'
                indexhtml = urllib2.urlopen(TESTINGURLBASE + self.projName).read()
            elif self.debianType == 'unstable':
                table = 'debian_project_indexes_unstable'
                indexhtml = urllib2.urlopen(UNSTABLEURLBASE + self.projName).read()
            #We only grab this stuff for the stable distribution
            if self.debianType == 'stable':
                # Follow links found on the index page; .group(1) raises (and
                # is reported via postError) if a link is missing.
                clURL = re.search(r'<a href="(.*?)">Debian Changelog</a>', indexhtml).group(1)
                cpURL = re.search(r'<a href="(.*?)">Copyright File</a>', indexhtml).group(1)
                bugURL = r'http://bugs.debian.org/' + self.projName
                devURL = re.search(r'<a href="(.*?)">Developer Information', indexhtml).group(1)
                devhtml = urllib2.urlopen(devURL).read()
                bughtml = urllib2.urlopen(bugURL).read()
                cphtml = urllib2.urlopen(cpURL).read()
                clhtml = urllib2.urlopen(clURL).read()
            #This info only exists in the debian_project_indexes_stable table
            if self.debianType == 'stable':
                insert = '''INSERT INTO debian_project_indexes_stable (
                proj_unixname,
                datasource_id,
                indexhtml,
                bugshtml,
                devshtml,
                copyrighthtml,
                changeloghtml,
                date_collected)
                VALUES(%s, %s, %s, %s, %s, %s, %s, NOW())'''
                self.util.execQuery(insert, self.projName, self.datasource, indexhtml, bughtml, devhtml, cphtml, clhtml)
            #In the other two tables we only grab the index HTML
            else:
                # `table` was bound above to a fixed literal, so the
                # concatenation is not an injection risk.
                insert = 'INSERT INTO ' + table + ''' (
                proj_unixname,
                datasource_id,
                indexhtml,
                date_collected)
                VALUES(%s, %s, %s, NOW())'''
                self.util.execQuery(insert, self.projName, self.datasource, indexhtml)
        except:
            # Record the traceback on the job row and flag the main loop.
            global isError
            isError = True
            self.util.postError(traceback.format_exc(), self.job_id)
#Instantiate the main class; its __init__ loops over htmlRetreival jobs forever.
Debian_RetreiveHTML()
| Python |
#!/usr/bin/env python
#This script will fill the job queue with the desired set of Debian projects
#for parsing.
import MySQLdb
import urllib2
import sys
import re
from optparse import OptionParser
from debian_Utilities import Debian_Utilities
#Debian "all packages" listing pages, one per distribution.
STABLEPACKAGESURL = "http://packages.debian.org/stable/allpackages"
TESTINGPACKAGESURL = "http://packages.debian.org/testing/allpackages"
UNSTABLEPACKAGESURL = "http://packages.debian.org/unstable/allpackages"
#Plain-text DB config file: host, port, user, password, database (one per line).
DBFILENAME = "dbInfo.txt"
class Debian_MakeJobs:
    """Fill the debian_jobs queue with the desired set of Debian projects.

    Command-line options:
        -g/--group       distribution to queue ('stable', 'testing',
                         'unstable'); default 'all' queues every distribution
        -d/--datasource  integer datasource_id stamped on each job (default 0)
    """
    #Main routine
    def __init__(self):
        self.util = Debian_Utilities()
        parser = OptionParser()
        parser.add_option('-g', '--group', action='store', type='string', dest='GROUP')
        parser.add_option('-d', '--datasource', action='store', type='int', dest='DATASOURCE')
        # BUG FIX: set_default() bypasses optparse's type conversion, so the
        # previous default of '0' left options.DATASOURCE as the *string* '0'
        # whenever -d was omitted.  Use the integer 0 to match type='int'.
        parser.set_default('DATASOURCE', 0)
        parser.set_default('GROUP', 'all')
        (options, args) = parser.parse_args()
        if options.GROUP != 'all':
            for project in self.getProjectGroup(options.GROUP):
                self.createJob(self.util, project, options.GROUP, options.DATASOURCE)
        else:
            # 'all': queue every distribution in turn.
            for group in ('stable', 'testing', 'unstable'):
                for project in self.getProjectGroup(group):
                    self.createJob(self.util, project, group, options.DATASOURCE)
    #Returns the list of projects of type "type" where each project is a tuple of
    #the form (project name, project version, project description)
    def getProjectGroup(self, type):
        """Scrape the packages.debian.org listing for the given group and
        return a list of (project name, version, description) tuples."""
        urls = {'stable': STABLEPACKAGESURL,
                'testing': TESTINGPACKAGESURL,
                'unstable': UNSTABLEPACKAGESURL}
        try:
            html = urllib2.urlopen(urls[type]).read()
        except:
            print('Error retreiving ' + type + ' project page HTML - exiting')
            sys.exit()
        try:
            projects = re.finditer(r"<dt><a href='(.*?)'\s+id='.*?'>.*?</a>\s+\((.*?)\)\s*</dt>\s*<dd>(.*?)</dd>", html)
        except:
            print('String matching error when retreiving ' + type + ' projects - exiting')
            sys.exit()
        #Tuple: (projectname, version, description)
        return [(m.group(1), m.group(2), m.group(3)) for m in projects]
    #Creates the jobs for a specific project
    def createJob(self, util, project, deb_type, dataSourceID):
        """Insert the job rows for one project: htmlRetreival and descParse
        for every distribution, plus devParse and copyrightParse for stable.
        (The 'htmlRetreival' spelling is a DB value matched by the workers -
        do not correct it.)"""
        insert = '''INSERT INTO debian_jobs(
        proj_name,
        debian_type,
        job_type,
        status,
        datasource_id,
        last_modified)
        VALUES(%s, %s, %s, %s, %s, NOW())'''
        try:
            util.execQuery(insert, project[0], deb_type, 'htmlRetreival', 'pending', dataSourceID)
            util.execQuery(insert, project[0], deb_type, 'descParse', 'pending', dataSourceID)
            if deb_type == 'stable':
                util.execQuery(insert, project[0], deb_type, 'devParse', 'pending', dataSourceID)
                util.execQuery(insert, project[0], deb_type, 'copyrightParse', 'pending', dataSourceID)
        except:
            print('Error creating jobs for project ' + project[0])
#Instantiate the main class; all work happens in Debian_MakeJobs.__init__.
Debian_MakeJobs()
| Python |
#!/usr/bin/env python
import sys
import MySQLdb
import traceback
#Constants
#Plain-text DB config file: host, port, user, password, database (one per line).
DBFILENAME = 'dbInfo.txt'
class Debian_Utilities:
    """Shared MySQL helpers for the Debian collection scripts.

    Connection parameters are read from DBFILENAME (host, port, user,
    password, database - one per line).  All debian_jobs queue manipulation
    lives here.
    """
    #Executes the supplied query string using the supplied parameters and returns
    #the first row returned by the query
    def execQuery(self, queryString, *params):
        try:
            cursor = self.dbh.cursor()
            cursor.execute(queryString, params)
        except:
            raise Exception(traceback.format_exc())
        row = cursor.fetchone()
        cursor.close()  # release the cursor; row is already materialized
        return row
    #Creates a database connection using the information contained in the DBFILENAME
    #file and returns the database connection handle
    def connect(self):
        """Read DBFILENAME and open the MySQL connection."""
        try:
            dbFile = open(DBFILENAME, 'r')
        except:
            print('Cannot open database information file - exiting')
            sys.exit()
        host = dbFile.readline().strip()
        port = int(dbFile.readline().strip())
        username = dbFile.readline().strip()
        password = dbFile.readline().strip()
        database = dbFile.readline().strip()
        dbFile.close()
        try:
            # BUG FIX: `port` was read from the config file but never passed
            # to MySQLdb.connect, so a non-default port was silently ignored.
            dbh = MySQLdb.connect(host=host, port=port, user=username, passwd=password, db=database)
        except:
            print('Error connecting to database - exiting\nTraceback:\n' + traceback.format_exc())
            sys.exit()
        # NOTE(review): these scripts never call commit(); confirm the
        # server/connection autocommit setting before relying on inserts
        # persisting.
        return dbh
    #Returns a tuple containing the information of a job to be completed in the form
    #of (job ID, project name, debian type, datasource id)
    def findJob(self, job_type):
        """Atomically claim one pending job of `job_type`.

        Returns (job_id, proj_name, debian_type, datasource_id), or None when
        no pending job of that type exists (callers test `if (self.job):`).
        """
        lock = '''LOCK TABLE debian_jobs READ, debian_jobs as t WRITE'''
        select = '''SELECT job_id, proj_name, debian_type, datasource_id
        FROM debian_jobs AS t
        WHERE job_type = %s
        AND status = 'pending'
        LIMIT 1;'''
        update = '''UPDATE debian_jobs AS t
        SET status = 'in progress',
        last_modified = NOW()
        WHERE job_id = %s'''
        unlock = '''UNLOCK TABLES'''
        try:
            cursor = self.dbh.cursor()
            cursor.execute(lock)
            # BUG FIX: parameters must be a real tuple - (x) is just x
            # parenthesized, (x,) is a 1-tuple.
            cursor.execute(select, (job_type,))
            result = cursor.fetchone()
            # BUG FIX: an empty queue used to raise (result[0] on None) and
            # crash the polling daemons; return None instead, as the callers
            # expect.
            if result:
                cursor.execute(update, (result[0],))
            cursor.execute(unlock)
        except:
            raise Exception('Error in job selection')
        return result
    #Updates the status of the specified job with the supplied status message
    def updateStatus(self, status, id):
        """Set the status (and last_modified) of job `id`."""
        try:
            cursor = self.dbh.cursor()
            # BUG FIX: the old code sent 'SELECT ... FOR UPDATE; UPDATE ...'
            # as a single execute() call - MySQLdb rejects multi-statement
            # queries by default - and concatenated the id into the SQL
            # instead of parameterizing it.  Run the statements separately.
            cursor.execute('SELECT status FROM debian_jobs WHERE job_id = %s FOR UPDATE', (id,))
            cursor.execute('''UPDATE debian_jobs
            SET status = %s, last_modified = NOW()
            WHERE job_id = %s''', (status, id))
            cursor.close()
        except:
            print('Error updating status to ' + status + ' on job ' + str(id) + ' - exiting')
            sys.exit()
    #Posts the supplied error message to the specified job in the job queue
    def postError(self, message, id):
        """Mark job `id` as errored and store `message` (a traceback) on it."""
        try:
            cursor = self.dbh.cursor()
            # Same multi-statement/parameterization fix as updateStatus().
            cursor.execute('SELECT error_msg FROM debian_jobs WHERE job_id = %s FOR UPDATE', (id,))
            cursor.execute('''UPDATE debian_jobs
            SET status = 'error', error_msg = %s, last_modified = NOW()
            WHERE job_id = %s''', (message, id))
            cursor.close()
        except:
            print('Error writing error message to job ' + str(id) + ': ' + message + ' - exiting')
            sys.exit()
    #Creates the database connection
    def __init__(self):
        self.dbh = self.connect()
| Python |
#!/usr/bin/env python
import MySQLdb
import sys
import traceback
import signal
import threading
import urllib2
import re
from debian_Utilities import Debian_Utilities
#Debian URL constants
#Base package-page URLs; the package unix name is appended to fetch its page.
STABLEURLBASE = 'http://packages.debian.org/stable/'
TESTINGURLBASE = 'http://packages.debian.org/testing/'
UNSTABLEURLBASE = 'http://packages.debian.org/unstable/'
#Class for main routine
class Debian_RetreiveHTML:
    """Worker daemon for 'htmlRetreival' jobs.

    Polls the debian_jobs queue forever; each pending job is handed to a
    Parse_Thread that downloads the project's packages.debian.org pages and
    stores the raw HTML.  On SIGINT the in-flight thread finishes first.
    """
    #Signal handler for clean exiting (SIGINT)
    def sigHandler(self, signum, frame):
        try:
            # Wait for the in-flight worker, then mark its job complete.
            self.parse_Thread.join()
            self.util.updateStatus('complete', self.job[0])
            print('Clean exit')
        except:
            self.util.postError(traceback.format_exc(), self.job[0])
        # Propagates into the main loop, whose `except SystemExit` exits.
        raise SystemExit
    #Main routine
    def __init__(self):
        self.util = Debian_Utilities()
        signal.signal(signal.SIGINT, self.sigHandler)
        while(True):
            # isError is set by Parse_Thread on failure; reset per job.
            global isError
            isError = False
            self.job = None
            self.job = self.util.findJob('htmlRetreival')
            if (self.job):
                try:
                    print('Starting ' + self.job[1])
                    # job tuple: (job_id, proj_name, debian_type, datasource_id)
                    self.parse_Thread = Parse_Thread(self.util, self.job[0], self.job[1], self.job[2], self.job[3])
                    self.parse_Thread.start()
                    self.parse_Thread.join()
                    # Idiom/consistency fix: was `if isError == False`; the
                    # sibling daemons use `if not isError`.
                    if not isError:
                        self.util.updateStatus('complete', self.job[0])
                        print('Finishing ' + self.job[1])
                except SystemExit:
                    sys.exit()
                except:
                    self.util.postError(traceback.format_exc(), self.job[0])
#Threaded class that does the actual work
class Parse_Thread(threading.Thread):
    """Worker thread for one 'htmlRetreival' job.

    Downloads the packages.debian.org index page for the project and, for the
    stable distribution, the changelog / copyright / bugs / developer pages as
    well, then stores the raw HTML in the per-distribution index table.
    """
    def __init__(self, util, job_id, projName, debianType, datasource):
        threading.Thread.__init__(self)
        self.util = util              # Debian_Utilities instance (DB helper)
        self.job_id = job_id          # job-queue row id, used for error reports
        self.projName = projName      # package unix name
        self.debianType = debianType  # 'stable', 'testing' or 'unstable'
        self.datasource = datasource  # datasource_id for all queries
    #Parse routine that populates the html fields in the proper
    #debian_project_indexes_<group> table
    def run(self):
        try:
            # `table` is only bound for testing/unstable; the stable branch
            # below uses the hard-coded stable table name instead.
            if self.debianType == 'stable':
                indexhtml = urllib2.urlopen(STABLEURLBASE + self.projName).read()
            elif self.debianType == 'testing':
                table = 'debian_project_indexes_testing'
                indexhtml = urllib2.urlopen(TESTINGURLBASE + self.projName).read()
            elif self.debianType == 'unstable':
                table = 'debian_project_indexes_unstable'
                indexhtml = urllib2.urlopen(UNSTABLEURLBASE + self.projName).read()
            #We only grab this stuff for the stable distribution
            if self.debianType == 'stable':
                # Follow links found on the index page; .group(1) raises (and
                # is reported via postError) if a link is missing.
                clURL = re.search(r'<a href="(.*?)">Debian Changelog</a>', indexhtml).group(1)
                cpURL = re.search(r'<a href="(.*?)">Copyright File</a>', indexhtml).group(1)
                bugURL = r'http://bugs.debian.org/' + self.projName
                devURL = re.search(r'<a href="(.*?)">Developer Information', indexhtml).group(1)
                devhtml = urllib2.urlopen(devURL).read()
                bughtml = urllib2.urlopen(bugURL).read()
                cphtml = urllib2.urlopen(cpURL).read()
                clhtml = urllib2.urlopen(clURL).read()
            #This info only exists in the debian_project_indexes_stable table
            if self.debianType == 'stable':
                insert = '''INSERT INTO debian_project_indexes_stable (
                proj_unixname,
                datasource_id,
                indexhtml,
                bugshtml,
                devshtml,
                copyrighthtml,
                changeloghtml,
                date_collected)
                VALUES(%s, %s, %s, %s, %s, %s, %s, NOW())'''
                self.util.execQuery(insert, self.projName, self.datasource, indexhtml, bughtml, devhtml, cphtml, clhtml)
            #In the other two tables we only grab the index HTML
            else:
                # `table` was bound above to a fixed literal, so the
                # concatenation is not an injection risk.
                insert = 'INSERT INTO ' + table + ''' (
                proj_unixname,
                datasource_id,
                indexhtml,
                date_collected)
                VALUES(%s, %s, %s, NOW())'''
                self.util.execQuery(insert, self.projName, self.datasource, indexhtml)
        except:
            # Record the traceback on the job row and flag the main loop.
            global isError
            isError = True
            self.util.postError(traceback.format_exc(), self.job_id)
#Instantiate the main class; its __init__ loops over htmlRetreival jobs forever.
Debian_RetreiveHTML()
| Python |
#!/usr/bin/env python
#
# Copyright 2006, 2007 Google Inc. All Rights Reserved.
# Author: danderson@google.com (David Anderson)
#
# Script for uploading files to a Google Code project.
#
# This is intended to be both a useful script for people who want to
# streamline project uploads and a reference implementation for
# uploading files to Google Code projects.
#
# To upload a file to Google Code, you need to provide a path to the
# file on your local machine, a small summary of what the file is, a
# project name, and a valid account that is a member or owner of that
# project. You can optionally provide a list of labels that apply to
# the file. The file will be uploaded under the same name that it has
# in your local filesystem (that is, the "basename" or last path
# component). Run the script with '--help' to get the exact syntax
# and available options.
#
# Note that the upload script requests that you enter your
# googlecode.com password. This is NOT your Gmail account password!
# This is the password you use on googlecode.com for committing to
# Subversion and uploading files. You can find your password by going
# to http://code.google.com/hosting/settings when logged in with your
# Gmail account. If you have already committed to your project's
# Subversion repository, the script will automatically retrieve your
# credentials from there (unless disabled, see the output of '--help'
# for details).
#
# If you are looking at this script as a reference for implementing
# your own Google Code file uploader, then you should take a look at
# the upload() function, which is the meat of the uploader. You
# basically need to build a multipart/form-data POST request with the
# right fields and send it to https://PROJECT.googlecode.com/files .
# Authenticate the request using HTTP Basic authentication, as is
# shown below.
#
# Licensed under the terms of the Apache Software License 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Questions, comments, feature requests and patches are most welcome.
# Please direct all of these to the Google Code users group:
# http://groups.google.com/group/google-code-hosting
"""Google Code file uploader script.
"""
__author__ = 'danderson@google.com (David Anderson)'
import httplib
import os.path
import optparse
import getpass
import base64
import sys
def upload(file, project_name, user_name, password, summary, labels=None):
    """Upload a file to a Google Code project's file server.

    Args:
      file: The local path to the file.  (Note: the parameter name shadows
        the `file` builtin; kept for interface compatibility.)
      project_name: The name of your project on Google Code.
      user_name: Your Google account name.
      password: The googlecode.com password for your account.
        Note that this is NOT your global Google Account password!
      summary: A small description for the file.
      labels: an optional list of label strings with which to tag the file.

    Returns: a tuple:
      http_status: 201 if the upload succeeded, something else if an
        error occurred.
      http_reason: The human-readable string associated with http_status
      file_url: If the upload succeeded, the URL of the file on Google
        Code, None otherwise.
    """
    # The login is the user part of user@gmail.com. If the login provided
    # is in the full user@domain form, strip it down.
    if user_name.endswith('@gmail.com'):
        user_name = user_name[:user_name.index('@gmail.com')]
    form_fields = [('summary', summary)]
    if labels is not None:
        form_fields.extend([('label', l.strip()) for l in labels])
    content_type, body = encode_upload_request(form_fields, file)
    upload_host = '%s.googlecode.com' % project_name
    upload_uri = '/files'
    # HTTP Basic auth token ("user:password" base64-encoded).
    auth_token = base64.b64encode('%s:%s'% (user_name, password))
    headers = {
        'Authorization': 'Basic %s' % auth_token,
        'User-Agent': 'Googlecode.com uploader v0.9.4',
        'Content-Type': content_type,
    }
    server = httplib.HTTPSConnection(upload_host)
    server.request('POST', upload_uri, body, headers)
    resp = server.getresponse()
    # Only the status line and headers are used; the connection is closed
    # without reading the response body.
    server.close()
    if resp.status == 201:
        location = resp.getheader('Location', None)
    else:
        location = None
    return resp.status, resp.reason, location
def encode_upload_request(fields, file_path):
    """Encode the given fields and file into a multipart form body.

    fields is a sequence of (name, value) pairs.  file_path is the path of
    the file to upload; it is sent under its basename.

    Returns: (content_type, body) ready for httplib.HTTP instance
    """
    BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla'
    CRLF = '\r\n'
    parts = []
    # Metadata fields first: one multipart section per (name, value) pair.
    for field_name, field_value in fields:
        parts += [
            '--' + BOUNDARY,
            'Content-Disposition: form-data; name="%s"' % field_name,
            '',
            field_value,
        ]
    # Then the file section itself, named after the file's basename.
    upload_name = os.path.basename(file_path)
    handle = open(file_path, 'rb')
    payload = handle.read()
    handle.close()
    parts += [
        '--' + BOUNDARY,
        'Content-Disposition: form-data; name="filename"; filename="%s"'
        % upload_name,
        # The upload server determines the mime-type, no need to set it.
        'Content-Type: application/octet-stream',
        '',
        payload,
    ]
    # Closing boundary terminates the form body.
    parts += ['--' + BOUNDARY + '--', '']
    return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(parts)
def upload_find_auth(file_path, project_name, summary, labels=None,
                     user_name=None, password=None, tries=3):
    """Find credentials and upload a file to a Google Code project's file server.

    file_path, project_name, summary, and labels are passed as-is to upload.

    Args:
      file_path: The local path to the file.
      project_name: The name of your project on Google Code.
      summary: A small description for the file.
      labels: an optional list of label strings with which to tag the file.
      user_name: Your Google account name; prompted for on stdin when None.
      password: The googlecode.com password; prompted for (no echo) when None.
      tries: How many attempts to make before giving up.

    Returns the (status, reason, url) tuple from the last upload() attempt.
    """
    while tries > 0:
        if user_name is None:
            # Read username if not specified or loaded from svn config, or on
            # subsequent tries.
            sys.stdout.write('Please enter your googlecode.com username: ')
            sys.stdout.flush()
            user_name = sys.stdin.readline().rstrip()
        if password is None:
            # Read password if not loaded from svn config, or on subsequent tries.
            print 'Please enter your googlecode.com password.'
            print '** Note that this is NOT your Gmail account password! **'
            print 'It is the password you use to access Subversion repositories,'
            print 'and can be found here: http://code.google.com/hosting/settings'
            password = getpass.getpass()
        status, reason, url = upload(file_path, project_name, user_name, password,
                                     summary, labels)
        # Returns 403 Forbidden instead of 401 Unauthorized for bad
        # credentials as of 2007-07-17.
        if status in [httplib.FORBIDDEN, httplib.UNAUTHORIZED]:
            # Reset credentials so both are re-prompted on the next try.
            user_name = password = None
            tries = tries - 1
        else:
            # We're done.
            break
    return status, reason, url
def main():
    """Parse the command line and upload the single requested file.

    Returns the process exit code: 0 on success, 1 when the upload failed.
    """
    parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY '
                                   '-p PROJECT [options] FILE')
    parser.add_option('-s', '--summary', dest='summary',
                      help='Short description of the file')
    parser.add_option('-p', '--project', dest='project',
                      help='Google Code project name')
    parser.add_option('-u', '--user', dest='user',
                      help='Your Google Code username')
    parser.add_option('-w', '--password', dest='password',
                      help='Your Google Code password')
    parser.add_option('-l', '--labels', dest='labels',
                      help='An optional list of comma-separated labels to attach '
                      'to the file')
    options, args = parser.parse_args()
    # Validate the required arguments; parser.error() exits the process.
    if not options.summary:
        parser.error('File summary is missing.')
    elif not options.project:
        parser.error('Project name is missing.')
    elif len(args) < 1:
        parser.error('File to upload not provided.')
    elif len(args) > 1:
        parser.error('Only one file may be specified.')
    file_path = args[0]
    if options.labels:
        labels = options.labels.split(',')
    else:
        labels = None
    status, reason, url = upload_find_auth(file_path, options.project,
                                           options.summary, labels,
                                           options.user, options.password)
    if url:
        print 'The file was uploaded successfully.'
        print 'URL: %s' % url
        return 0
    else:
        print 'An error occurred. Your file was not uploaded.'
        print 'Google Code upload server said: %s (%s)' % (reason, status)
        return 1
if __name__ == '__main__':
    sys.exit(main())
| Python |
#!/usr/bin/env python
#
# Copyright 2006, 2007 Google Inc. All Rights Reserved.
# Author: danderson@google.com (David Anderson)
#
# Script for uploading files to a Google Code project.
#
# This is intended to be both a useful script for people who want to
# streamline project uploads and a reference implementation for
# uploading files to Google Code projects.
#
# To upload a file to Google Code, you need to provide a path to the
# file on your local machine, a small summary of what the file is, a
# project name, and a valid account that is a member or owner of that
# project. You can optionally provide a list of labels that apply to
# the file. The file will be uploaded under the same name that it has
# in your local filesystem (that is, the "basename" or last path
# component). Run the script with '--help' to get the exact syntax
# and available options.
#
# Note that the upload script requests that you enter your
# googlecode.com password. This is NOT your Gmail account password!
# This is the password you use on googlecode.com for committing to
# Subversion and uploading files. You can find your password by going
# to http://code.google.com/hosting/settings when logged in with your
# Gmail account. If you have already committed to your project's
# Subversion repository, the script will automatically retrieve your
# credentials from there (unless disabled, see the output of '--help'
# for details).
#
# If you are looking at this script as a reference for implementing
# your own Google Code file uploader, then you should take a look at
# the upload() function, which is the meat of the uploader. You
# basically need to build a multipart/form-data POST request with the
# right fields and send it to https://PROJECT.googlecode.com/files .
# Authenticate the request using HTTP Basic authentication, as is
# shown below.
#
# Licensed under the terms of the Apache Software License 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Questions, comments, feature requests and patches are most welcome.
# Please direct all of these to the Google Code users group:
# http://groups.google.com/group/google-code-hosting
"""Google Code file uploader script.
"""
__author__ = 'danderson@google.com (David Anderson)'
import httplib
import os.path
import optparse
import getpass
import base64
import sys
def upload(file, project_name, user_name, password, summary, labels=None):
    """POST one local file to a Google Code project's upload server.

    Args:
        file: local path of the file to send.
        project_name: target project; determines the upload hostname.
        user_name: Google account name (a @gmail.com suffix is stripped).
        password: the googlecode.com (NOT Gmail!) password.
        summary: short description stored with the file.
        labels: optional list of label strings to tag the file with.

    Returns:
        (http_status, http_reason, file_url) -- file_url is the uploaded
        file's location on success (HTTP 201), otherwise None.
    """
    # Only the local part of a @gmail.com address is a valid login.
    gmail_suffix = '@gmail.com'
    if user_name.endswith(gmail_suffix):
        user_name = user_name[:user_name.index(gmail_suffix)]
    form_fields = [('summary', summary)]
    if labels is not None:
        form_fields += [('label', label.strip()) for label in labels]
    content_type, body = encode_upload_request(form_fields, file)
    upload_host = '%s.googlecode.com' % project_name
    auth_token = base64.b64encode('%s:%s'% (user_name, password))
    headers = {
        'Authorization': 'Basic %s' % auth_token,
        'User-Agent': 'Googlecode.com uploader v0.9.4',
        'Content-Type': content_type,
    }
    connection = httplib.HTTPSConnection(upload_host)
    connection.request('POST', '/files', body, headers)
    response = connection.getresponse()
    connection.close()
    if response.status == 201:
        return response.status, response.reason, response.getheader('Location', None)
    return response.status, response.reason, None
def encode_upload_request(fields, file_path):
    """Encode the given fields and file into a multipart form body.

    fields is a sequence of (name, value) pairs. file_path is the path of
    the file to upload. The file will be uploaded to Google Code with
    the same file name (its basename).

    Returns: (content_type, body) ready for httplib.HTTP instance
    """
    # Fixed boundary; assumed never to occur inside the uploaded payload.
    BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla'
    CRLF = '\r\n'
    body = []
    # Add the metadata about the upload first
    for key, value in fields:
        body.extend(
            ['--' + BOUNDARY,
             'Content-Disposition: form-data; name="%s"' % key,
             '',
             value,
             ])
    # Now add the file itself
    file_name = os.path.basename(file_path)
    f = open(file_path, 'rb')
    file_content = f.read()
    f.close()
    body.extend(
        ['--' + BOUNDARY,
         'Content-Disposition: form-data; name="filename"; filename="%s"'
         % file_name,
         # The upload server determines the mime-type, no need to set it.
         'Content-Type: application/octet-stream',
         '',
         file_content,
         ])
    # Finalize the form body
    body.extend(['--' + BOUNDARY + '--', ''])
    return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(body)
def upload_find_auth(file_path, project_name, summary, labels=None,
user_name=None, password=None, tries=3):
"""Find credentials and upload a file to a Google Code project's file server.
file_path, project_name, summary, and labels are passed as-is to upload.
Args:
file_path: The local path to the file.
project_name: The name of your project on Google Code.
summary: A small description for the file.
labels: an optional list of label strings with which to tag the file.
config_dir: Path to Subversion configuration directory, 'none', or None.
user_name: Your Google account name.
tries: How many attempts to make.
"""
while tries > 0:
if user_name is None:
# Read username if not specified or loaded from svn config, or on
# subsequent tries.
sys.stdout.write('Please enter your googlecode.com username: ')
sys.stdout.flush()
user_name = sys.stdin.readline().rstrip()
if password is None:
# Read password if not loaded from svn config, or on subsequent tries.
print 'Please enter your googlecode.com password.'
print '** Note that this is NOT your Gmail account password! **'
print 'It is the password you use to access Subversion repositories,'
print 'and can be found here: http://code.google.com/hosting/settings'
password = getpass.getpass()
status, reason, url = upload(file_path, project_name, user_name, password,
summary, labels)
# Returns 403 Forbidden instead of 401 Unauthorized for bad
# credentials as of 2007-07-17.
if status in [httplib.FORBIDDEN, httplib.UNAUTHORIZED]:
# Rest for another try.
user_name = password = None
tries = tries - 1
else:
# We're done.
break
return status, reason, url
def main():
    """Parse the command line and upload the single requested file.

    Returns the process exit code: 0 on success, 1 when the upload failed.
    """
    parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY '
                                   '-p PROJECT [options] FILE')
    parser.add_option('-s', '--summary', dest='summary',
                      help='Short description of the file')
    parser.add_option('-p', '--project', dest='project',
                      help='Google Code project name')
    parser.add_option('-u', '--user', dest='user',
                      help='Your Google Code username')
    parser.add_option('-w', '--password', dest='password',
                      help='Your Google Code password')
    parser.add_option('-l', '--labels', dest='labels',
                      help='An optional list of comma-separated labels to attach '
                      'to the file')
    options, args = parser.parse_args()
    # Validate the required arguments; parser.error() exits the process.
    if not options.summary:
        parser.error('File summary is missing.')
    elif not options.project:
        parser.error('Project name is missing.')
    elif len(args) < 1:
        parser.error('File to upload not provided.')
    elif len(args) > 1:
        parser.error('Only one file may be specified.')
    file_path = args[0]
    if options.labels:
        labels = options.labels.split(',')
    else:
        labels = None
    status, reason, url = upload_find_auth(file_path, options.project,
                                           options.summary, labels,
                                           options.user, options.password)
    if url:
        print 'The file was uploaded successfully.'
        print 'URL: %s' % url
        return 0
    else:
        print 'An error occurred. Your file was not uploaded.'
        print 'Google Code upload server said: %s (%s)' % (reason, status)
        return 1
if __name__ == '__main__':
    sys.exit(main())
| Python |
import whois
from pprint import pprint
domains = '''
marktplaats.nl
bla.marktplaats.nl
google.info
blabluble.info
calling.com.tw
konly7393.idv.st
www.katzenuhren.de
katzen.tv
produto.arremate.com.br
sHzAP.abowey.com
abowey.com
google.com.br
google.cz
google.fr
google.at
google.jp
www.google.co.jp
yahoo.com
google.at
www.google.org
google.de
google.info
www.google.com
digg.com
imdb.com
microsoft.com
ddarko.org
google.net
www.asp.net
google.co.uk
google.co
google.de
yandex.ru
google.us
google.eu
google.me
google.be
google.biz
google.info
google.name
google.pl
www.ddarko.pl
test.ez.lv
google.it
'''
#domains = ''
# Query each test domain in turn and dump every parsed field.
for d in domains.split('\n'):
    if not d:
        continue
    print('-'*80)
    print(d)
    w = whois.query(d, ignore_returncode=1)
    # query() returns None when no usable record was parsed; removed the
    # former dead `if w == None: pass` branch and just skip those.
    if w:
        wd = w.__dict__
        for k, v in wd.items():
            print('%20s\t[%s]' % (k, v))
| Python |
from distutils.core import setup
# Distutils packaging metadata for the python-whois library.
setup(
    name='whois',
    version='0.6.5',
    description='Python module/library for retrieving WHOIS information of domains.',
    # The README doubles as the PyPI long description.
    long_description = open('README').read(),
    author='DDarko.org',
    author_email='ddarko@ddarko.org',
    license='MIT http://www.opensource.org/licenses/mit-license.php',
    url='http://code.google.com/p/python-whois/',
    platforms = ['any'],
    packages=['whois'],
    keywords=['Python','WHOIS','TLD','domain','expiration','registrar'],
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Intended Audience :: Developers',
        'Environment :: Console',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.5',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.0',
        'Programming Language :: Python :: 3.1',
        'Programming Language :: Python :: 3.2',
        'Topic :: Internet',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
# Unused setuptools-only options, parked below as a no-op string literal.
'''
test_suite='testsuite',
entry_points="""
[console_scripts]
cmd = package:main
""",
'''
import sys
import datetime
PYTHON_VERSION = sys.version_info[0]
class Domain:
    """Value object exposing the parsed WHOIS record of a single domain."""

    def __init__(self, data):
        """data maps field names to lists of captured strings (see do_parse)."""
        def first(key):
            return data[key][0]

        self.name = first('domain_name').strip().lower()
        self.registrar = first('registrar').strip()
        self.creation_date = str_to_date(first('creation_date'))
        self.expiration_date = str_to_date(first('expiration_date'))
        self.last_updated = str_to_date(first('updated_date'))
        self.name_servers = data['name_servers']
# http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Candidate strptime() formats; str_to_date() tries them in order until
# one parses.
DATE_FORMATS = [
    '%d-%b-%Y',                 # 02-jan-2000
    '%d.%m.%Y',                 # 02.02.2000
    '%Y-%m-%d',                 # 2000-01-02
    '%Y.%m.%d',                 # 2000.01.02
    '%Y/%m/%d',                 # 2005/05/30
    '%Y.%m.%d %H:%M:%S',        # 2002.09.19 13:00:00
    '%Y%m%d %H:%M:%S',          # 20110908 14:44:51
    '%Y-%m-%d %H:%M:%S',        # 2011-09-08 14:44:51
    '%d.%m.%Y %H:%M:%S',        # 19.09.2002 13:00:00
    '%d-%b-%Y %H:%M:%S %Z',     # 24-Jul-2009 13:20:03 UTC
    '%Y/%m/%d %H:%M:%S (%z)',   # 2011/06/01 01:05:01 (+0900)
    '%Y%m%d',                   # 19990518
    '%d/%m/%Y',                 # 01/06/2011
    '%Y/%m/%d %H:%M:%S',        # 2011/06/01 01:05:01
    '%a %b %d %H:%M:%S %Z %Y',  # Tue Jun 21 23:59:59 GMT 2011
    '%a %b %d %Y',              # Tue Dec 12 2000
    '%Y-%m-%dT%H:%M:%S',        # 2007-01-26T19:10:31
    '%Y-%m-%dT%H:%M:%SZ',       # 2007-01-26T19:10:31Z
    # BUG FIX: a missing trailing comma here used to concatenate this
    # entry with the next one into a single unusable format string, so
    # neither of the two formats could ever match.
    '%Y-%m-%dt%H:%M:%S%z',      # 2009-10-27t15:26:43+0100
    '%Y-%m-%dT%H:%M:%S%z',      # 2011-03-30T19:36:27+0200
    '%Y-%m-%dT%H:%M:%S.%f%z',   # 2011-09-08T14:44:51.622265+0300
    '%Y-%m-%dt%H:%M:%S.%f',     # 2011-09-08t14:44:51.622265
    # 2012-09-19t15:12:25.493294
]
#'%Y/%m/%d %H:%M:%S (%Z)', # 2011/06/01 01:05:01 (JST)
def str_to_date(s):
    """Parse a WHOIS date string; returns None for empty or 'not defined'.

    Raises ValueError when no entry of DATE_FORMATS matches.
    """
    s = s.strip().lower()
    if not s or s == 'not defined':
        return
    if PYTHON_VERSION < 3:
        return str_to_date_py2(s)
    # Normalize the handful of timezone spellings strptime cannot digest.
    for colon_form, compact_form in (('+01:00', '+0100'), ('+02:00', '+0200'),
                                     ('+03:00', '+0300'), ('+12:00', '+1200'),
                                     ('+13:00', '+1300')):
        if s.endswith(colon_form):
            s = s.replace(colon_form, compact_form)
            break
    s = s.replace('(jst)', '(+0900)')
    for fmt in DATE_FORMATS:
        try:
            return datetime.datetime.strptime(s, fmt)
        except ValueError:
            pass
    raise ValueError("Unknown date format: '%s'" % s)
def str_to_date_py2(s):
    """Python 2 fallback: its strptime() lacks %z support, so strip a
    known UTC-offset suffix first and re-add it as a timedelta."""
    offset_hours = 0
    for suffix, hours in (('+01:00', 1), ('+02:00', 2), ('+03:00', 3),
                          ('+12:00', 12), ('+13:00', 13)):
        if s.endswith(suffix):
            s = s.replace(suffix, '')
            offset_hours = hours
            break
    if s.endswith('(jst)'):
        s = s.replace(' (jst)', '')
        offset_hours = 9
    for fmt in DATE_FORMATS:
        try:
            return datetime.datetime.strptime(s, fmt) + datetime.timedelta(hours=offset_hours)
        except ValueError:
            pass
    raise ValueError("Unknown date format: '%s'" % s)
| Python |
import subprocess
import time
import sys
import os
PYTHON_VERSION = sys.version_info[0]
CACHE = {}
CACHE_MAX_AGE = 60*60*48 # 48h
if PYTHON_VERSION >= 3:
import json
else:
import simplejson as json
def cache_load(cf):
    """Load the JSON cache snapshot at *cf* into the module-level CACHE.

    A missing file is ignored; a corrupt or unreadable snapshot leaves
    the current in-memory cache untouched.
    """
    if not os.path.isfile(cf): return
    global CACHE
    # 'with' guarantees the handle is closed even if json.load() raises;
    # Exception (not a bare except) so Ctrl-C/SystemExit still propagate.
    with open(cf, 'r') as f:
        try: CACHE = json.load(f)
        except Exception: pass
def cache_save(cf):
    """Serialize the module-level CACHE to *cf* as JSON."""
    global CACHE
    with open(cf, 'w') as out:
        json.dump(CACHE, out)
def do_query(dl, force=0, cache_file=None, slow_down=0, ignore_returncode=0):
    """Return the raw WHOIS text for the label list *dl*, via the cache.

    dl: domain split into labels, e.g. ['google', 'com'].
    force: when truthy, re-query even if a fresh cache entry exists.
    cache_file: optional JSON file persisting CACHE across runs.
    slow_down: seconds to sleep after a real query (rate limiting).
    ignore_returncode: passed through to _do_whois_query().
    """
    k = '.'.join(dl)
    # Pick up entries persisted by earlier runs before deciding to re-query.
    if cache_file: cache_load(cache_file)
    # Re-query when forced, unseen, or older than CACHE_MAX_AGE.
    if force or k not in CACHE or CACHE[k][0] < time.time() - CACHE_MAX_AGE:
        # Cache entries are (unix_timestamp, raw_whois_text) pairs.
        CACHE[k] = (
            int(time.time()),
            _do_whois_query(dl, ignore_returncode),
        )
        if cache_file: cache_save(cache_file)
        if slow_down: time.sleep(slow_down)
    # add sleep for .nl domains, they only allow 1 request/second
    # additionally they have an unknown daily limit
    #print type(dl[-1])
    if dl[-1] == "nl": time.sleep(1)
    return CACHE[k][1]
def _do_whois_query(dl, ignore_returncode):
    """
    Linux 'whois' command wrapper
    """
    # stderr is folded into stdout so any error text lands in the capture.
    p = subprocess.Popen(['whois', '.'.join(dl)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    r = p.communicate()[0]
    # Python 3's communicate() yields bytes; decode to text there.
    r = r.decode() if PYTHON_VERSION == 3 else r
    # A non-zero exit is fatal unless the caller asked to ignore it.
    if not ignore_returncode and p.returncode != 0: raise Exception(r)
    return r
"""
import socket
def _do_whois_query(dl):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((('%s.whois-servers.net' % dl[-1], 43)))
s.send(("%s\r\n" % '.'.join(dl)).encode())
response = []
while 1:
t = s.recv(4096)
response.append(t)
if t == b'': break
s.close()
return b''.join(response).decode()
""" | Python |
"""
Python module/library for retrieving WHOIS information of domains.
By DDarko.org ddarko@ddarko.org http://ddarko.org/
License MIT http://www.opensource.org/licenses/mit-license.php
Usage example
>>> import whois
>>> domain = whois.query('google.com')
>>> print(domain.__dict__)
{'expiration_date': datetime.datetime(2020, 9, 14, 0, 0), 'last_updated': datetime.datetime(2011, 7, 20, 0, 0), 'registrar': 'MARKMONITOR INC.', 'name': 'google.com', 'creation_date': datetime.datetime(1997, 9, 15, 0, 0)}
>>> print(domain.name)
google.com
>>> print(domain.expiration_date)
2020-09-14 00:00:00
"""
from ._1_query import do_query
from ._2_parse import do_parse, TLD_RE
from ._3_adjust import Domain
# Module-level defaults that query() falls back to when no explicit
# cache_file / slow_down arguments are given.
CACHE_FILE = None
SLOW_DOWN = 0
def query(domain, force=0, cache_file=None, slow_down=0, ignore_returncode=0):
    """
    force=1 <bool> Don't use cache.
    cache_file=<path> <str> Use file to store cache not only memory.
    slow_down=0 <int> Time [s] it will wait after you query WHOIS database. This is useful when there is a limit to the number of requests at a time.

    Returns a Domain instance, or None when no usable record was found.
    Raises for non-str input and for TLDs without a regex table.
    """
    assert isinstance(domain, str), Exception('`domain` - must be <str>')
    cache_file = cache_file or CACHE_FILE
    slow_down = slow_down or SLOW_DOWN
    domain = domain.lower().strip()
    d = domain.split('.')
    # A leading 'www' label is not part of the registered name.
    if d[0] == 'www': d = d[1:]
    if len(d) == 1: return None
    if d[-1] not in TLD_RE.keys(): raise Exception('Unknown TLD: %s\n(all known TLD: %s)' % (d[-1], list(TLD_RE.keys())))
    # Strip subdomain labels one at a time until the WHOIS reply actually
    # names a domain (or only registrable labels remain).
    while 1:
        pd = do_parse(do_query(d, force, cache_file, slow_down, ignore_returncode), d[-1])
        if (not pd or not pd['domain_name'][0]) and len(d) > 2: d = d[1:]
        else: break
    #print pd['domain_name'][0]
    return Domain(pd) if pd and pd['domain_name'][0] else None
| Python |
# Per-TLD regular-expression tables consumed by _2_parse.get_tld_re().
# Each dict maps a WHOIS field name to the regex capturing its value in
# that registry's reply.  The special 'extend' key names another table
# in this module to inherit entries from; a value of None marks a field
# the registry does not publish.
com = {
    'extend': None,
    'domain_name': r'Domain Name:\s?(.+)',
    'registrar': r'Registrar:\s?(.+)',
    'registrant': None,
    'creation_date': r'Creation Date:\s?(.+)',
    'expiration_date': r'Expiration Date:\s?(.+)',
    'updated_date': r'Updated Date:\s?(.+)',
    'name_servers': r'Name Server:\s?(.+)',
    'status': r'Status:\s?(.+)',
    'emails': r'[\w.-]+@[\w.-]+\.[\w]{2,4}',
}
net = {
    'extend': 'com',
}
org = {
    'extend': 'com',
    'creation_date': r'\nCreated On:\s?(.+)',
    'updated_date': r'\nLast Updated On:\s?(.+)',
}
uk = {
    'extend': 'com',
    'registrant': r'Registrant:\n\s*(.+)',
    'creation_date': r'Registered on:\s*(.+)',
    'expiration_date': r'Renewal date:\s*(.+)',
    'updated_date': r'Last updated:\s*(.+)',
    'name_servers': r'Name Servers:\s*(.+)\s*',
    'status': r'Registration status:\n\s*(.+)',
}
pl = {
    'extend': 'uk',
    'creation_date': r'\ncreated:\s*(.+)\n',
    'updated_date': r'\nlast modified:\s*(.+)\n',
    'name_servers': r'\nnameservers:\s*(.+) ',
    'status': r'\nStatus:\n\s*(.+)',
}
ru = {
    'extend': 'com',
    'domain_name': r'\ndomain:\s*(.+)',
    'creation_date': r'\ncreated:\s*(.+)',
    'expiration_date': r'\npaid-till:\s*(.+)',
    'name_servers': r'\nnserver:\s*(.+)',
    'status': r'\nstate:\s*(.+)',
}
lv = {
    'extend': 'ru',
    'creation_date': r'Registered:\s*(.+)\n',
    'updated_date': r'Changed:\s*(.+)\n',
    'status': r'Status:\s?(.+)',
}
# .jp replies use a bracketed "[Field] value" layout; no inheritance.
jp = {
    'domain_name': r'\[Domain Name\]\s?(.+)',
    'registrar': None,
    'registrant': r'\[Registrant\]\s?(.+)',
    'creation_date': r'\[Created on\]\s?(.+)',
    'expiration_date': r'\[Expires on\]\s?(.+)',
    'updated_date': r'\[Last Updated\]\s?(.+)',
    'name_servers': r'\[Name Server\]\s*(.+)',
    'status': r'\[Status\]\s?(.+)',
    'emails': r'[\w.-]+@[\w.-]+\.[\w]{2,4}',
}
de = {
    'extend': 'com',
    'domain_name': r'\ndomain:\s*(.+)',
    'name_servers': r'Nserver:\s*(.+)',
    'updated_date': r'\nChanged:\s?(.+)',
}
at = {
    'extend': 'com',
    'domain_name': r'domain:\s?(.+)',
    'updated_date': r'changed:\s?(.+)',
    'name_servers': r'nserver:\s*(.+)',
}
eu = {
    'extend': 'com',
    'domain_name': r'\ndomain:\s*(.+)',
    'registrar': r'Name:\s?(.+)',
}
biz = {
    'extend': 'com',
    'registrar': r'Sponsoring Registrar:\s?(.+)',
    'registrant': r'Registrant Organization:\s?(.+)',
    'creation_date': r'Domain Registration Date:\s?(.+)',
    'expiration_date': r'Domain Expiration Date:\s?(.+)',
    'updated_date': r'Domain Last Updated Date:\s?(.+)',
    'status': None,
}
info = {
    'extend': 'biz',
    'creation_date': r'Created On:\s?(.+)',
    'expiration_date': r'Expiration Date:\s?(.+)',
    'updated_date': r'Last Updated On:\s?(.+)',
    #'name_servers': r'Name Server:\s?(.+)',
    'status': r'Status:\s?(.+)',
}
name = {
    'extend': 'com',
    'status': r'Domain Status:\s?(.+)',
}
us = {
    'extend': 'name',
}
br = {
    'extend': 'at',
    'registrar': None,
    'registrant': r'owner:\s?(.+)',
    'creation_date': r'created:\s?(.+) #?.*',
    'expiration_date': r'expires:\s?(.+)',
    'updated_date': r'\[Last Updated\]\s?(.+)',
    'status': r'status:\s?(.+)',
}
co = {
    'extend': 'biz',
    'status': r'Status:\s?(.+)',
}
me = {
    'extend': 'biz',
    'creation_date': r'Domain Create Date:\s?(.+)',
    'expiration_date': r'Domain Expiration Date:\s?(.+)',
    'updated_date': r'Domain Last Updated Date:\s?(.+)',
    'name_servers': r'Nameservers:\s?(.+)',
    'status': r'Domain Status:\s?(.+)',
}
be = {
    'extend': 'pl',
    'domain_name': r'\nDomain:\s*(.+)',
    'registrar': r'Company Name:\n?(.+)',
    'creation_date': r'Registered:\s*(.+)\n',
    'status': r'Status:\s?(.+)',
}
# .nz uses a flat key:value reply format; no inheritance.
nz = {
    'extend': None,
    'domain_name': r'domain_name:\s?(.+)',
    'registrar': r'registrar_name:\s?(.+)',
    'registrant': r'registrant_contact_name:\s?(.+)',
    'creation_date': r'domain_dateregistered:\s?(.+)',
    'expiration_date': r'domain_datebilleduntil:\s?(.+)',
    'updated_date': r'domain_datelastmodified:\s?(.+)',
    'name_servers': r'ns_name_[0-9]{2}:\s?(.+)',
    'status': r'query_status:\s?(.+)',
    'emails': r'[\w.-]+@[\w.-]+\.[\w]{2,4}',
}
cz = {
    'extend': 'com',
    'domain_name': r'Domain:\s?(.+)',
    'registrar': r'registrar:\s?(.+)',
    'registrant': r'registrant:\s?(.+)',
    'creation_date': r'registered:\s?(.+)',
    'expiration_date': r'expire:\s?(.+)',
    'updated_date': r'changed:\s?(.+)',
    'name_servers': r'nserver:\s*(.+) ',
}
it = {
    'extend': 'com',
    'domain_name': r'Domain:\s?(.+)',
    'registrar': r'Registrar:\s*Organization:\s*(.+)',
    'registrant': r'Registrant:\s?Name:\s?(.+)',
    'creation_date': r'Created:\s?(.+)',
    'expiration_date': r'Expire Date:\s?(.+)',
    'updated_date': r'Last Update:\s?(.+)',
    'name_servers': r'Nameservers:\s?(.+)\s?(.+)\s?(.+)\s?(.+)',
    'status': r'Status:\s?(.+)',
}
fr = {
    'extend': 'com',
    'domain_name': r'domain:\s?(.+)',
    'registrar': r'registrar:\s*(.+)',
    'registrant': r'contact:\s?(.+)',
    'creation_date': r'created:\s?(.+)',
    'expiration_date': None,
    'updated_date': r'last-update:\s?(.+)',
    'name_servers': r'nserver:\s*(.+)',
    'status': r'status:\s?(.+)',
}
tv = {
    'extend': 'com',
    'domain_name': r'\s?Domain Name:\s?(.+)',
    'name_servers': r'\s?Name Server:\s?(.+)',
    'status': r'\s?Domain Status:\s?(.+)',
    'creation_date': r'\s?Creation Date:\s?(.+)',
    'expiration_date': r'\s?Expiration Date:\s?(.+)',
    'updated_date': r'\s?Updated Date:\s?(.+)',
    'registrar': r'\s?Registrar:\s?(.+)',
}
st = {
    'extend': 'tv',
    'domain_name': r'\s?Domain Name:\s?(.+)',
    'name_servers': r'\s?Name Server:\s?(.+)',
    'status': r'\s?Domain Status:\s?(.+)',
    'creation_date': r'\s?Creation Date:\s?(.+)',
    'updated_date': r'\s?Updated Date:\s?(.+)',
    'registrar': r'\s?Registrar:\s?(.+)',
}
nl = {
    'extend': 'com',
    'domain_name': r'\s?Domain name:\s?(.+)',
    'name_servers': r'\s?Domain nameservers:\s?(.+)',
    'status': r'\s?Domain Status:\s?(.+)',
    'creation_date': None,
    'updated_date': None,
    'expiration_date': None,
    'registrar': r'\s?Registrar:\s?(.+)',
}
tw = {
    'extend': 'com',
    'domain_name': r'Domain Name:\s?(.+)',
    'name_servers': r'\s?Domain servers in listed order:\s?(.+)\s?(.+)',
    'status': None,
    'creation_date': r'\s?Record created on\s?(.+)\s?\(YYYY-MM-DD\)',
    'expiration_date': r'\s?Record expires on\s?(.+)\s?\(YYYY-MM-DD\)',
    'registrar': r'\s?Registration Service Provider:\s?(.+)',
}
| Python |
from . import tld_regexpr
import re
TLD_RE = {}
def get_tld_re(tld):
    """Return tld's compiled regex table, resolving 'extend' inheritance.

    The result is memoized in TLD_RE.  String values are compiled
    case-insensitively; None values (field unavailable) pass through
    unchanged, as do already-compiled patterns inherited from a parent.
    """
    if tld in TLD_RE:
        return TLD_RE[tld]
    v = getattr(tld_regexpr, tld)
    extend = v.get('extend')
    if extend:
        # Overlay this TLD's raw entries on a copy of the parent's
        # (already compiled) table.
        tmp = get_tld_re(extend).copy()
        tmp.update(v)
    else:
        # BUG FIX: copy before mutating -- the 'del' below used to remove
        # the 'extend' key from the tld_regexpr module's own dict when no
        # parent was set (e.g. com, jp, nz declare 'extend': None).
        tmp = v.copy()
    if 'extend' in tmp:
        del tmp['extend']
    TLD_RE[tld] = dict((k, re.compile(p, re.IGNORECASE) if isinstance(p, str) else p)
                       for k, p in tmp.items())
    return TLD_RE[tld]
# Compile every public TLD table once at import time so later lookups
# only ever hit the TLD_RE cache.
[get_tld_re(tld) for tld in dir(tld_regexpr) if tld[0] != '_']
#from pprint import pprint
def do_parse(whois_str, tld):
    """Extract fields from raw WHOIS text using tld's regex table.

    Returns a dict mapping field name -> list of captured strings (each
    list is [''] when nothing matched), or None for a "no such domain"
    style reply.  Raises on any other unexpectedly short reply.
    """
    r = {}
    # Very short replies are either "not found" responses or errors.
    if whois_str.count('\n') < 5:
        s = whois_str.strip().lower()
        if s == 'not found': return
        if s.count('error'): return
        if 'no entries found' in s: return
        raise Exception(whois_str)
    # NOTE(review): presumably "Server Name:" stanzas precede the real
    # record, so we skip ahead to the "Domain Name:" section -- confirm
    # against actual registry replies.
    sn = re.findall(r'Server Name:\s?(.+)', whois_str, re.IGNORECASE)
    if sn:
        whois_str = whois_str[whois_str.find('Domain Name:'):]
    # Unknown TLDs fall back to the generic .com table.
    for k, v in TLD_RE.get(tld, TLD_RE['com']).items():
        if v is None:
            # Field declared unavailable for this TLD.
            r[k] = ['']
        else:
            r[k] = v.findall(whois_str) or ['']
    #pprint(r)
    return r
#!/usr/bin/env python
import ConfigParser
import os
import sys
import time
from threading import Thread
from optparse import OptionParser
import subprocess
def getServers(sectionname):
    """Resolve a configuration section into a flat list of hostnames.

    A section may declare a single 'host' and/or a comma-separated
    'group' of further sections, which are expanded recursively.
    """
    if not config.has_section(sectionname):
        raise RuntimeError('Server or group ' + sectionname + ' not found in configuration')
    hosts = []
    if config.has_option(sectionname, 'host'):
        hosts.append(config.get(sectionname, 'host'))
    if config.has_option(sectionname, 'group'):
        for member in config.get(sectionname, 'group').split(','):
            hosts += getServers(member)
    return hosts
class FloepCommand(Thread):
    """
    This is our worker thread.
    It's responsible for executing the command on one host and storing
    the captured output in self.result afterwards.
    """
    def __init__(self, host, command, options):
        Thread.__init__(self)
        # Target hostname, remote command, and the parsed CLI options
        # (options.command holds the per-host command template).
        self.host = host
        self.command = command
        self.options = options
    def run(self):
        # Fill the template with this host/command pair, then capture the
        # combined stdout+stderr of the spawned shell command.
        commandstring = self.options.command % {'host': self.host, 'command': self.command}
        process = subprocess.Popen(commandstring, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True).stdout
        self.result = process.read()
def readConfiguration():
    """Populate module-level commandTemplate and defaultGroup from floep.cfg.

    The config next to the script is read first, then one in the current
    working directory (whose values win on overlap).  Sensible defaults
    apply when the [settings] options are absent.
    """
    global commandTemplate
    global defaultGroup
    config.read(os.path.dirname(sys.argv[0]) + '/floep.cfg')
    config.read('floep.cfg')
    if config.has_option('settings', 'commandTemplate'):
        commandTemplate = config.get('settings', 'commandTemplate')
    else:
        commandTemplate = 'ssh %(host)s %(command)s'
    if config.has_option('settings', 'defaultGroup'):
        defaultGroup = config.get('settings', 'defaultGroup')
    else:
        defaultGroup = 'all'
def parseOptions():
    """Parse the command line; returns (options, command-string).

    Exits via parser.error() when no remote command was given.
    """
    global commandTemplate
    parser = OptionParser(
        version="floep 0.1",
        description="Executes a command on multiple hosts"
    )
    # Everything after the first positional argument belongs to the
    # remote command, not to floep itself.
    parser.disable_interspersed_args()
    parser.add_option(
        '-g', '--group',
        default=defaultGroup,
        type="string",
        help="Execute command on group",
        dest="group")
    parser.add_option(
        '-q', '--quiet',
        action="store_false",
        help="Display only server output",
        dest="verbose",
        default=True)
    parser.add_option(
        '-c', '--commandTemplate',
        default=commandTemplate,
        dest="command",
        help="The commandline to execute for each host")
    options, args = parser.parse_args()
    if options.verbose:
        print "floep 0.1"
    if not args:
        parser.error('no command given')
    # All remaining positional args form the remote command line.
    command = " ".join(args)
    return options, command
def runCommandOnServers(servers, command, options):
    """Run *command* on every server in parallel, printing results as they finish.

    One FloepCommand thread is started per host; the list is then polled,
    reaping whichever thread finishes first so output appears in
    completion order rather than submission order.
    """
    threadlist = []
    for server in servers:
        current = FloepCommand(server, command, options)
        threadlist.append(current)
        current.start()
    while threadlist:
        for server in threadlist:
            if not server.isAlive():
                server.join()
                if options.verbose:
                    print "Result from", server.host, ":"
                print server.result,
                if options.verbose:
                    print ""
                threadlist.remove(server)
                # Restart the scan: we just mutated the list being iterated.
                break
        else:
            # for/else: nothing finished this pass; back off briefly.
            time.sleep(0.010)
# Shared mutable module state: the per-host command template (filled in
# by readConfiguration()) and the parser for floep.cfg.
commandTemplate = ''
config = ConfigParser.RawConfigParser()
def main():
    """Wire everything together: config, CLI parsing, host expansion, execution."""
    readConfiguration()
    options, command = parseOptions()
    servers = getServers(options.group)
    runCommandOnServers(servers, command, options)
if __name__ == "__main__":
    main()
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.