code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
from django.core.management.base import BaseCommand
class Command(BaseCommand):
  """Replacement for Django's builtin flush command.

  Wipes the current datastore and then reloads the initial fixture data so
  the application starts from a known-clean state.
  """
  help = 'Clears the current datastore and loads the initial fixture data.'

  def run_from_argv(self, argv):
    # Imported lazily so Django settings are fully configured before use.
    from django.core.management import call_command
    from django.db import connection
    # Wipe everything currently stored, then repopulate from fixtures.
    connection.flush()
    call_command('loaddata', 'initial_data')

  def handle(self, *args, **kwargs):
    # Delegate to run_from_argv; the argv value is never inspected there.
    self.run_from_argv(None)
| Python |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import logging
from django.core.management.base import BaseCommand
def run_appcfg():
  """Invokes the SDK's appcfg.py with the arguments manage.py was given.

  Forwards sys.argv (plus '.' as the application directory) to appcfg.main(),
  so whichever subcommand manage.py was invoked with is executed by appcfg.
  """
  # import this so that we run through the checks at the beginning
  # and report the appropriate errors
  import appcfg
  # We don't really want to use that one though, it just executes this one
  from google.appengine.tools import appcfg
  # Reset the logging level to WARN as appcfg will spew tons of logs on INFO
  logging.getLogger().setLevel(logging.WARN)
  # Note: if we decide to change the name of this command to something other
  # than 'rollback' we will have to munge the args to replace whatever
  # we called it with 'rollback'
  new_args = sys.argv[:]
  new_args.append('.')
  appcfg.main(new_args)
class Command(BaseCommand):
  """Runs appcfg.py's rollback command against the current project.

  Every additional command line argument is forwarded untouched to appcfg.py.
  """
  help = 'Calls appcfg.py rollback for the current project.'
  args = '[any appcfg.py options]'

  def run_from_argv(self, argv):
    # All argument handling happens inside run_appcfg, which reads sys.argv
    # directly; the argv parameter is intentionally unused here.
    run_appcfg()
| Python |
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import django
from django.core.management.commands import startapp
import appengine_django
class Command(startapp.Command):
  def handle_label(self, *args, **kwds):
    """Temporarily adjusts django.__path__ to load app templates from the
    helpers directory.

    The base startapp command locates its app template relative to
    django.__path__, so the path is pointed at the appengine_django package
    while the base implementation runs, then restored.
    """
    old_path = django.__path__
    django.__path__ = appengine_django.__path__
    try:
      startapp.Command.handle_label(self, *args, **kwds)
    finally:
      # Always restore the real path, even when template creation raises,
      # so subsequent imports of django submodules are not broken.
      django.__path__ = old_path
class ProjectCommand(Command):
  """Variant of the startapp command used when creating a whole project.

  Remembers the target project directory and supplies it to handle_label so
  new applications are created inside that directory.
  """

  def __init__(self, project_directory):
    super(ProjectCommand, self).__init__()
    self.project_directory = project_directory

  def handle_label(self, app_name, **options):
    parent = super(ProjectCommand, self)
    parent.handle_label(app_name, self.project_directory, **options)
| Python |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from appengine_django.db.base import destroy_datastore
from appengine_django.db.base import get_test_datastore_paths
from django.core.management.base import BaseCommand
class Command(BaseCommand):
  """Overrides the default Django testserver command.

  Instead of starting the default Django development server this command fires
  up a copy of the full fledged appengine dev_appserver.

  The appserver is always initialised with a blank datastore with the specified
  fixtures loaded into it.
  """
  help = 'Runs the development server with data from the given fixtures.'

  def run_from_argv(self, argv):
    # Everything after "manage.py testserver" is treated as fixture names.
    fixtures = argv[2:]
    # Ensure an on-disk test datastore is used.
    from django.db import connection
    connection.use_test_datastore = True
    connection.test_datastore_inmemory = False
    # Flush any existing test datastore.
    connection.flush()
    # Load the fixtures.
    from django.core.management import call_command
    call_command('loaddata', 'initial_data')
    if fixtures:
      call_command('loaddata', *fixtures)
    # Build new arguments for dev_appserver.
    datastore_path, history_path = get_test_datastore_paths(False)
    new_args = argv[0:1]
    new_args.extend(['--datastore_path', datastore_path])
    new_args.extend(['--history_path', history_path])
    # dev_appserver expects the application root as its positional argument.
    new_args.extend([os.getcwdu()])
    # Add email settings
    from django.conf import settings
    new_args.extend(['--smtp_host', settings.EMAIL_HOST,
                     '--smtp_port', str(settings.EMAIL_PORT),
                     '--smtp_user', settings.EMAIL_HOST_USER,
                     '--smtp_password', settings.EMAIL_HOST_PASSWORD])
    # Allow skipped files so we don't die
    new_args.extend(['--allow_skipped_files'])
    # Start the test dev_appserver.
    from google.appengine.tools import dev_appserver_main
    dev_appserver_main.main(new_args)
| Python |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import code
import getpass
import os
import sys
from django.conf import settings
from django.core.management.base import BaseCommand
from google.appengine.ext.remote_api import remote_api_stub
def auth_func():
  """Prompts the user for remote_api credentials on the terminal.

  Returns:
    A (username, password) tuple; the password is read without echo.
  """
  username = raw_input('Username:')
  password = getpass.getpass('Password:')
  return username, password
class Command(BaseCommand):
  """Start up an interactive console backed by your app using remote_api."""

  help = 'Start up an interactive console backed by your app using remote_api.'

  def run_from_argv(self, argv):
    """Connects remote_api and drops into an interactive console.

    argv[2] is the application ID; argv[3], when given, is the host to
    connect to (defaults to <app_id>.appspot.com).
    """
    # Fail with a usage message instead of a bare IndexError when the
    # required app_id argument is missing.
    if len(argv) < 3:
      sys.stderr.write('Usage: %s %s <app_id> [host]\n' % (argv[0], argv[1]))
      sys.exit(1)
    app_id = argv[2]
    if len(argv) > 3:
      host = argv[3]
    else:
      host = '%s.appspot.com' % app_id
    # Route all datastore operations through the app's /remote_api endpoint,
    # prompting for credentials via auth_func.
    remote_api_stub.ConfigureRemoteDatastore(app_id,
                                             '/remote_api',
                                             auth_func,
                                             host)
    code.interact('App Engine interactive console for %s' % (app_id,),
                  None,
                  locals())
| Python |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that the serialization modules are functioning correctly.
In particular, these tests verify that the modifications made to the standard
Django serialization modules function correctly and that the combined datastore
and Django models can be dumped and loaded to all of the provided formats.
"""
import os
import re
import unittest
from StringIO import StringIO
from django.core import serializers
from google.appengine.ext import db
from appengine_django.models import BaseModel
class ModelA(BaseModel):
  # Minimal model used as the serialization target throughout these tests.
  description = db.StringProperty()
class ModelB(BaseModel):
  # Model holding a reference to ModelA, used to exercise Key reference
  # serialization and deserialization.
  description = db.StringProperty()
  friend = db.Reference(ModelA)
class TestAllFormats(type):

  def __new__(cls, name, bases, attrs):
    """Extends base test functions to be called for every serialisation format.

    Looks for functions matching 'run.*Test', where the wildcard in the middle
    matches the desired test name and ensures that a test case is setup to call
    that function once for every defined serialisation format. The test case
    that is created will be called 'test<format><name>'. Eg, for the function
    'runKeyedObjectTest' functions like 'testJsonKeyedObject' will be created.
    """
    test_formats = serializers.get_serializer_formats()
    test_formats.remove("python")  # Python serializer is only used indirectly.
    for func_name in attrs.keys():
      m = re.match("^run(.*)Test$", func_name)
      if not m:
        continue
      for format in test_formats:
        test_name = "test%s%s" % (format.title(), m.group(1))
        # Bind the current func_name/format as default arguments so each
        # generated test invokes the right combination. This replaces the
        # previous eval()-built lambda: default binding avoids the
        # late-binding closure pitfall without constructing code from strings.
        test_func = lambda self, fn=func_name, fmt=format: getattr(self, fn)(fmt)
        attrs[test_name] = test_func
    return super(TestAllFormats, cls).__new__(cls, name, bases, attrs)
class SerializationTest(unittest.TestCase):
  """Unit tests for the serialization/deserialization functionality.

  Tests that every loaded serialization format can successfully dump and then
  reload objects without the objects changing.
  """
  # TestAllFormats expands every run<Name>Test method below into one
  # test<Format><Name> method per registered serializer format.
  __metaclass__ = TestAllFormats

  def compareObjects(self, orig, new, format="unknown"):
    """Compares two objects to ensure they are identical.

    Args:
      orig: The original object, must be an instance of db.Model.
      new: The new object, must be an instance of db.Model.
      format: The serialization format being tested, used to make error output
        more helpful.

    Raises:
      The function has no return value, but will raise assertion errors if the
      objects do not match correctly.
    """
    if orig.key().name():
      # Only compare object keys when the key is named. Key IDs are not static
      # and will change between dump/load. If you want stable Keys they need to
      # be named!
      self.assertEqual(orig.key(), new.key(),
                       "keys not equal after %s serialization: %s != %s" %
                       (format, repr(orig.key()), repr(new.key())))
    for key in orig.properties().keys():
      oval = getattr(orig, key)
      nval = getattr(new, key)
      if isinstance(orig.properties()[key], db.Reference):
        # Need to compare object keys not the objects themselves.
        oval = oval.key()
        nval = nval.key()
      self.assertEqual(oval, nval, "%s attribute differs after %s "
                       "serialization: %s != %s" % (key, format, oval, nval))

  def doSerialisationTest(self, format, obj, rel_attr=None, obj_ref=None):
    """Runs a serialization test on an object for the specified format.

    Args:
      format: The name of the Django serialization class to use.
      obj: The object to {,de}serialize, must be an instance of db.Model.
      rel_attr: Name of the attribute of obj references another model.
      obj_ref: The expected object reference, must be an instance of db.Model.

    Raises:
      The function has no return value but raises assertion errors if the
      object cannot be successfully serialized and then deserialized back to an
      identical object. If rel_attr and obj_ref are specified the deserialized
      object must also retain the references from the original object.
    """
    serialised = serializers.serialize(format, [obj])
    # Try and get the object back from the serialized string.
    result = list(serializers.deserialize(format, StringIO(serialised)))
    self.assertEqual(1, len(result),
                     "%s serialization should create 1 object" % format)
    result[0].save()  # Must save back into the database to get a Key.
    self.compareObjects(obj, result[0].object, format)
    if rel_attr and obj_ref:
      rel = getattr(result[0].object, rel_attr)
      # A reference may be exposed as a callable accessor; resolve it.
      if callable(rel):
        rel = rel()
      self.compareObjects(rel, obj_ref, format)

  def doLookupDeserialisationReferenceTest(self, lookup_dict, format):
    """Tests the Key reference is loaded OK for a format.

    Args:
      lookup_dict: A dictionary indexed by format containing serialized strings
        of the objects to load.
      format: The format to extract from the dict and deserialize.

    Raises:
      This function has no return value but raises assertion errors if the
      string cannot be deserialized correctly or the resulting object does not
      reference the object correctly.
    """
    if format not in lookup_dict:
      # Check not valid for this format.
      return
    obj = ModelA(description="test object", key_name="test")
    obj.put()
    s = lookup_dict[format]
    result = list(serializers.deserialize(format, StringIO(s)))
    self.assertEqual(1, len(result), "expected 1 object from %s" % format)
    result[0].save()
    self.compareObjects(obj, result[0].object.friend, format)

  def doModelKeyDeserialisationReferenceTest(self, lookup_dict, format):
    """Tests a model with a key can be loaded OK for a format.

    Args:
      lookup_dict: A dictionary indexed by format containing serialized strings
        of the objects to load.
      format: The format to extract from the dict and deserialize.

    Returns:
      This function has no return value but raises assertion errors if the
      string cannot be deserialized correctly or the resulting object is not an
      instance of ModelA with a key named 'test'.
    """
    if format not in lookup_dict:
      # Check not valid for this format.
      return
    s = lookup_dict[format]
    result = list(serializers.deserialize(format, StringIO(s)))
    self.assertEqual(1, len(result), "expected 1 object from %s" % format)
    result[0].save()
    self.assert_(isinstance(result[0].object, ModelA))
    self.assertEqual("test", result[0].object.key().name())

  # Lookup dicts for the above (doLookupDeserialisationReferenceTest) function.
  SERIALIZED_WITH_KEY_AS_LIST = {
      "json": """[{"pk": "agR0ZXN0chMLEgZNb2RlbEIiB21vZGVsYmkM", """
              """"model": "tests.modelb", "fields": {"description": "test", """
              """"friend": ["ModelA", "test"] }}]""",
      "yaml": """- fields: {description: !!python/unicode 'test', friend: """
              """ [ModelA, test]}\n model: tests.modelb\n pk: """
              """ agR0ZXN0chMLEgZNb2RlbEEiB21vZGVsYWkM\n"""
  }
  SERIALIZED_WITH_KEY_REPR = {
      "json": """[{"pk": "agR0ZXN0chMLEgZNb2RlbEIiB21vZGVsYmkM", """
              """"model": "tests.modelb", "fields": {"description": "test", """
              """"friend": "datastore_types.Key.from_path("""
              """'ModelA', 'test')" }}]""",
      "yaml": """- fields: {description: !!python/unicode 'test', friend: """
              """\'datastore_types.Key.from_path("ModelA", "test")\'}\n """
              """model: tests.modelb\n pk: """
              """ agR0ZXN0chMLEgZNb2RlbEEiB21vZGVsYWkM\n"""
  }

  # Lookup dict for the doModelKeyDeserialisationReferenceTest function.
  MK_SERIALIZED_WITH_LIST = {
      "json": """[{"pk": ["ModelA", "test"], "model": "tests.modela", """
              """"fields": {}}]""",
      "yaml": """-\n fields: {description: null}\n model: tests.modela\n """
              """pk: [ModelA, test]\n"""
  }
  MK_SERIALIZED_WITH_KEY_REPR = {
      "json": """[{"pk": "datastore_types.Key.from_path('ModelA', 'test')", """
              """"model": "tests.modela", "fields": {}}]""",
      "yaml": """-\n fields: {description: null}\n model: tests.modela\n """
              """pk: \'datastore_types.Key.from_path("ModelA", "test")\'\n"""
  }
  MK_SERIALIZED_WITH_KEY_AS_TEXT = {
      "json": """[{"pk": "test", "model": "tests.modela", "fields": {}}]""",
      "yaml": """-\n fields: {description: null}\n model: tests.modela\n """
              """pk: test\n"""
  }

  # Lookup dict for the function.
  SERIALIZED_WITH_NON_EXISTANT_PARENT = {
      "json": """[{"pk": "ahhnb29nbGUtYXBwLWVuZ2luZS1kamFuZ29yIgsSBk1vZG"""
              """VsQiIGcGFyZW50DAsSBk1vZGVsQSIEdGVzdAw", """
              """"model": "tests.modela", "fields": """
              """{"description": null}}]""",
      "yaml": """- fields: {description: null}\n """
              """model: tests.modela\n """
              """pk: ahhnb29nbGUtYXBwLWVuZ2luZS1kamFuZ29yIgsSBk1"""
              """vZGVsQiIGcGFyZW50DAsSBk1vZGVsQSIEdGVzdAw\n""",
      "xml": """<?xml version="1.0" encoding="utf-8"?>\n"""
             """<django-objects version="1.0">\n"""
             """<entity kind="tests.modela" key="ahhnb29nbGUtYXBwL"""
             """WVuZ2luZS1kamFuZ29yIgsSBk1vZGVsQiIGcGFyZW50DA"""
             """sSBk1vZGVsQSIEdGVzdAw">\n """
             """<key>tag:google-app-engine-django.gmail.com,"""
             """2008-05-13:ModelA[ahhnb29nbGUtYXBwLWVuZ2luZS1kam"""
             """FuZ29yIgsSBk1vZGVsQiIGcGFyZW50DAsSBk1vZGVsQSIEdGVzdAw"""
             """]</key>\n <property name="description" """
             """type="null"></property>\n</entity>\n</django-objects>"""
  }

  # The following functions are all expanded by the metaclass to be run once
  # for every registered Django serialization module.

  def runKeyedObjectTest(self, format):
    """Test serialization of a basic object with a named key."""
    obj = ModelA(description="test object", key_name="test")
    obj.put()
    self.doSerialisationTest(format, obj)

  def runObjectWithIdTest(self, format):
    """Test serialization of a basic object with a numeric ID key."""
    obj = ModelA(description="test object")
    obj.put()
    self.doSerialisationTest(format, obj)

  def runObjectWithReferenceTest(self, format):
    """Test serialization of an object that references another object."""
    obj = ModelA(description="test object", key_name="test")
    obj.put()
    obj2 = ModelB(description="friend object", friend=obj)
    obj2.put()
    self.doSerialisationTest(format, obj2, "friend", obj)

  def runObjectWithParentTest(self, format):
    """Test serialization of an object that has a parent object reference."""
    obj = ModelA(description="parent object", key_name="parent")
    obj.put()
    obj2 = ModelA(description="child object", key_name="child", parent=obj)
    obj2.put()
    self.doSerialisationTest(format, obj2, "parent", obj)

  def runObjectWithNonExistantParentTest(self, format):
    """Test deserialization of an object referencing a non-existant parent."""
    self.doModelKeyDeserialisationReferenceTest(
        self.SERIALIZED_WITH_NON_EXISTANT_PARENT, format)

  def runCreateKeyReferenceFromListTest(self, format):
    """Tests that a reference specified as a list in json/yaml can be loaded OK."""
    self.doLookupDeserialisationReferenceTest(self.SERIALIZED_WITH_KEY_AS_LIST,
                                              format)

  def runCreateKeyReferenceFromReprTest(self, format):
    """Tests that a reference specified as repr(Key) in can loaded OK."""
    self.doLookupDeserialisationReferenceTest(self.SERIALIZED_WITH_KEY_REPR,
                                              format)

  def runCreateModelKeyFromListTest(self, format):
    """Tests that a model key specified as a list can be loaded OK."""
    self.doModelKeyDeserialisationReferenceTest(self.MK_SERIALIZED_WITH_LIST,
                                                format)

  def runCreateModelKeyFromReprTest(self, format):
    """Tests that a model key specified as a repr(Key) can be loaded OK."""
    self.doModelKeyDeserialisationReferenceTest(
        self.MK_SERIALIZED_WITH_KEY_REPR, format)

  def runCreateModelKeyFromTextTest(self, format):
    """Tests that a reference specified as a plain key_name loads OK."""
    self.doModelKeyDeserialisationReferenceTest(
        self.MK_SERIALIZED_WITH_KEY_AS_TEXT, format)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| Python |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that the core module functionality is present and functioning."""
import unittest
from django.test import TestCase as DjangoTestCase
from appengine_django import appid
from appengine_django import have_appserver
class AppengineDjangoTest(unittest.TestCase):
  """Tests that the helper module has been correctly installed."""

  def testAppidProvided(self):
    """Tests that application ID and configuration has been loaded."""
    # appid is imported from appengine_django; it is None only when no
    # app.yaml configuration could be loaded.
    self.assert_(appid is not None)

  def testAppserverDetection(self):
    """Tests that the appserver detection flag is present and correct."""
    # It seems highly unlikely that these tests would ever be run from within
    # an appserver.
    self.assertEqual(have_appserver, False)
class DjangoTestCaseTest(DjangoTestCase):
  """Tests that the tests can be subclassed from Django's TestCase class."""

  def testPassing(self):
    """Tests that tests with Django's TestCase class work."""
    # A trivially-true assertion: merely reaching this point proves the
    # DjangoTestCase machinery initialised without error.
    self.assert_(True)
| Python |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests that the manage.py commands execute correctly.
These tests only verify that the commands execute and exit with a success code.
They are intended to catch import exceptions and similar problems, it is left
up to tests in other modules to verify that the functionality of each command
works correctly.
"""
import os
import re
import signal
import subprocess
import tempfile
import time
import unittest
from django.db.models import get_models
from google.appengine.ext import db
from appengine_django.models import BaseModel
from appengine_django.models import ModelManager
from appengine_django.models import ModelOptions
from appengine_django.models import RegistrationTestModel
class CommandsTest(unittest.TestCase):
  """Unit tests for the manage.py commands."""

  # How many seconds to wait for a command to exit.
  COMMAND_TIMEOUT = 10

  def runCommand(self, command, args=None, int_after=None, input=None):
    """Helper to run the specified command in a child process.

    Args:
      command: The name of the command to run.
      args: List of command arguments to run the command with.
      int_after: If set to a positive integer, SIGINT will be sent to the
        running child process after this many seconds to cause an exit. This
        should be less than the COMMAND_TIMEOUT value (10 seconds).
      input: A string to write to stdin when the command starts. stdin is
        closed after the string is written.

    Returns:
      rc: The integer return code of the process, or None if the child had to
        be killed and no return code could be collected.
      stdout: A string containing the child's stdout.
      stderr: A string containing the child's stderr.
    """
    if not args:
      args = []
    start = time.time()
    int_sent = False
    fd = subprocess.PIPE
    child = subprocess.Popen(["./manage.py", command] + args, stdin=fd,
                             stdout=fd, stderr=fd, cwd=os.getcwdu())
    if input:
      child.stdin.write(input)
      child.stdin.close()
    while 1:
      rc = child.poll()
      if rc is not None:
        # Child has exited.
        break
      elapsed = time.time() - start
      if int_after and int_after > 0 and elapsed > int_after and not int_sent:
        # Send SIGINT as requested, give child time to exit cleanly.
        os.kill(child.pid, signal.SIGINT)
        start = time.time()
        int_sent = True
        continue
      if elapsed < self.COMMAND_TIMEOUT:
        continue
      # Command is over time, kill and exit loop.
      os.kill(child.pid, signal.SIGKILL)
      time.sleep(2)  # Give time for the signal to be received.
      # Re-poll so the collected status is returned instead of leaving rc as
      # None when the kill has already taken effect.
      rc = child.poll()
      break
    # Return status and output.
    return rc, child.stdout.read(), child.stderr.read()

  def assertCommandSucceeds(self, command, *args, **kwargs):
    """Asserts that the specified command successfully completes.

    Args:
      command: The name of the command to run.
      All other arguments are passed directly through to the runCommand
      routine.

    Raises:
      This function does not return anything but will raise assertion errors if
      the command does not exit successfully.
    """
    rc, stdout, stderr = self.runCommand(command, *args, **kwargs)
    # Preserve the command's output in a temp file so a failure can be
    # investigated after the test run.
    fd, tempname = tempfile.mkstemp()
    os.write(fd, stdout)
    os.close(fd)
    # Use %s for rc: it may legitimately be None when the child was killed,
    # and %d would raise a TypeError while building the failure message.
    self.assertEquals(0, rc,
                      "%s did not return successfully (rc: %s): Output in %s" %
                      (command, rc, tempname))
    os.unlink(tempname)

  def getCommands(self):
    """Returns a list of valid commands for manage.py.

    Args:
      None

    Returns:
      A list of valid commands for manage.py as read from manage.py's help
      output.
    """
    rc, stdout, stderr = self.runCommand("help")
    parts = re.split("Available subcommands:", stderr)
    if len(parts) < 2:
      return []
    return [t.strip() for t in parts[-1].split("\n") if t.strip()]

  def testDiffSettings(self):
    """Tests the diffsettings command."""
    self.assertCommandSucceeds("diffsettings")

  def testDumpData(self):
    """Tests the dumpdata command."""
    self.assertCommandSucceeds("dumpdata")

  def testFlush(self):
    """Tests the flush command."""
    self.assertCommandSucceeds("flush")

  # Note: this method was previously defined twice with identical bodies; the
  # duplicate silently shadowed the first definition and has been removed.
  def testLoadData(self):
    """Tests the loaddata command."""
    self.assertCommandSucceeds("loaddata")

  def testReset(self):
    """Tests the reset command."""
    self.assertCommandSucceeds("reset", ["appengine_django"])

  def testRunserver(self):
    """Tests the runserver command."""
    self.assertCommandSucceeds("runserver", int_after=2.0)

  def testShell(self):
    """Tests the shell command."""
    self.assertCommandSucceeds("shell", input="exit")

  def testUpdate(self):
    """Tests that the update command exists.

    Cannot test that it works without mocking out parts of dev_appserver so for
    now we just assume that if it is present it will work.
    """
    cmd_list = self.getCommands()
    self.assert_("update" in cmd_list)

  def testZipCommandListFiltersCorrectly(self):
    """When running under a zipfile test that only valid commands are found."""
    cmd_list = self.getCommands()
    self.assert_("__init__" not in cmd_list)
    self.assert_("base" not in cmd_list)
| Python |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that the core module functionality is present and functioning."""
import httplib
import logging
import os
import socket
import sys
import threading
import unittest

from django import http
from django import test
from django.test import client
from django.conf import settings

from google.appengine.tools import dev_appserver
from google.appengine.tools import dev_appserver_login
PORT = 8000
ROOT_PATH = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
APP_ID = 'google-app-engine-django'
LOGIN_URL = '/_ah/login'
# ########
# NOTE: All this stuff is expected to break with SDK updates
# TODO: get an interface for this into the SDK proper
# ########
def start_server(root_path=ROOT_PATH, port=PORT, app_id=APP_ID):
  """Boots a dev_appserver for the application on a daemon thread.

  Args:
    root_path: Directory containing the application to serve.
    port: TCP port for the server to listen on.
    app_id: Application ID to configure the API stubs with.

  Returns:
    The port number the server is listening on.
  """
  dev_appserver.ApplicationLoggingHandler.InitializeTemplates(
      'HEADER', 'SCRIPT', 'MIDDLE', 'FOOTER')
  # Use throwaway datastore files so each test run starts clean.
  dev_appserver.SetupStubs(app_id,
                           login_url=LOGIN_URL,
                           datastore_path='/dev/null',
                           history_path='/dev/null',
                           clear_datastore=False)
  # Bug fix: honour the root_path argument; previously the module-level
  # ROOT_PATH constant was always used, silently ignoring the parameter.
  server = dev_appserver.CreateServer(root_path,
                                      LOGIN_URL,
                                      port,
                                      '/unused/templates/path')
  server_thread = threading.Thread(target=server.serve_forever)
  # Daemonize so the server thread does not keep the test process alive.
  server_thread.setDaemon(True)
  server_thread.start()
  return port
def RetrieveURL(method,
host_port,
relative_url,
user_info=None,
body=None,
extra_headers=[]):
"""Access a URL over HTTP and returns the results.
Args:
method: HTTP method to use, e.g., GET, POST
host_port: Tuple (hostname, port) of the host to contact.
relative_url: Relative URL to access on the remote host.
user_info: If not None, send this user_info tuple in an HTTP Cookie header
along with the request; otherwise, no header is included. The user_info
tuple should be in the form (email, admin) where:
email: The user's email address.
admin: True if the user should be an admin; False otherwise.
If email is empty, it will be as if the user is not logged in.
body: Request body to write to the remote server. Should only be used with
the POST method any other methods that expect a message body.
extra_headers: List of (key, value) tuples for headers to send on the
request.
Returns:
Tuple (status, content, headers) where:
status: HTTP status code returned by the remote host, e.g. 404, 200, 500
content: Data returned by the remote host.
headers: Dictionary mapping header names to header values (both strings).
If an exception is raised while accessing the remote host, both status and
content will be set to None.
"""
url_host = '%s:%d' % host_port
logging.info('Connecting to %s', url_host)
try:
connection = httplib.HTTPConnection(url_host)
logging.info('Sending request "%s %s"', method, relative_url)
try:
connection.putrequest(method, relative_url)
if user_info is not None:
email, admin = user_info
auth_string = '%s=%s' % (dev_appserver_login.COOKIE_NAME,
dev_appserver_login.CreateCookieData(email, admin))
logging.info('Putting auth header: %s', auth_string)
connection.putheader('Cookie', auth_string)
if body is not None:
connection.putheader('Content-length', len(body))
for key, value in extra_headers:
logging.info('Putting header: %s = %s', str(key), str(value))
connection.putheader(str(key), str(value))
connection.endheaders()
if body is not None:
connection.send(body)
response = connection.getresponse()
status = response.status
content = response.read()
headers = dict(response.getheaders())
logging.info('Received response %s with content:\n%s', status, content)
return status, content, headers
finally:
connection.close()
except (IOError, httplib.HTTPException, socket.error), e:
logging.error('Encountered exception accessing HTTP server: %s', e)
raise e
class AppEngineClientHandler(client.ClientHandler):
  """Test-client handler that forwards each request to a dev_appserver
  listening on localhost instead of invoking the WSGI app in-process."""

  def __init__(self, port):
    super(AppEngineClientHandler, self).__init__()
    self._port = port
    self._host = 'localhost'

  def __call__(self, environ):
    method = environ['REQUEST_METHOD']
    host_port = (self._host, self._port)
    relative_url = environ['PATH_INFO']
    if environ['QUERY_STRING']:
      relative_url += '?%s' % environ['QUERY_STRING']
    body = environ['wsgi.input'].read(environ.get('CONTENT_LENGTH', 0))
    request_headers = []  # Not yet supported
    status, content, headers = RetrieveURL(method,
                                           host_port,
                                           relative_url,
                                           body=body,
                                           extra_headers=request_headers)
    response = http.HttpResponse(content=content, status=status)
    for header, value in headers.iteritems():
      response[header] = value
    return response
class AppEngineClient(client.Client):
  """Django test client whose requests are served over HTTP by a real
  dev_appserver rather than the in-process WSGI handler."""
  def __init__(self, port, *args, **kw):
    super(AppEngineClient, self).__init__(*args, **kw)
    # Replace the default handler after base initialisation so requests go
    # to the dev_appserver listening on the given port.
    self.handler = AppEngineClientHandler(port=port)
class IntegrationTest(test.TestCase):
  """Tests that we can make a request."""

  def setUp(self):
    # Starts a dev_appserver on a daemon thread; there is no explicit
    # teardown — the thread dies with the test process.
    port = start_server()
    self.gae_client = AppEngineClient(port=port)

  def testBasic(self):
    """a request to the default page works in the dev_appserver"""
    rv = self.gae_client.get('/')
    self.assertEquals(rv.status_code, 200)
| Python |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that the combined appengine and Django models function correctly."""
import unittest
from django import VERSION
from django.db.models import get_models
from django import forms
from google.appengine.ext.db import djangoforms
from google.appengine.ext import db
from appengine_django.models import BaseModel
from appengine_django.models import ModelManager
from appengine_django.models import ModelOptions
from appengine_django.models import RegistrationTestModel
class TestModelWithProperties(BaseModel):
  """Test model class for checking property -> Django field setup."""
  # One property of each interesting kind: a plain string property, an
  # integer property and a reference (foreign-key-like) property. The tests
  # below rely on there being exactly three properties.
  property1 = db.StringProperty()
  property2 = db.IntegerProperty()
  property3 = db.Reference()
class ModelTest(unittest.TestCase):
  """Unit tests for the combined (App Engine + Django) model class."""

  def testModelRegisteredWithDjango(self):
    """Tests that a combined model class has been registered with Django."""
    self.assert_(RegistrationTestModel in get_models())

  def testDatastoreModelProperties(self):
    """Tests that a combined model class still has datastore properties."""
    self.assertEqual(3, len(TestModelWithProperties.properties()))

  def testDjangoModelClass(self):
    """Tests the parts of a model required by Django are correctly stubbed."""
    # Django expects per-model options at ._meta ...
    self.assert_(isinstance(RegistrationTestModel._meta, ModelOptions))
    # ... a manager at .objects ...
    self.assert_(isinstance(RegistrationTestModel.objects, ModelManager))
    # ... and a fallback manager at ._default_manager.
    self.assert_(hasattr(RegistrationTestModel, "_default_manager"))

  def testDjangoModelFields(self):
    """Tests that a combined model class has (faked) Django fields."""
    fake_fields = TestModelWithProperties._meta.local_fields
    self.assertEqual(3, len(fake_fields))
    # Every fake field must expose the minimal surface Django relies on.
    for fake_field in fake_fields:
      # .rel tells the serializer whether this field is a relationship or
      # reference to another model.
      self.assert_(hasattr(fake_field, "rel"))
      # .serialize must be True for Django to include the field in output.
      self.assertEqual(True, fake_field.serialize)
      if fake_field.name == "property3":
        # The Reference field additionally needs rel.field_name, which the
        # serializer uses to locate the related field on the other model.
        # For appengine models this should always be 'key_name'.
        self.assertEqual("key_name", fake_field.rel.field_name)

  def testDjangoModelOptionsStub(self):
    """Tests that the options stub has the required properties by Django."""
    # object_name and app_label are needed when emitting serialized output.
    self.assertEqual("RegistrationTestModel",
                     RegistrationTestModel._meta.object_name)
    self.assertEqual("appengine_django", RegistrationTestModel._meta.app_label)
    # pk.name is consulted when serializing related fields.
    self.assertEqual("key_name", RegistrationTestModel._meta.pk.name)
    # many_to_many is probed by the serialization code; the datastore has no
    # m2m support so it must always be empty.
    self.assertEqual([], RegistrationTestModel._meta.many_to_many)

  def testDjangoModelManagerStub(self):
    """Tests that the manager stub acts as Django would expect."""
    # The serializer retrieves every instance via model.objects.all().
    self.assertEqual([], list(RegistrationTestModel.objects.all()))

  def testDjangoModelPK(self):
    """Tests that each model instance has a 'primary key' generated."""
    stored = RegistrationTestModel(key_name="test")
    stored.put()
    primary_key = stored._get_pk_val()
    self.assert_(primary_key)
    fetched = RegistrationTestModel.get(primary_key)
    self.assertEqual(stored.key(), fetched.key())

  def testModelFormPatched(self):
    """Tests that the Django ModelForm is being successfully patched."""
    self.assertEqual(djangoforms.ModelForm, forms.ModelForm)
| Python |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ensures the App Engine memcache API works as Django's memcache backend."""
import unittest
from django.core.cache import get_cache
from appengine_django import appid
from appengine_django import have_appserver
class AppengineMemcacheTest(unittest.TestCase):
  """Exercises Django's memcache backend on top of App Engine memcache."""

  def setUp(self):
    """Fetches the configured memcache backend for use by each test."""
    self._backend = get_cache("memcached://")

  def testSimpleSetGet(self):
    """A value stored via set() comes back unchanged from get()."""
    self._backend.set("test_key", "test_value")
    self.assertEqual(self._backend.get("test_key"), "test_value")

  def testDelete(self):
    """delete() removes a previously stored value from the cache."""
    self._backend.set("test_key", "test_value")
    self.assertEqual(self._backend.has_key("test_key"), True)
    self._backend.delete("test_key")
    self.assertEqual(self._backend.has_key("test_key"), False)
| Python |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loads all the _test.py files into the top level of the package.
This file is a hack around the fact that Django expects the tests "module" to
be a single tests.py file and cannot handle a tests package inside an
application.
All _test.py files inside this package are imported and any classes derived
from unittest.TestCase are then referenced from this file itself so that they
appear at the top level of the tests "module" that Django will import.
"""
import os
import re
import types
import unittest
# Bug fix: the '.' before 'py' was previously unescaped, so any character
# (not just a dot) would match there.
TEST_RE = r"^.*_test\.py$"

# Search through every file inside this package.
test_names = []
test_dir = os.path.dirname(__file__)
for filename in os.listdir(test_dir):
  if not re.match(TEST_RE, filename):
    continue
  # Import the test file and find all TestCase classes inside it.
  module_name = filename[:-3]  # Strip the trailing '.py'.
  test_module = __import__('appengine_django.tests.%s' % module_name,
                           {}, {}, module_name)
  for name in dir(test_module):
    item = getattr(test_module, name)
    if not (isinstance(item, (type, types.ClassType)) and
            issubclass(item, unittest.TestCase)):
      continue
    # Found a test case; bind it into this module's namespace so that it
    # appears at the top level of the tests "module" Django imports.
    # Assigning via globals() is equivalent to the exec statement it
    # replaces, but cannot execute arbitrary code should a module ever
    # expose an oddly named attribute.
    globals()[name] = item
    test_names.append(name)

# Hide everything other than the test cases from other modules.
__all__ = test_names
| Python |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that the db module correctly initialises the API stubs."""
import unittest
from django.db import connection
from django.db.backends.appengine.base import DatabaseWrapper
from appengine_django import appid
from appengine_django.db import base
class DatastoreTest(unittest.TestCase):
  """Tests that the datastore stubs have been correctly setup."""

  def testDjangoDBConnection(self):
    """The Django DB connection object must be our replacement wrapper."""
    self.assert_(isinstance(connection, DatabaseWrapper))

  def testDjangoDBConnectionStubs(self):
    """The connection must stub out the members Django touches."""
    self.assert_(hasattr(connection, "features"))
    self.assert_(hasattr(connection, "ops"))

  def testDjangoDBErrorClasses(self):
    """The backend must provide the error classes Django expects."""
    self.assert_(hasattr(base, "DatabaseError"))
    self.assert_(hasattr(base, "IntegrityError"))

  def testDatastorePath(self):
    """Both datastore file paths must embed the application name."""
    datastore_path, history_path = base.get_datastore_paths()
    self.assertNotEqual(-1, datastore_path.find("django_%s" % appid))
    self.assertNotEqual(-1, history_path.find("django_%s" % appid))

  def testTestInMemoryDatastorePath(self):
    """By default the test datastore lives purely in memory (no paths)."""
    datastore_path, history_path = base.get_test_datastore_paths()
    self.assert_(datastore_path is None)
    self.assert_(history_path is None)

  def testTestFilesystemDatastorePath(self):
    """When requested, the test datastore is backed by the filesystem."""
    datastore_path, history_path = base.get_test_datastore_paths(False)
    self.assertNotEqual(-1, datastore_path.find("testdatastore"))
    self.assertNotEqual(-1, history_path.find("testdatastore"))
| Python |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Support for integrating a Django project with the appengine infrastructure.
This requires Django 1.0beta1 or greater.
This module enables you to use the Django manage.py utility and *some* of it's
subcommands. View the help of manage.py for exact details.
Additionally this module takes care of initialising the datastore (and a test
datastore) so that the Django test infrastructure can be used for your
appengine project.
To use this module add the following two lines to your main.py and manage.py
scripts at the end of your imports:
from appengine_django import InstallAppengineHelperForDjango
InstallAppengineHelperForDjango()
If you would like to use a version of Django other than that provided by the
system all you need to do is include it in a directory just above this helper,
eg:
appengine_django/__init__.py - This file
django/... - your private copy of Django.
"""
import logging
import os
import re
import sys
import unittest
import zipfile
# Absolute location of this helper package and of its parent directory; the
# parent is expected to be the project root that holds app.yaml.
DIR_PATH = os.path.abspath(os.path.dirname(__file__))
PARENT_DIR = os.path.dirname(DIR_PATH)
if PARENT_DIR.endswith(".zip"):
  # Check for appengine_django itself being in a zipfile; if so the project
  # root is one directory further up.
  PARENT_DIR = os.path.dirname(PARENT_DIR)
# Add this project to the start of sys path to enable direct imports.
sys.path = [PARENT_DIR,] + sys.path
# Look for a zipped copy of Django next to the project root.
have_django_zip = False
django_zip_path = os.path.join(PARENT_DIR, 'django.zip')
# Flags made available by this module. They are filled in by
# LoadAppengineEnvironment when the helper is installed.
appid = None
have_appserver = False
# Hide everything other than the flags above and the install function.
__all__ = ("appid", "have_appserver", "have_django_zip",
           "django_zip_path", "InstallAppengineHelperForDjango")
# Django manage.py subcommands that cannot work with the datastore backend;
# they are stripped from the command list by RemoveCommands below.
INCOMPATIBLE_COMMANDS = ["adminindex", "createcachetable", "dbshell",
                         "inspectdb", "runfcgi", "syncdb", "validate"]
def FixPython26Logging():
  """Disables multiprocessing support in the logging module.

  Workaround for a Python 2.6 logging incompatibility: the App Engine
  sandbox does not provide the multiprocessing module, so logging must not
  attempt to consult it when building records.
  """
  import logging
  logging.logMultiprocessing = 0
def LoadSdk():
  """Locates the App Engine SDK and adds it (and its libraries) to sys.path.

  Does nothing when 'google.appengine.api' is already importable. Exits the
  process with an error message when no SDK can be found anywhere.
  """
  # Try to import the appengine code from the system path.
  try:
    from google.appengine.api import apiproxy_stub_map
  except ImportError, e:
    # Not on the system path. Build a list of alternative paths where it may be.
    # First look within the project for a local copy, then look for where the Mac
    # OS SDK installs it.
    paths = [os.path.join(PARENT_DIR, '.google_appengine'),
             os.path.join(PARENT_DIR, 'google_appengine'),
             '/usr/local/google_appengine']
    # Then if on windows, look for where the Windows SDK installed it.
    for path in os.environ.get('PATH', '').split(';'):
      path = path.rstrip('\\')
      if path.endswith('google_appengine'):
        paths.append(path)
    try:
      from win32com.shell import shell
      from win32com.shell import shellcon
      id_list = shell.SHGetSpecialFolderLocation(
          0, shellcon.CSIDL_PROGRAM_FILES)
      program_files = shell.SHGetPathFromIDList(id_list)
      paths.append(os.path.join(program_files, 'Google',
                                'google_appengine'))
    except ImportError, e:
      # Not windows.
      pass
    # Loop through all possible paths and look for the SDK dir.
    SDK_PATH = None
    for sdk_path in paths:
      if os.path.exists(sdk_path):
        SDK_PATH = os.path.realpath(sdk_path)
        break
    if SDK_PATH is None:
      # The SDK could not be found in any known location.
      sys.stderr.write("The Google App Engine SDK could not be found!\n")
      sys.stderr.write("See README for installation instructions.\n")
      sys.exit(1)
    if SDK_PATH == os.path.join(PARENT_DIR, 'google_appengine'):
      logging.warn('Loading the SDK from the \'google_appengine\' subdirectory '
                   'is now deprecated!')
      logging.warn('Please move the SDK to a subdirectory named '
                   '\'.google_appengine\' instead.')
      logging.warn('See README for further details.')
    # Add the SDK and the libraries within it to the system path.
    EXTRA_PATHS = [
      SDK_PATH,
      os.path.join(SDK_PATH, 'lib', 'antlr3'),
      os.path.join(SDK_PATH, 'lib', 'django'),
      os.path.join(SDK_PATH, 'lib', 'webob'),
      os.path.join(SDK_PATH, 'lib', 'yaml', 'lib'),
    ]
    # Add SDK paths at the start of sys.path, but after the local directory
    # which was added to the start of sys.path at module load time. The local
    # directory must come first to allow the local imports to override the
    # SDK and site-packages directories.
    sys.path = sys.path[0:1] + EXTRA_PATHS + sys.path[1:]
def LoadDjango(version=None):
  """Makes the desired Django version importable.

  Preference order: an explicitly requested version, a zipped django.zip or
  local 'django' directory next to the project, then the newest supported
  version available through the SDK's use_library mechanism.

  Args:
    version: Optional Django version string, e.g. '1.0'.

  Raises:
    UnacceptableVersionError: If no supported Django version was found.
  """
  global have_django_zip
  from google.appengine.dist import use_library
  from google.appengine.dist._library import UnacceptableVersionError
  # Must set this env var *before* importing any more of Django.
  os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
  # If we have set a version explicitly, force that
  if version:
    use_library('django', version)
    return
  if os.path.exists(django_zip_path):
    have_django_zip = True
    sys.path.insert(1, django_zip_path)
  # Remove the standard version of Django if a local copy has been provided.
  if have_django_zip or os.path.exists(os.path.join(PARENT_DIR, 'django')):
    for k in [k for k in sys.modules if k.startswith('django')]:
      del sys.modules[k]
    return
  # If we aren't loading from a zip or local copy then try for whichever
  # supported version is highest and installed
  for check_version in ('1.1', '1.0'):
    try:
      use_library('django', check_version)
      return
    except UnacceptableVersionError:
      pass
  raise UnacceptableVersionError()
def LoadAppengineEnvironment():
  """Loads the appengine environment.

  Returns:
    This function has no return value, but it sets the following parameters on
    this package:
    - appid: The name of the application.
    - have_appserver: Boolean parameter which is True if the code is being run
        from within the appserver environment.
  """
  global appid, have_appserver
  from google.appengine.api import yaml_errors
  from google.appengine.api import apiproxy_stub_map
  # Detect if we are running under an appserver: inside a running appserver
  # the datastore stub has already been registered.
  have_appserver = False
  stub = apiproxy_stub_map.apiproxy.GetStub("datastore_v3")
  if stub:
    have_appserver = True
  # Load the application identifier.
  if have_appserver:
    appid = os.environ.get("APPLICATION_ID", "unknown")
  else:
    # Running as manage.py script, read from config file.
    try:
      from google.appengine.tools import dev_appserver
      appconfig, unused_matcher = dev_appserver.LoadAppConfig(PARENT_DIR, {})
      appid = appconfig.application
    except (ImportError, yaml_errors.EventListenerYAMLError), e:
      logging.warn("Could not read the Application ID from app.yaml. "
                   "This may break things in unusual ways!")
      # Something went wrong.
      appid = "unknown"
  logging.debug("Loading application '%s' %s an appserver" %
                (appid, have_appserver and "with" or "without"))
def InstallAppengineDatabaseBackend():
  """Installs the appengine database backend into Django.

  The appengine database lives in the db/ subdirectory of this package, but is
  known as "appengine" to Django. This function installs the module where
  Django expects to find its database backends.
  """
  from appengine_django import db
  # Registering under this dotted name lets DATABASE_ENGINE = 'appengine'
  # resolve to the helper's backend package.
  sys.modules['django.db.backends.appengine'] = db
  logging.debug("Installed appengine database backend")
def InstallGoogleMemcache():
  """Installs the Google memcache into Django.

  By default django tries to import standard memcache module.
  Because appengine memcache is API compatible with Python memcache module,
  we can trick Django to think it is installed and to use it.

  Now you can use CACHE_BACKEND = 'memcached://' in settings.py. IP address
  and port number are not required.
  """
  from google.appengine.api import memcache
  # Masquerade as the 'memcache' module that Django's cache backend imports.
  sys.modules['memcache'] = memcache
  logging.debug("Installed App Engine memcache backend")
def InstallDjangoModuleReplacements():
"""Replaces internal Django modules with App Engine compatible versions."""
# Replace the session module with a partial replacement overlay using
# __path__ so that portions not replaced will fall through to the original
# implementation.
try:
from django.contrib import sessions
orig_path = sessions.__path__[0]
sessions.__path__.insert(0, os.path.join(DIR_PATH, 'sessions'))
from django.contrib.sessions import backends
backends.__path__.append(os.path.join(orig_path, 'backends'))
except ImportError:
logging.debug("No Django session support available")
# Replace incompatible dispatchers.
import django.core.signals
import django.db
import django.dispatch.dispatcher
# Rollback occurs automatically on Google App Engine. Disable the Django
# rollback handler.
try:
# pre 1.0
from django.dispatch import errors
CheckedException = errors.DispatcherKeyError
def _disconnectSignal():
django.dispatch.dispatcher.disconnc(
django.db._rollback_on_exception,
django.core.signals.got_request_exception)
except ImportError:
CheckedException = KeyError
def _disconnectSignal():
django.core.signals.got_request_exception.disconnect(
django.db._rollback_on_exception)
try:
_disconnectSignal()
except CheckedException, e:
logging.debug("Django rollback handler appears to be already disabled.")
def PatchDjangoSerializationModules(settings):
  """Monkey patches the Django serialization modules.

  The standard Django serialization modules do not correctly handle the
  datastore models provided by this package. This method installs replacements
  for selected modules and methods to give Django the capability to correctly
  serialize and deserialize datastore models.

  Args:
    settings: The Django settings module, modified in place.
  """
  # These can't be imported until InstallAppengineDatabaseBackend has run.
  from django.core.serializers import python
  from appengine_django.serializer.python import Deserializer
  if not hasattr(settings, "SERIALIZATION_MODULES"):
    settings.SERIALIZATION_MODULES = {}
  base_module = "appengine_django"
  # The XML serializer is replaced wholesale via SERIALIZATION_MODULES,
  # while for the python serializer only the Deserializer is swapped out.
  settings.SERIALIZATION_MODULES["xml"] = "%s.serializer.xml" % base_module
  python.Deserializer = Deserializer
  PatchDeserializedObjectClass()
  DisableModelValidation()
  logging.debug("Installed appengine json and python serialization modules")
def PatchDeserializedObjectClass():
  """Patches the DeserializedObject class.

  The default implementation calls save directly on the django Model base
  class to avoid pre-save handlers. The model class provided by this package
  is not derived from the Django Model class and therefore must be called
  directly.

  Additionally we need to clear the internal _parent attribute as it may
  contain a FakeParent class that is used to deserialize instances without
  needing to load the parent instance itself. See the PythonDeserializer for
  more details.
  """
  # This can't be imported until InstallAppengineDatabaseBackend has run.
  from django.core.serializers import base
  class NewDeserializedObject(base.DeserializedObject):
    def save(self, save_m2m=True):
      # save_m2m is accepted for interface compatibility but ignored: the
      # datastore has no m2m relationships to save.
      self.object.save()
      self.object._parent = None
  base.DeserializedObject = NewDeserializedObject
  logging.debug("Replacement DeserializedObject class installed")
def DisableModelValidation():
  """Disables Django's model validation routines.

  The model validation is primarily concerned with validating foreign key
  references, and there is no equivalent checking code for datastore
  References at this time. Validation must be disabled or
  serialization/deserialization will fail.
  """
  from django.core.management import validation
  # Same signature as the lambda this replaces: two parameters named x and
  # y (second optional), always reporting zero validation errors.
  def _no_validation_errors(x, y=0):
    return 0
  validation.get_validation_errors = _no_validation_errors
  logging.debug("Django SQL model validation disabled")
def CleanupDjangoSettings(settings):
  """Removes incompatible entries from the django settings module.

  Args:
    settings: The Django settings module, sanitized in place.
  """
  # Ensure this module is installed as an application.
  apps = getattr(settings, "INSTALLED_APPS", ())
  found = False
  for app in apps:
    if app.endswith("appengine_django"):
      found = True
      break
  if not found:
    logging.warn("appengine_django module is not listed as an application!")
    apps += ("appengine_django",)
    setattr(settings, "INSTALLED_APPS", apps)
    logging.info("Added 'appengine_django' as an application")
  # Ensure the database backend is appropriately configured.
  dbe = getattr(settings, "DATABASE_ENGINE", "")
  if dbe != "appengine":
    settings.DATABASE_ENGINE = "appengine"
    logging.warn("DATABASE_ENGINE is not configured as 'appengine'. "
                 "Value overriden!")
  for var in ["NAME", "USER", "PASSWORD", "HOST", "PORT"]:
    val = getattr(settings, "DATABASE_%s" % var, "")
    if val:
      setattr(settings, "DATABASE_%s" % var, "")
      # Bug fix: the format argument was missing here, so the literal
      # 'DATABASE_%s' (rather than the setting name) was logged.
      logging.warn("DATABASE_%s should be blank. Value overriden!" % var)
  # Remove incompatible middleware modules.
  mw_mods = list(getattr(settings, "MIDDLEWARE_CLASSES", ()))
  disallowed_middleware_mods = (
    'django.middleware.doc.XViewMiddleware',)
  for modname in mw_mods[:]:
    if modname in disallowed_middleware_mods:
      # Currently only the CommonMiddleware has been ported. As other base
      # modules are converted, remove from the disallowed_middleware_mods
      # tuple.
      mw_mods.remove(modname)
      logging.warn("Middleware module '%s' is not compatible. Removed!" %
                   modname)
  setattr(settings, "MIDDLEWARE_CLASSES", tuple(mw_mods))
  # Remove incompatible application modules
  app_mods = list(getattr(settings, "INSTALLED_APPS", ()))
  disallowed_apps = (
    'django.contrib.contenttypes',
    'django.contrib.sites',)
  for app in app_mods[:]:
    if app in disallowed_apps:
      app_mods.remove(app)
      logging.warn("Application module '%s' is not compatible. Removed!" % app)
  setattr(settings, "INSTALLED_APPS", tuple(app_mods))
  # Remove incompatible session backends.
  session_backend = getattr(settings, "SESSION_ENGINE", "")
  if session_backend.endswith("file"):
    logging.warn("File session backend is not compatible. Overriden "
                 "to use db backend!")
    setattr(settings, "SESSION_ENGINE", "django.contrib.sessions.backends.db")
def ModifyAvailableCommands():
  """Removes incompatible commands and installs replacements where possible."""
  if have_appserver:
    # Commands are not used when running from an appserver.
    return
  from django.core import management
  project_directory = os.path.join(__path__[0], "../")
  if have_django_zip:
    # The stock find_commands cannot look inside a zipfile; swap in the
    # zip-aware version, keeping the original around as a fallback.
    FindCommandsInZipfile.orig = management.find_commands
    management.find_commands = FindCommandsInZipfile
  management.get_commands()
  # Replace startapp command which is set by previous call to get_commands().
  from appengine_django.management.commands.startapp import ProjectCommand
  management._commands['startapp'] = ProjectCommand(project_directory)
  RemoveCommands(management._commands)
  logging.debug("Removed incompatible Django manage.py commands")
def FindCommandsInZipfile(management_dir):
  """Returns the command names available below a management directory.

  Drop-in replacement for Django's find_commands that also works when
  Django is loaded from inside a zipfile. Returns an empty list if no
  commands are defined.
  """
  zip_marker = ".zip%s" % os.sep
  if zip_marker not in management_dir:
    # Plain directory on disk: defer to the stock implementation.
    return FindCommandsInZipfile.orig(management_dir)
  # Django is sourced from a zipfile; enumerate the archive's contents.
  zip_name, subpath = management_dir.split(zip_marker)
  archive = zipfile.ZipFile("%s.zip" % zip_name)
  # Command modules live in the 'commands' directory below management_dir.
  subpath = os.path.join(subpath, "commands")
  # The zipfile module reports paths using the separator conventions of the
  # operating system that *created* the archive, which may differ from the
  # local one. Normalise every separator to '#' before comparing paths.
  separators = re.compile(r"[/\\]")
  subpath = separators.sub("#", subpath)
  command_names = []
  for entry in archive.namelist():
    basename = os.path.basename(entry)
    normalised = separators.sub("#", entry)
    if not normalised.startswith(subpath):
      continue
    # Skip private modules and anything that is not Python source.
    if basename.startswith("_") or not normalised.endswith(".py"):
      continue
    command_names.append(basename[:-3])
  return command_names
def RemoveCommands(command_dict):
  """Removes incompatible commands from the specified command dictionary.

  Deletes, in place, every SQL management command (named 'sql*') and every
  command listed in INCOMPATIBLE_COMMANDS.

  Args:
    command_dict: Mapping of command name to command, modified in place.
  """
  # Iterate over a snapshot of the keys: deleting entries while iterating
  # the dictionary's key view directly is unsafe.
  for cmd in list(command_dict.keys()):
    if cmd.startswith("sql") or cmd in INCOMPATIBLE_COMMANDS:
      del command_dict[cmd]
def InstallReplacementImpModule():
  """Install a replacement for the imp module removed by the appserver.

  This is only to find management modules provided by applications.
  """
  if not have_appserver:
    # The real imp module is available outside the appserver; nothing to do.
    return
  modname = 'appengine_django.replacement_imp'
  # Using __import__ with a non-empty fromlist returns the submodule itself
  # rather than the top-level package.
  imp_mod = __import__(modname, {}, [], [''])
  sys.modules['imp'] = imp_mod
  logging.debug("Installed replacement imp module")
def InstallReplacementThreadingModule():
  """Install a replacement for the python threading module.

  This is only to deal with a bug in Django 1.1+
  """
  try:
    # Make the standard threading module use Django's own thread-local
    # implementation (only available in the Django versions affected).
    from django.utils._threading_local import local
    import threading
    threading.local = local
  except ImportError:
    # We are in Django 1.0
    pass
  logging.debug("Installed replacement threading module")
def InstallAppengineHelperForDjango(version=None):
  """Installs and Patches all of the classes/methods required for integration.

  If the variable DEBUG_APPENGINE_DJANGO is set in the environment verbose
  logging of the actions taken will be enabled.

  Args:
    version: Optional Django version string (e.g. '1.0') to force loading
        that specific release via the SDK's use_library mechanism.
  """
  FixPython26Logging()
  LoadSdk()
  LoadDjango(version)
  from django import VERSION
  from django.conf import settings
  # Adding this again here to solve a problem that happens when context
  # switching from webapp.template to django.template.
  # TODO(elsigh): Maybe there is a deeper, fixable problem somewhere?
  os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
  if VERSION < (1, 0, None):
    logging.error("Django 1.0 or greater is required!")
    sys.exit(1)
  if os.getenv("DEBUG_APPENGINE_DJANGO"):
    logging.getLogger().setLevel(logging.DEBUG)
  else:
    logging.getLogger().setLevel(logging.INFO)
  logging.debug("Loading the Google App Engine Helper for Django...")
  # Force Django to reload its settings.
  settings._target = None
  # NOTE: the installation order below matters: e.g. the database backend
  # must be installed before the serialization modules can be patched.
  LoadAppengineEnvironment()
  InstallReplacementImpModule()
  InstallReplacementThreadingModule()
  InstallAppengineDatabaseBackend()
  InstallModelForm()
  InstallGoogleMemcache()
  InstallDjangoModuleReplacements()
  PatchDjangoSerializationModules(settings)
  CleanupDjangoSettings(settings)
  ModifyAvailableCommands()
  InstallGoogleSMTPConnection()
  InstallAuthentication(settings)
  logging.debug("Successfully loaded the Google App Engine Helper for Django.")
def InstallGoogleSMTPConnection():
  """Points Django's email machinery at the App Engine mail adapter."""
  from appengine_django import mail as gmail
  from django.core import mail
  logging.debug("Installing Google Email Adapter for Django")
  mail.SMTPConnection = gmail.GoogleSMTPConnection
  mail.mail_admins = gmail.mail_admins
  mail.mail_managers = gmail.mail_managers
def InstallAuthentication(settings):
  """Installs App Engine aware replacements for django.contrib.auth.

  Replaces the auth models, authentication middleware and login_required
  decorator. Does nothing when django.contrib.auth is not installed.

  Args:
    settings: The Django settings module.
  """
  if "django.contrib.auth" not in settings.INSTALLED_APPS:
    return
  try:
    from appengine_django.auth import models as helper_models
    from django.contrib.auth import models
    models.User = helper_models.User
    models.Group = helper_models.Group
    models.Permission = helper_models.Permission
    models.Message = helper_models.Message
    from django.contrib.auth import middleware as django_middleware
    from appengine_django.auth.middleware import AuthenticationMiddleware
    django_middleware.AuthenticationMiddleware = AuthenticationMiddleware
    from django.contrib.auth import decorators as django_decorators
    from appengine_django.auth.decorators import login_required
    django_decorators.login_required = login_required
    from django.contrib import auth as django_auth
    from django.contrib.auth import tests as django_tests
    # Replace the stock auth test suites with empty TestSuites so that they
    # are not run against the replacement models.
    django_auth.suite = unittest.TestSuite
    django_tests.suite = unittest.TestSuite
    logging.debug("Installing authentication framework")
  except ImportError:
    logging.debug("No Django authentication support available")
def InstallModelForm():
  """Replace Django ModelForm with the AppEngine ModelForm."""
  # This MUST happen as early as possible, but after any auth model patching.
  from google.appengine.ext.db import djangoforms as aeforms
  try:
    # pre 1.0
    from django import newforms as forms
  except ImportError:
    from django import forms
  forms.ModelForm = aeforms.ModelForm
  # Extend ModelForm with support for EmailProperty
  # TODO: This should be submitted to the main App Engine SDK.
  from google.appengine.ext.db import EmailProperty
  def get_form_field(self, **kwargs):
    """Return a Django form field appropriate for an email property."""
    # Callers may override form_class (or any other option) via kwargs.
    defaults = {'form_class': forms.EmailField}
    defaults.update(kwargs)
    return super(EmailProperty, self).get_form_field(**defaults)
  EmailProperty.get_form_field = get_form_field
| Python |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file acts as a very minimal replacement for the 'imp' module.
It contains only what Django expects to use and does not actually implement the
same functionality as the real 'imp' module.
"""
def find_module(name, path=None):
  """Django needs imp.find_module, but it works fine if nothing is found.

  This stub never locates a module; Django copes gracefully with the
  resulting ImportError.

  Args:
    name: The module name being searched for (used only in the error).
    path: Optional list of paths to search (ignored).

  Raises:
    ImportError: Always. The message now names the requested module to aid
        debugging (previously the exception carried no message).
  """
  raise ImportError("No module named %s" % name)
| Python |
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module replaces the Django mail implementation with a version that sends
email via the mail API provided by Google App Engine.
Multipart / HTML email is not yet supported.
"""
import logging
from django.core import mail
from django.core.mail import SMTPConnection
from django.conf import settings
from google.appengine.api import mail as gmail
class GoogleSMTPConnection(SMTPConnection):
  """An SMTPConnection replacement that delivers via the App Engine mail API."""

  def __init__(self, host=None, port=None, username=None, password=None,
               use_tls=None, fail_silently=False):
    # host/port/username/password are accepted for interface compatibility
    # with Django's SMTPConnection but are ignored: delivery is handled
    # entirely by the App Engine mail service.
    # Bug fix: the previous 'and/or' idiom meant an explicit use_tls=False
    # was silently overridden by settings.EMAIL_USE_TLS.
    if use_tls is None:
      self.use_tls = settings.EMAIL_USE_TLS
    else:
      self.use_tls = use_tls
    self.fail_silently = fail_silently
    self.connection = None

  def open(self):
    """No-op; the App Engine mail API needs no persistent connection."""
    self.connection = True

  def close(self):
    """No-op; there is no connection to close."""
    pass

  def _send(self, email_message):
    """A helper method that does the actual sending.

    Returns:
      True if the message was sent, False otherwise.
    """
    if not email_message.to:
      return False
    try:
      if (isinstance(email_message, gmail.EmailMessage)):
        # Already an App Engine message; send it as-is.
        # Bug fix: this branch previously assigned the undefined name
        # 'message', raising NameError at send time.
        e = email_message
      elif (isinstance(email_message, mail.EmailMessage)):
        # Convert the Django message into an App Engine message.
        e = gmail.EmailMessage(sender=email_message.from_email,
                               to=email_message.to,
                               subject=email_message.subject,
                               body=email_message.body)
        if email_message.extra_headers.get('Reply-To', None):
          e.reply_to = email_message.extra_headers['Reply-To']
        if email_message.bcc:
          e.bcc = list(email_message.bcc)
        #TODO - add support for html messages and attachments...
      else:
        # Previously this fell through with 'e' unbound (NameError); raise
        # an explicit error for unsupported message types instead.
        raise ValueError("Unsupported message type: %r" % type(email_message))
      e.send()
    except:
      # fail_silently mirrors Django's semantics: swallow delivery errors
      # unless the caller asked to see them.
      if not self.fail_silently:
        raise
      return False
    return True
def mail_admins(subject, message, fail_silently=False):
    """Mail the site admins (the ADMINS setting) via the group helper."""
    _mail_group(settings.ADMINS, subject, message, fail_silently=fail_silently)
def mail_managers(subject, message, fail_silently=False):
    """Mail the site managers (the MANAGERS setting) via the group helper."""
    _mail_group(settings.MANAGERS, subject, message, fail_silently=fail_silently)
def _mail_group(group, subject, message, fail_silently=False):
    """Sends a message to an administrative group.

    If the group has recipients, the (patched) Django mail machinery is used;
    otherwise the message falls through to the App Engine application admins.
    """
    if group:
        mail.send_mail(settings.EMAIL_SUBJECT_PREFIX + subject, message,
                       settings.SERVER_EMAIL, [a[1] for a in group],
                       fail_silently)
        return
    # If the group had no recipients defined, default to the App Engine admins.
    try:
        gmail.send_mail_to_admins(settings.SERVER_EMAIL,
                                  settings.EMAIL_SUBJECT_PREFIX + subject,
                                  message)
    except Exception:
        # BUG FIX: narrowed from a bare `except:` so SystemExit and
        # KeyboardInterrupt are not swallowed; API failures still honour
        # fail_silently.
        if not fail_silently:
            raise
| Python |
#!/usr/bin/env python
# manage.py for an App Engine + Django project.  The helper MUST be installed
# before any Django machinery is imported, so that Django gets patched for
# the App Engine runtime first.
from appengine_django import InstallAppengineHelperForDjango
InstallAppengineHelperForDjango()
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
if __name__ == "__main__":
    # Dispatch to Django's management commands (runserver, shell, ...).
    execute_manager(settings)
| Python |
#!/usr/bin/env python
"""Command-line front end: upload a local file or a URL to imageshack.us."""
import imageshack
import sys
if __name__ == "__main__":
    # Exactly two arguments are required: developer key and file/URL source.
    if len(sys.argv) != 3:
        print("Usage upload.py <devkey> <filename/url>")
        sys.exit(1)
    uploader = imageshack.Uploader(sys.argv[1])
    source = sys.argv[2]
    try:
        if source.startswith("http://"):
            print(uploader.uploadURL(source))
        else:
            print(uploader.uploadFile(source))
    except imageshack.ServerException as err:
        print(str(err))
| Python |
#!/usr/bin/env python
"""Command-line front end: upload a local file or a URL to yfrog.com."""
import yfrog
import sys
if __name__ == "__main__":
    if len(sys.argv) < 4:
        print("Usage upload.py <username> <password> <filename/url> [<text>] [<source>]")
        sys.exit(1)
    uploader = yfrog.Uploader()
    # Optional trailing arguments: tweet text and "posted from" source.
    msg = sys.argv[4] if len(sys.argv) >= 5 else None
    src = sys.argv[5] if len(sys.argv) >= 6 else 'yfrog'
    target = sys.argv[3]
    try:
        if target.startswith("http://"):
            print(uploader.uploadURL(target, sys.argv[1], sys.argv[2],
                                     message=msg, source=src))
        else:
            print(uploader.uploadFile(target, sys.argv[1], sys.argv[2],
                                      message=msg, source=src))
    except yfrog.ServerException as err:
        print(str(err))
| Python |
#!/usr/bin/env python
import os
import sys
import getopt
import imageshack
def usage():
    """Print the command-line help text for apitest.py."""
    print("Usage apitest.py --file filename --key key [--id ID] [--cookie COOKIE] [--tags TAGS] [--visibility PUBLIC] [--username USERNAME] [--password PASSWORD] [--blocksize=BLOCKSIZE] [--numblocks=NUMBLOCKS]")
def getopts(argv):
    """Parse the command-line options into a dict.

    Args:
        argv: full argument vector (argv[0] is the program name).
    Returns:
        dict with keys file/key/cookie/id/tags/username/password/public
        (always present, default None) plus blocksize/numblocks when given.
    Exits with status 2 (after printing usage) on an unknown option.
    """
    data = { 'file': None,
             'key': None,
             'cookie': None,
             'id': None,
             'tags': None,
             'username': None,
             'password': None,
             'public': None }
    try:
        # BUG FIX: register the short option 'c:' — the handler below accepts
        # -c/--cookie but getopt previously rejected -c as unknown.
        # BUG FIX: parse the argv that was passed in instead of always
        # re-reading sys.argv (the parameter was ignored).
        opts, args = getopt.getopt(argv[1:], "f:k:i:c:t:v:u:p:b:n:",
                                   ["file=", "key=", "id=", "cookie=", "tags=",
                                    "visibility=", "username=", "password=",
                                    "blocksize=", "numblocks="])
    except getopt.GetoptError as err:
        print(str(err))
        usage()
        sys.exit(2)
    for o, a in opts:
        # One uniform elif chain; the original mixed `if`/`if`/`elif`, which
        # worked only by accident of the options being mutually exclusive.
        if o in ("-f", "--file"):
            data['file'] = a
        elif o in ("-k", "--key"):
            data['key'] = a
        elif o in ("-i", "--id"):
            data['id'] = a
        elif o in ("-c", "--cookie"):
            data['cookie'] = a
        elif o in ("-t", "--tags"):
            data['tags'] = a
        elif o in ("-v", "--visibility"):
            data['public'] = a.upper() == 'TRUE'
        elif o in ("-u", "--username"):
            data['username'] = a
        elif o in ("-p", "--password"):
            data['password'] = a
        elif o in ("-b", "--blocksize"):
            data['blocksize'] = int(a)
        elif o in ("-n", "--numblocks"):
            data['numblocks'] = int(a)
    return data
def main():
    """Entry point: validate the parsed options and run a chunked upload."""
    opts = getopts(sys.argv)
    # The developer key and an existing file are both mandatory.
    if not opts['key']:
        print('ERROR: No developer key specified')
        sys.exit(1)
    if not opts['file'] or not os.path.isfile(opts['file']):
        print('ERROR: No file specified or not existing file')
        sys.exit(1)
    uploader = imageshack.ChunkedUploader(opts['key'], opts['cookie'],
                                          opts['username'], opts['password'])
    try:
        res = uploader.upload_file(opts['file'], opts['tags'], opts['public'])
    except Exception as e:
        print('ERROR: File could not be uploaded:')
        print(e)
        sys.exit(1)
    # upload_file returns (status, reason, body); show the response body.
    print(res[2])
if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/env python
'''
Client API library to upload images and videos to yfrog.com
Using Yfrog public API, as described here:
http://yfrog.com/api.php
'''
import urllib2_file
import urllib2
import socket
from mimetypes import guess_type
from xml.dom.minidom import parseString
from os.path import exists
API_URL = 'http://yfrog.com/api/%s'
HTTP_UPLOAD_TIMEOUT = 300
class UploadException(Exception):
    """Client-side upload failure: bad input, I/O trouble, or an
    undecodable server response."""
    pass
class ServerException(Exception):
    """Upload error explicitly reported by the yfrog server.

    Carries the numeric error code and the (possibly None) message from the
    server's <err> element.
    """
    def __init__(self, code, message):
        self.code = code
        self.message = message
    def __str__(self):
        details = (self.code, self.message)
        return "ServerException:%d:%s" % details
class Uploader:
    ''' Class to upload images and video to yfrog.com '''
    def __init__(self, timeout=HTTP_UPLOAD_TIMEOUT):
        '''Creates uploader object.
        Args:
            timeout: timeout in seconds for upload operation (optional)
        '''
        self.timeout = timeout
    def uploadURL(self,
                  url,
                  twitter_username,
                  twitter_password,
                  message = None,
                  tags = None,
                  public = True,
                  source = 'yfrog',
                  auth = None,
                  key = None):
        '''Uploads a remote file given by URL.
        Args:
            url: url of file to be uploaded
            twitter_username: Twitter account user name (doc fix: was labelled "password")
            twitter_password: Twitter account password (doc fix: was labelled "username")
            message: Message to post to twitter. The URL of the image or video is automatically added. (optional)
            tags: comma-separated list of tags (optional)
            public: whether the image is public or not
            source: Twitter 'posted from' attribute
            auth: optional auth token forwarded to the API
            key: Developer key. See http://code.google.com/p/imageshackapi/wiki/DeveloperKey
        Returns a dictionary with the following keys:
            url: url of uploaded image (this is URL for HTML page)
        Raises UploadException / ServerException on failure.
        '''
        data = {'url' : url,
                'public' : self._yesno(public),
                'username' : twitter_username,
                'password' : twitter_password,
                'source' : source
                }
        if tags:
            data['tags'] = tags
        if auth:
            data['auth'] = auth
        if key:
            data['key'] = key
        # A message means we also post to Twitter, which is a different API call.
        if message:
            data['message'] = message
            apiurl = API_URL % "uploadAndPost"
        else:
            apiurl = API_URL % "upload"
        req = urllib2.Request(apiurl, data, {})
        socket.setdefaulttimeout(self.timeout)
        u = urllib2.urlopen(req)
        xmlres = u.read()
        return self._parseResponse(xmlres)
    def uploadFile(self,
                   filename,
                   twitter_username,
                   twitter_password,
                   message = None,
                   content_type = None,
                   tags = None,
                   public = True,
                   source = 'yfrog',
                   auth = None,
                   key = None):
        '''Uploads a local file.
        Args:
            filename: media file name to be uploaded
            twitter_username: Twitter account user name (doc fix: was labelled "password")
            twitter_password: Twitter account password (doc fix: was labelled "username")
            message: Message to post to twitter. The URL of the image or video is automatically added. (optional)
            content_type: content type of file; guessed from the name if omitted (optional)
            tags: comma-separated list of tags (optional)
            public: whether the image is public or not
            source: Twitter 'posted from' attribute
            auth: optional auth token forwarded to the API
            key: Developer key. See http://code.google.com/p/imageshackapi/wiki/DeveloperKey
        Returns a dictionary with the following keys:
            url: url of uploaded image (this is URL for HTML page)
        Raises UploadException / ServerException on failure.
        '''
        if not exists(filename):
            raise UploadException("File %s does not exist" % filename)
        if content_type == None:
            (content_type, encoding) = guess_type(filename, False)
        if content_type == None:
            raise UploadException("Could not guess content/type for input file %s" % filename)
        fd = open(filename, 'rb')
        try:
            data = {'media' : urllib2_file.FileUpload(fd, content_type),
                    'public' : self._yesno(public),
                    'username' : twitter_username,
                    'password' : twitter_password,
                    'source' : source
                    }
            if tags:
                data['tags'] = tags
            if auth:
                data['auth'] = auth
            if key:
                data['key'] = key
            if message:
                data['message'] = message
                apiurl = API_URL % "uploadAndPost"
            else:
                # BUG FIX: was `apirul = ...` (typo), which left `apiurl`
                # undefined and raised NameError whenever no message was given.
                apiurl = API_URL % "upload"
            req = urllib2.Request(apiurl, data, {})
            socket.setdefaulttimeout(self.timeout)
            u = urllib2.urlopen(req)
            xmlres = u.read()
            return self._parseResponse(xmlres)
        finally:
            fd.close()
    def _parseErrorResponse(self, d):
        # A "fail" response carries a single <err code=... msg=...> element.
        err = d.getElementsByTagName('err')
        if err == None or len(err) != 1:
            raise UploadException("Cound not decode server XML response (no err element)")
        ca = err[0].attributes.get('code')
        if ca == None:
            raise UploadException("Cound not decode server XML response (no code attriubute)")
        ma = err[0].attributes.get('msg')
        if ma == None:
            raise ServerException(int(ca.value), None)
        else:
            raise ServerException(int(ca.value), ma.value)
    def _parseOKResponse(self, d):
        # An "ok" response carries a <mediaurl> with the page URL as text.
        mu = d.getElementsByTagName('mediaurl')
        if mu == None or len(mu) != 1:
            raise UploadException("Cound not decode server XML response (no mediaurl element)")
        url = self._getText(mu[0].childNodes)
        return {'url': url}
    def _parseResponse(self, xmlres):
        # Dispatch on the <rsp stat="ok|fail"> attribute.
        d = parseString(xmlres)
        try:
            rsp = d.getElementsByTagName('rsp')
            if rsp == None or len(rsp) != 1:
                raise UploadException("Cound not decode server XML response (no rsp element)")
            sa = rsp[0].attributes.get('stat')
            if sa == None:
                raise UploadException("Cound not decode server XML response (no stat attriubute)")
            if sa.value == 'fail':
                return self._parseErrorResponse(d)
            elif sa.value == 'ok':
                return self._parseOKResponse(d)
            else:
                raise UploadException("Cound not decode server XML response (unrecognized stat attriubute value)")
        finally:
            d.unlink()
    def _yesno(self, x):
        # The API wants literal "yes"/"no" rather than booleans.
        if x:
            return 'yes'
        else:
            return 'no'
    def _getText(self, nodelist):
        # Concatenate all text children of a DOM node.
        rc = ""
        for node in nodelist:
            if node.nodeType == node.TEXT_NODE:
                rc = rc + node.data
        return rc
| Python |
''' yfrog api '''
# Package facade: re-export the upload API (Uploader, UploadException,
# ServerException) so callers can simply `import yfrog`.
from upload import *
| Python |
#!/usr/bin/env python
####
# Version: 0.2.0
# - UTF-8 filenames are now allowed (Eli Golovinsky)<br/>
# - File object is no more mandatory, Object only needs to have seek() read() attributes (Eli Golovinsky)<br/>
#
# Version: 0.1.0
# - upload is now done with chunks (Adam Ambrose)
#
# Version: older
# THANKS TO:
# bug fix: kosh @T aesaeion.com
# HTTPS support : Ryan Grow <ryangrow @T yahoo.com>
# Copyright (C) 2004,2005,2006 Fabien SEISEN
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# you can contact me at: <fabien@seisen.org>
# http://fabien.seisen.org/python/
#
# Also modified by Adam Ambrose (aambrose @T pacbell.net) to write data in
# chunks (hardcoded to CHUNK_SIZE for now), so the entire contents of the file
# don't need to be kept in memory.
#
"""
Enables uploading files using multipart/form-data.
idea from:
upload files in python:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
timeoutsocket.py: overriding Python socket API:
http://www.timo-tasi.org/python/timeoutsocket.py
http://mail.python.org/pipermail/python-announce-list/2001-December/001095.html
import urllib2_files
import urllib2
u = urllib2.urlopen('http://site.com/path' [, data])
data can be a mapping object or a sequence of two-elements tuples
(like in original urllib2.urlopen())
varname still need to be a string and
value can be string of a file object
eg:
((varname, value),
(varname2, value),
)
or
{ name: value,
name2: value2
}
"""
import os
import socket
import sys
import stat
import mimetypes
import mimetools
import httplib
import urllib
import urllib2
CHUNK_SIZE = 65536
class FileUpload:
    """Marker wrapper pairing an open file object with its MIME content type.

    Values of this type in the POST data are encoded as multipart file parts
    instead of plain form fields by send_data()/the patched handlers.
    """
    def __init__(self, fd, content_type):
        self.content_type = content_type
        self.fd = fd
def get_content_type(filename):
    """Best-effort MIME type for *filename*, defaulting to octet-stream."""
    guessed = mimetypes.guess_type(filename)[0]
    if guessed:
        return guessed
    return 'application/octet-stream'
# Multipart/form-data serializer shared by the patched HTTP(S) handlers.
def send_data(v_vars, v_files, boundary, sock=None):
    """Encode form variables and file parts as multipart/form-data.

    When *sock* is given the payload is written to it; in either case the
    total payload length in bytes is returned, so a caller can compute
    Content-Length first (sock=None) and then stream the body.
    """
    total = 0
    # Plain form fields: one small part each.
    for (field, value) in v_vars:
        part = '--%s\r\n' % boundary
        part += 'Content-Disposition: form-data; name="%s"\r\n' % field
        part += '\r\n'
        part += value + '\r\n'
        if sock:
            sock.send(part)
        total += len(part)
    # File parts: header first, then the file contents streamed in chunks.
    for (field, upload) in v_files:
        fd = upload.fd
        file_size = os.fstat(fd.fileno())[stat.ST_SIZE]
        # Send only the basename, never the client-side path.
        name = fd.name.split('/')[-1]
        if isinstance(name, unicode):
            name = name.encode('UTF-8')
        head = '--%s\r\n' % boundary
        head += 'Content-Disposition: form-data; name="%s"; filename="%s"\r\n' \
                % (field, name)
        if upload.content_type != None:
            content_type = upload.content_type
        else:
            content_type = get_content_type(name)
        head += 'Content-Type: %s\r\n' % content_type
        head += 'Content-Length: %ld\r\n' % file_size
        head += '\r\n'
        total += len(head)
        if sock:
            sock.send(head)
            if hasattr(fd, 'seek'):
                fd.seek(0)
            while True:
                chunk = fd.read(CHUNK_SIZE)
                if not chunk:
                    break
                sock.send(chunk)
        # The size estimate counts the file without reading it.
        total += file_size
    # Closing boundary.
    tail = '\r\n'
    tail += '--%s--\r\n' % boundary
    tail += '\r\n'
    if sock:
        sock.send(tail)
    total += len(tail)
    return total
# mainly a copy of HTTPHandler from urllib2
class newHTTPHandler(urllib2.BaseHandler):
    # Drop-in replacement for urllib2's HTTP handler that understands
    # FileUpload values in the request data and sends them as
    # multipart/form-data.  (Python 2 only: uses httplib.HTTP and the
    # three-argument raise statement.)
    def http_open(self, req):
        return self.do_open(httplib.HTTP, req)
    def do_open(self, http_class, req):
        data = req.get_data()
        v_files=[]
        v_vars=[]
        # mapping object (dict)
        if req.has_data() and type(data) != str:
            if hasattr(data, 'items'):
                data = data.items()
            else:
                # Must be a sequence of (name, value) tuples.
                try:
                    if len(data) and not isinstance(data[0], tuple):
                        raise TypeError
                except TypeError:
                    ty, va, tb = sys.exc_info()
                    raise TypeError, "not a valid non-string sequence or mapping object", tb
            # Split FileUpload entries from plain form fields.
            for (k, v) in data:
                if isinstance(v, FileUpload):
                    v_files.append((k, v))
                else:
                    v_vars.append( (k, v) )
        # no file ? convert to string
        if len(v_vars) > 0 and len(v_files) == 0:
            data = urllib.urlencode(v_vars)
            v_files=[]
            v_vars=[]
        host = req.get_host()
        if not host:
            raise urllib2.URLError('no host given')
        h = http_class(host) # will parse host:port
        if req.has_data():
            h.putrequest('POST', req.get_selector())
            if not 'Content-type' in req.headers:
                if len(v_files) > 0:
                    # Dry run of send_data (no socket) to learn the body
                    # length for the Content-length header.
                    boundary = mimetools.choose_boundary()
                    l = send_data(v_vars, v_files, boundary)
                    h.putheader('Content-Type',
                                'multipart/form-data; boundary=%s' % boundary)
                    h.putheader('Content-length', str(l))
                else:
                    h.putheader('Content-type',
                                'application/x-www-form-urlencoded')
                    if not 'Content-length' in req.headers:
                        h.putheader('Content-length', '%d' % len(data))
        else:
            h.putrequest('GET', req.get_selector())
        scheme, sel = urllib.splittype(req.get_selector())
        sel_host, sel_path = urllib.splithost(sel)
        h.putheader('Host', sel_host or host)
        # Handler-level default headers, unless the request overrides them.
        for name, value in self.parent.addheaders:
            name = name.capitalize()
            if name not in req.headers:
                h.putheader(name, value)
        for k, v in req.headers.items():
            h.putheader(k, v)
        # httplib will attempt to connect() here. be prepared
        # to convert a socket error to a URLError.
        try:
            h.endheaders()
        except socket.error, err:
            raise urllib2.URLError(err)
        if req.has_data():
            if len(v_files) > 0:
                # Second pass of send_data actually streams the body.
                l = send_data(v_vars, v_files, boundary, h)
            elif len(v_vars) > 0:
                # if data is passed as dict ...
                data = urllib.urlencode(v_vars)
                h.send(data)
            else:
                # "normal" urllib2.urlopen()
                h.send(data)
        code, msg, hdrs = h.getreply()
        fp = h.getfile()
        if code == 200:
            resp = urllib.addinfourl(fp, hdrs, req.get_full_url())
            resp.code = code
            resp.msg = msg
            return resp
        else:
            # Delegate non-200 responses to urllib2's error machinery.
            return self.parent.error('http', req, fp, code, msg, hdrs)
# Monkey-patch urllib2 at import time: swap in the multipart-capable handler,
# keeping a reference to the stock one for debugging/restoration.
urllib2._old_HTTPHandler = urllib2.HTTPHandler
urllib2.HTTPHandler = newHTTPHandler
# Same treatment for HTTPS; only the httplib transport class differs.
class newHTTPSHandler(newHTTPHandler):
    def https_open(self, req):
        return self.do_open(httplib.HTTPS, req)
urllib2.HTTPSHandler = newHTTPSHandler
# Manual smoke test: upload a file to a URL of your choosing.
if __name__ == '__main__':
    import getopt
    import urllib2
    import urllib2_file
    import string
    import sys
    def usage(progname):
        print """
SYNTAX: %s -u url -f file [-v]
""" % progname
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hvu:f:')
    except getopt.GetoptError, errmsg:
        print "ERROR:", errmsg
        sys.exit(1)
    v_url = ''
    v_verbose = 0
    v_file = ''
    for name, value in opts:
        if name in ('-h',):
            usage(sys.argv[0])
            sys.exit(0)
        elif name in ('-v',):
            v_verbose += 1
        elif name in ('-u',):
            v_url = value
        elif name in ('-f',):
            v_file = value
        else:
            print "invalid argument:", name
            sys.exit(2)
    # Both the target URL and the file are mandatory.
    error = 0
    if v_url == '':
        print "need -u"
        error += 1
    if v_file == '':
        print "need -f"
        error += 1
    if error > 0:
        sys.exit(3)
    fd = open(v_file, 'r')
    # An open file in the data dict triggers the multipart code path.
    data = {
        'filename' : fd,
        }
    # u = urllib2.urlopen(v_url, data)
    req = urllib2.Request(v_url, data, {})
    try:
        u = urllib2.urlopen(req)
    except urllib2.HTTPError, errobj:
        print "HTTPError:", errobj.code
    else:
        buf = u.read()
        print "OK"
| Python |
#!/usr/bin/env python
'''
Client API library to upload images and videos to imageshack.us
Using "Unified upload API" as described here:
http://reg.imageshack.us/content.php?page=developerpublic
'''
import urllib2_file
import urllib2
import socket
import httplib
from mimetypes import guess_type
from xml.dom.minidom import parseString
from os.path import exists
from urlparse import urlsplit
IMAGE_API_URL = 'http://www.imageshack.us/upload_api.php'
VIDEO_API_URL = 'http://render.imageshack.us/upload_api.php'
HTTP_UPLOAD_TIMEOUT = 300
class UploadException(Exception):
    """Raised for client-side upload failures (bad arguments, missing files,
    undecodable responses)."""
    pass
class ServerException(Exception):
    """Upload error explicitly reported by the imageshack server.

    `code` is the server's error id (kept as-is, may be a string);
    `message` is the accompanying text.
    """
    def __init__(self, code, message):
        self.code = code
        self.message = message
    def __str__(self):
        return "ServerException:%s:%s" % (self.code, self.message)
class Uploader:
    ''' Class to upload images and video to imageshack.
    '''
    def __init__(self, dev_key, cookie=None, username=None, password=None, timeout=HTTP_UPLOAD_TIMEOUT):
        '''Creates uploader object.
        Args:
            dev_key: developer key (mandatory)
            cookie: imagesack user cookie (optional)
            username,password: imageshack user account credentials (optional)
            timeout: timeout in seconds for upload operation (optional)
        '''
        self.cookie = cookie
        self.username = username
        self.password = password
        self.dev_key = dev_key
        self.timeout = timeout
    def uploadFile(self,
                   filename,
                   optsize = None,
                   remove_bar = True,
                   tags = None,
                   public = None,
                   content_type = None,
                   frame_filename = None):
        '''Upload an image or video file.
        Args:
            filename: file name of image or video file to upload
            optsize: optional resizing parameter as a (width, height) tuple
            remove_bar: remove information bar on thumbnail
            content_type: content type of file; guessed from the name if omitted (optional)
            tags: comma-separated list of tags (optional)
            public: whether the image is public. None means "user default" (optional)
            frame_filename: for video files, optional frame shown in the player while the movie loads. Must be JPEG.
        Returns:
            XML document (string) with information on the uploaded image.
        '''
        return self._upload(filename, None,
                            optsize, remove_bar,
                            tags, public,
                            content_type, frame_filename)
    def uploadURL(self,
                  url,
                  optsize = None,
                  remove_bar = True,
                  tags = None,
                  public = None,
                  frame_filename = None):
        '''Upload an image or video by URL.
        Args:
            url: URL pointing to image or video file to upload
            optsize: optional resizing parameter as a (width, height) tuple
            remove_bar: remove information bar on thumbnail
            tags: comma-separated list of tags (optional)
            public: whether the image is public. None means "user default" (optional)
            frame_filename: for video files, optional frame shown in the player while the movie loads. Must be JPEG.
        Returns:
            XML document (string) with information on the uploaded image.
        '''
        return self._upload(None, url,
                            optsize, remove_bar,
                            tags, public,
                            None, frame_filename)
    def _upload(self,
                filename,
                url,
                optsize = None,
                remove_bar = True,
                tags = None,
                public = True,
                content_type = None,
                frame_filename = None):
        # Validate the argument combinations up front.
        if not filename and not url:
            raise UploadException("No source specified")
        if (self.username and not self.password) or (self.password and not self.username):
            raise UploadException("Must specify both usernane and password")
        if self.username and self.cookie:
            raise UploadException("Must specify either usernane/password or cookie but not both")
        if frame_filename and not exists(frame_filename):
            raise UploadException("File %s does not exist" % frame_filename)
        if filename:
            if not exists(filename):
                raise UploadException("File %s does not exist" % filename)
            if content_type == None:
                (content_type, encoding) = guess_type(filename, False)
        else:
            content_type = self._getURLContentType(url)
        if content_type == None:
            raise UploadException("Could not guess content/type for input file %s" % filename)
        # Images and videos go to different API endpoints.
        if content_type.lower().startswith("image/"):
            api_url = IMAGE_API_URL
            is_video = False
        elif content_type.lower().startswith("video/"):
            api_url = VIDEO_API_URL
            is_video = True
        else:
            raise UploadException("Unsupported content type %s" % content_type)
        # some sanity checks
        if is_video:
            if optsize:
                raise UploadException("Resizing is not supported for video files")
        else:
            if frame_filename:
                raise UploadException("Could not specify frame for image files")
        if filename:
            fd = open(filename, 'rb')
        else:
            fd = None
        try:
            data = {'key' : self.dev_key,
                    'rembar' : self._yesno(remove_bar)
                    }
            if fd:
                data['fileupload'] = urllib2_file.FileUpload(fd, content_type)
            else:
                data['url'] = url
            if frame_filename != None:
                tfd = open(frame_filename, 'rb')
            else:
                tfd = None
            try:
                if tfd != None:
                    data['frmupload'] = urllib2_file.FileUpload(tfd, "image/jpeg")
                # Some optional parameters
                if public:
                    data['public'] = self._yesno(public)
                if optsize:
                    data['optimage'] = '1'
                    data['optsize'] = "%dx%d" % optsize
                if self.cookie:
                    data['cookie'] = self.cookie
                if self.username:
                    data['a_username'] = self.username
                if self.password:
                    # BUG FIX: previously sent self.username as the password.
                    data['a_password'] = self.password
                if tags:
                    data['tags'] = tags
                req = urllib2.Request(api_url, data, {})
                # BUG FIX: honour the per-instance timeout instead of always
                # using the module-level HTTP_UPLOAD_TIMEOUT constant.
                socket.setdefaulttimeout(self.timeout)
                resp = urllib2.urlopen(req)
                xmlres = resp.read()
                return self._parseResponse(xmlres)
            finally:
                if tfd != None:
                    tfd.close()
        finally:
            if fd:
                fd.close()
    def _yesno(self, x):
        # The API wants literal "yes"/"no" rather than booleans.
        if x:
            return 'yes'
        else:
            return 'no'
    def _parseErrorResponse(self, err):
        # <error id="..."> holds the server's error id; its text is the message.
        ia = err.attributes.get('id')
        if ia == None:
            raise UploadException("Cound not decode server error XML response (no id attriubute)")
        raise ServerException(ia.value, self._getText(err.childNodes))
    def _parseResponse(self, xmlres):
        # A successful response is returned verbatim; an <error> inside
        # <links> is turned into a ServerException.
        d = parseString(xmlres)
        try:
            links = d.getElementsByTagName('links')
            if links == None or len(links) != 1:
                raise UploadException("Cound not decode server XML response (no links element)")
            error = links[0].getElementsByTagName('error')
            if error != None and len(error) > 0:
                return self._parseErrorResponse(error[0])
            else:
                return xmlres
        finally:
            d.unlink()
    def _getText(self, nodelist):
        # Concatenate all text children of a DOM node.
        rc = ""
        for node in nodelist:
            if node.nodeType == node.TEXT_NODE:
                rc = rc + node.data
        return rc
    def _getURLContentType(self, url):
        # HEAD the remote resource to learn its Content-Type.
        parsed_url = urlsplit(url)
        if parsed_url == None or parsed_url.hostname == None or len(parsed_url.hostname) == 0:
            raise UploadException("Invalid URL %s" % url)
        c = httplib.HTTPConnection(parsed_url.hostname)
        c.request('HEAD', url)
        r = c.getresponse()
        if r.status != 200:
            raise UploadException("Error %d fetching URL %s" % (r.status, url))
        return r.getheader("Content-Type")
| Python |
#!/usr/bin/env python
'''
Client API library for chunked video uploading to imageshack.us
Using "Streaming upload API" as described here:
http://code.google.com/p/imageshackapi/wiki/StreamingAPI
'''
import os
import urllib
import httplib
import urllib2
from urlparse import urlparse
from os.path import exists
from urlparse import urlsplit
from mimetypes import guess_type
from xml.dom.minidom import parse
from xml.dom.minidom import parseString
BLOCK_SIZE=1024
SERVER='render.imageshack.us'
PATH='/renderapi'
ENDPOINT='http://'+SERVER+PATH
class UploadException(Exception):
    """Client-side failure (connectivity, file I/O) during a streaming upload."""
    pass
class ServerException(Exception):
    """Error attributed to the remote server during a streaming upload."""
    def __init__(self, code, message):
        self.message = message
        self.code = code
    def __str__(self):
        return "ServerException:%s:%s" % (self.code, self.message)
class Uploader:
    ''' Class to upload images and video to imageshack.
    '''
    def __init__(self, dev_key, cookie=None, username=None, password=None):
        '''Creates uploader object.
        Args:
            dev_key: developer key (mandatory)
            cookie: imagesack user cookie (optional)
            username,password: imageshack user account credentials (optional)
        '''
        self.cookie = cookie
        self.username = username
        self.password = password
        self.dev_key = dev_key
    def start(self, filename, tags = [], public = None):
        '''Request file upload URL from server.
        Args:
            filename: name of the file that will be uploaded
            tags: list of tags
            public: visibility (True, False, or None for account default)
        Returns a (putURL, getlengthURL) tuple.
        Raises UploadException / ServerException on failure.
        '''
        data = {'filename' : filename}
        data['key'] = self.dev_key
        if self.cookie is not None:
            data['cookie'] = self.cookie
        if tags:
            data['tags'] = ','.join(tags)
        if public in (True, False):
            data['public'] = "yes" if public else "no"
        if self.username is not None:
            data['a_username'] = self.username
        if self.password is not None:
            data['a_password'] = self.password
        # NOTE: a debug `print data` used to leak the account credentials to
        # stdout here; removed.
        try:
            req = urllib2.urlopen(ENDPOINT + '/start', urllib.urlencode(data))
            xml = req.read()
        except:
            raise UploadException('Could not connect to server')
        try:
            dom = parseString(xml)
            url = dom.documentElement.getAttribute('putURL')
            getlenurl = dom.documentElement.getAttribute('getlengthURL')
        except:
            # BUG FIX: ServerException takes (code, message); raising it with
            # a single argument raised TypeError instead of the intended error.
            raise ServerException(None, 'Wrong server response')
        dom.unlink()
        req.close()
        return (url, getlenurl)
    def get_length(self, url):
        '''Get the number of bytes the server already holds for an upload.
        Args:
            url: getlengthURL of start output
        Returns int byte count.
        '''
        try:
            size = urllib.urlopen(url).read()
        except:
            raise UploadException('Could not connect to server')
        try:
            size = int(size)
        except:
            # BUG FIX: two-argument ServerException (see start()).
            raise ServerException(None, 'Wrong server response')
        return size
    def upload_file(self, filename, tags = [], public = True, end = -1):
        '''Upload file to ImageShack using streaming API.
        Args:
            filename: local file to upload
            tags: list of tags
            public: visibility (True, False or None)
            end: last byte number that will be uploaded.
                 If end is -1, file will be uploaded to the end.
        '''
        url = self.start(filename, tags, public)[0]
        # BUG FIX: `end` was ignored (hard-coded -1); pass it through as the
        # docstring promises.
        return self.upload_range(filename, url, 0, end)
    def resume_upload(self, filename, url, getlenurl, end = -1):
        '''Resumes file upload from the byte count the server reports.
        Args:
            url: putURL from start output
            getlenurl: getlengthURL from start output
            end: last byte number to upload (-1 for all file)
        '''
        size = self.get_length(getlenurl)
        return self.upload_range(filename, url, size, end)
    def upload_range(self, filename, url, begin = 0, end = -1):
        '''Upload a byte range of the file to the server.
        Args:
            url: upload url (get one using start method)
            begin: first byte number
            end: last byte number to upload (-1 for all file)
        Returns (status, reason, response body) from the server.
        '''
        purl = urlparse(url)
        current_byte = begin
        filelen = os.path.getsize(filename)
        # Clamp the requested range to the actual file size.
        if end == -1:
            end = filelen
        if end > filelen:
            end = filelen
        try:
            conn = httplib.HTTPConnection(purl.netloc)
            conn.connect()
            conn.putrequest('PUT', purl.path)
            range_str = "bytes %d-%d/%d" % (begin, end, filelen)
            conn.putheader('Content-range', range_str)
            conn.putheader('Content-type', 'application/octet-stream')
            # BUG FIX: the original line had an unbalanced parenthesis
            # (SyntaxError) and passed an int; header values are strings.
            conn.putheader('Content-length', str(end - begin))
            conn.endheaders()
        except:
            raise UploadException('Could not connect to server')
        try:
            fileobj = open(filename, 'rb')
        except:
            raise UploadException('Could not open file')
        try:
            fileobj.seek(begin)
        except:
            raise UploadException('Could not seek file')
        # Stream the requested range in BLOCK_SIZE chunks.
        while current_byte < end:
            try:
                data = fileobj.read(BLOCK_SIZE)
                print('sending %d bytes' % len(data))
            except:
                raise UploadException('File I/O error')
            try:
                conn.send(data)
            except:
                raise UploadException('Could not send data')
            current_byte += len(data)
        print('sent data')
        fileobj.close()
        try:
            print('waiting for response')
            resp = conn.getresponse()
            print('reading response')
            res = resp.read()
        except:
            raise UploadException('Could not get server response')
        return (resp.status, resp.reason, res)
| Python |
''' imageshack api '''
# Package facade: re-export the one-shot upload API (Uploader, exceptions).
from upload import *
# The streaming uploader lives in chuncked_upload (sic — the module file is
# spelled that way); expose it under a clearer alias.
from chuncked_upload import Uploader as ChunkedUploader
#!/usr/bin/env python
####
# Version: 0.2.0
# - UTF-8 filenames are now allowed (Eli Golovinsky)<br/>
# - File object is no more mandatory, Object only needs to have seek() read() attributes (Eli Golovinsky)<br/>
#
# Version: 0.1.0
# - upload is now done with chunks (Adam Ambrose)
#
# Version: older
# THANKS TO:
# bug fix: kosh @T aesaeion.com
# HTTPS support : Ryan Grow <ryangrow @T yahoo.com>
# Copyright (C) 2004,2005,2006 Fabien SEISEN
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# you can contact me at: <fabien@seisen.org>
# http://fabien.seisen.org/python/
#
# Also modified by Adam Ambrose (aambrose @T pacbell.net) to write data in
# chunks (hardcoded to CHUNK_SIZE for now), so the entire contents of the file
# don't need to be kept in memory.
#
"""
enable to upload files using multipart/form-data
idea from:
upload files in python:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
timeoutsocket.py: overriding Python socket API:
http://www.timo-tasi.org/python/timeoutsocket.py
http://mail.python.org/pipermail/python-announce-list/2001-December/001095.html
import urllib2_files
import urllib2
u = urllib2.urlopen('http://site.com/path' [, data])
data can be a mapping object or a sequence of two-elements tuples
(like in original urllib2.urlopen())
varname still need to be a string and
value can be string of a file object
eg:
((varname, value),
(varname2, value),
)
or
{ name: value,
name2: value2
}
"""
import os
import socket
import sys
import stat
import mimetypes
import mimetools
import httplib
import urllib
import urllib2
CHUNK_SIZE = 65536
class FileUpload:
    """Tags an open file object with a MIME content type.

    The patched urllib2 handlers encode instances found in the POST data as
    multipart file parts rather than ordinary form fields.
    """
    def __init__(self, fd, content_type):
        self.fd = fd
        self.content_type = content_type
def get_content_type(filename):
    """Guess the MIME type from *filename*; fall back to octet-stream."""
    mime, _encoding = mimetypes.guess_type(filename)
    return mime or 'application/octet-stream'
# If sock is None, just return the estimated size without sending anything.
def send_data(v_vars, v_files, boundary, sock=None):
    # Running total of the multipart payload size in bytes.
    l = 0
    # Plain form fields: one small part each.
    for (k, v) in v_vars:
        buffer=''
        buffer += '--%s\r\n' % boundary
        buffer += 'Content-Disposition: form-data; name="%s"\r\n' % k
        buffer += '\r\n'
        buffer += v + '\r\n'
        if sock:
            sock.send(buffer)
        l += len(buffer)
    # File parts: header first, then the contents streamed in chunks.
    for (k, v) in v_files:
        fd = v.fd
        file_size = os.fstat(fd.fileno())[stat.ST_SIZE]
        # Basename only — do not expose client-side paths to the server.
        name = fd.name.split('/')[-1]
        if isinstance(name, unicode):
            name = name.encode('UTF-8')
        buffer=''
        buffer += '--%s\r\n' % boundary
        buffer += 'Content-Disposition: form-data; name="%s"; filename="%s"\r\n' \
                  % (k, name)
        if v.content_type != None:
            content_type = v.content_type
        else:
            content_type = get_content_type(name)
        buffer += 'Content-Type: %s\r\n' % content_type
        buffer += 'Content-Length: %ld\r\n' % file_size
        buffer += '\r\n'
        l += len(buffer)
        if sock:
            sock.send(buffer)
            # NOTE(review): indentation reconstructed — the chunked send runs
            # only when a socket is present; the dry-run size estimate just
            # adds file_size below without reading the file.
            if hasattr(fd, 'seek'):
                fd.seek(0)
            while True:
                chunk = fd.read(CHUNK_SIZE)
                if not chunk: break
                sock.send(chunk)
        l += file_size
    # Closing boundary.
    buffer='\r\n'
    buffer += '--%s--\r\n' % boundary
    buffer += '\r\n'
    if sock:
        sock.send(buffer)
    l += len(buffer)
    return l
# Mainly a copy of HTTPHandler from urllib2, extended so POST data may be a
# mapping or a sequence of tuples mixing plain values with FileUpload
# objects; in the latter case the request is sent as multipart/form-data.
class newHTTPHandler(urllib2.BaseHandler):
    def http_open(self, req):
        return self.do_open(httplib.HTTP, req)

    def do_open(self, http_class, req):
        """Open *req* via *http_class*, streaming any FileUpload fields.

        Splits non-string POST data into plain variables (v_vars) and file
        uploads (v_files); sizes the multipart body first (send_data with
        sock=None) so Content-length can be sent, then streams it.
        Raises urllib2.URLError on connection failure.
        """
        data = req.get_data()
        v_files=[]
        v_vars=[]
        # mapping object (dict)
        if req.has_data() and type(data) != str:
            if hasattr(data, 'items'):
                data = data.items()
            else:
                try:
                    if len(data) and not isinstance(data[0], tuple):
                        raise TypeError
                except TypeError:
                    # Re-raise with the original traceback (py2 syntax).
                    ty, va, tb = sys.exc_info()
                    raise TypeError, "not a valid non-string sequence or mapping object", tb
            for (k, v) in data:
                if isinstance(v, FileUpload):
                    v_files.append((k, v))
                else:
                    v_vars.append( (k, v) )
            # no file ? convert to string
            if len(v_vars) > 0 and len(v_files) == 0:
                data = urllib.urlencode(v_vars)
                v_files=[]
                v_vars=[]
        host = req.get_host()
        if not host:
            raise urllib2.URLError('no host given')
        h = http_class(host) # will parse host:port
        if req.has_data():
            h.putrequest('POST', req.get_selector())
            if not 'Content-type' in req.headers:
                if len(v_files) > 0:
                    # Size the body first so Content-length is exact.
                    boundary = mimetools.choose_boundary()
                    l = send_data(v_vars, v_files, boundary)
                    h.putheader('Content-Type',
                                'multipart/form-data; boundary=%s' % boundary)
                    h.putheader('Content-length', str(l))
                else:
                    h.putheader('Content-type',
                                'application/x-www-form-urlencoded')
                    if not 'Content-length' in req.headers:
                        h.putheader('Content-length', '%d' % len(data))
        else:
            h.putrequest('GET', req.get_selector())
        scheme, sel = urllib.splittype(req.get_selector())
        sel_host, sel_path = urllib.splithost(sel)
        h.putheader('Host', sel_host or host)
        for name, value in self.parent.addheaders:
            name = name.capitalize()
            if name not in req.headers:
                h.putheader(name, value)
        for k, v in req.headers.items():
            h.putheader(k, v)
        # httplib will attempt to connect() here. be prepared
        # to convert a socket error to a URLError.
        try:
            h.endheaders()
        except socket.error, err:
            raise urllib2.URLError(err)
        if req.has_data():
            if len(v_files) >0:
                # Second pass over send_data actually streams the files.
                l = send_data(v_vars, v_files, boundary, h)
            elif len(v_vars) > 0:
                # if data is passed as dict ...
                data = urllib.urlencode(v_vars)
                h.send(data)
            else:
                # "normal" urllib2.urlopen()
                h.send(data)
        code, msg, hdrs = h.getreply()
        fp = h.getfile()
        if code == 200:
            resp = urllib.addinfourl(fp, hdrs, req.get_full_url())
            resp.code = code
            resp.msg = msg
            return resp
        else:
            # Delegate non-200 responses to the opener's error chain.
            return self.parent.error('http', req, fp, code, msg, hdrs)
# Replace urllib2's stock HTTP handler with the upload-aware one; the
# original class stays reachable as urllib2._old_HTTPHandler.
urllib2._old_HTTPHandler = urllib2.HTTPHandler
urllib2.HTTPHandler = newHTTPHandler
class newHTTPSHandler(newHTTPHandler):
    # Same upload-aware behaviour over TLS: reuse do_open with httplib.HTTPS.
    def https_open(self, req):
        return self.do_open(httplib.HTTPS, req)

# Install the HTTPS replacement globally as well.
urllib2.HTTPSHandler = newHTTPSHandler
# Command-line demo: POST the file given with -f to the URL given with -u.
if __name__ == '__main__':
    import getopt
    import urllib2
    import urllib2_file
    import string
    import sys

    def usage(progname):
        # Print command-line synopsis.
        print """
SYNTAX: %s -u url -f file [-v]
""" % progname

    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hvu:f:')
    except getopt.GetoptError, errmsg:
        print "ERROR:", errmsg
        sys.exit(1)
    v_url = ''
    v_verbose = 0
    v_file = ''
    for name, value in opts:
        if name in ('-h',):
            usage(sys.argv[0])
            sys.exit(0)
        elif name in ('-v',):
            v_verbose += 1
        elif name in ('-u',):
            v_url = value
        elif name in ('-f',):
            v_file = value
        else:
            print "invalid argument:", name
            sys.exit(2)
    # Both -u and -f are mandatory.
    error = 0
    if v_url == '':
        print "need -u"
        error += 1
    if v_file == '':
        print "need -f"
        error += 1
    if error > 0:
        sys.exit(3)
    fd = open(v_file, 'r')
    # NOTE(review): the handler above only special-cases FileUpload
    # instances; a bare file object here appears to fall through to the
    # urlencoded branch — confirm intended behaviour.
    data = {
        'filename' : fd,
        }
    # u = urllib2.urlopen(v_url, data)
    req = urllib2.Request(v_url, data, {})
    try:
        u = urllib2.urlopen(req)
    except urllib2.HTTPError, errobj:
        print "HTTPError:", errobj.code
    else:
        buf = u.read()
        print "OK"
| Python |
# Copyright 2013 Mario Graff Guerrero
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from multiprocessing import Process, cpu_count
import time
class Pool(object):
    """Minimal process pool keeping at most *cpu* worker Processes alive."""

    def __init__(self, cpu=cpu_count()/2):
        # NOTE(review): the default is evaluated once, at class-definition
        # time, and uses py2 integer division of the host CPU count.
        self._cpu = cpu

    def run(self, func, lst_args):
        """Spawn func(*args) for each argument tuple in lst_args.

        lst_args is reversed and emptied in place (pop() from the end keeps
        the original order).  Polls every 0.1s, topping the pool up as
        workers die.  Returns once every tuple has been *started* — live
        workers may still be running.
        """
        lst_args.reverse()
        process_lst = []
        while len(lst_args):
            # Drop finished workers (py2 filter returns a list).
            process_lst = filter(lambda x: x.is_alive(), process_lst)
            # Fill the free slots up to the configured CPU budget.
            for i in range(len(process_lst), self._cpu):
                if len(lst_args) == 0:
                    continue
                p = Process(target=func, args=lst_args.pop())
                p.start()
                process_lst.append(p)
            time.sleep(0.1)
class VerifyOutput(object):
    """Heuristic sanity check for time-series forecasts.

    Fits simple parametric curves (line, power, exp) to the training
    series *ts* and, via a normalised MAE, compares the best curve's fit
    on ts (value_ts) against its error once the forecast *vs* is appended
    (value).  verify() rejects forecasts whose error ratio is too large.
    """
    def __init__(self, max_error=16, forecast=True):
        self._max_error = max_error   # rejection threshold for ratio()
        self.value = None             # last error including the forecast
        self._forecast = forecast     # treat vs as a continuation of ts

    def get_value(self, ts, vs):
        self.compute(ts, vs)
        return self.value

    def get_value_ts(self, ts, vs):
        self.compute(ts, vs)
        return self.value_ts

    def ratio(self, ts, vs):
        """(value+1)/(value_ts+1): how much worse the forecast is."""
        self.compute(ts, vs)
        return (self.value + 1) / (self.value_ts + 1)

    def compute(self, ts, vs):
        """Fit each candidate curve to ts and record the best pair of
        errors in self.value_ts (fit) and self.value (fit + forecast)."""
        from scipy import optimize
        if self._forecast:
            # vs continues ts on the same time axis.
            x = np.arange(ts.shape[0] + vs.shape[0])
        else:
            # vs is evaluated on its own axis starting at 0.
            x = np.arange(ts.shape[0])
            x = np.concatenate((x, np.arange(vs.shape[0])))
        lst = []
        for func in [self.line, self.power, self.exp]:
            try:
                popt, pcov = optimize.curve_fit(func, x[:ts.shape[0]], ts)
            except RuntimeError:
                # curve_fit failed to converge; skip this model.
                continue
            lst.append((self.n_mae(ts, func(x[:ts.shape[0]],
                                            *popt)),
                        self.n_mae(np.concatenate((ts,
                                                   func(x[ts.shape[0]:],
                                                        *popt))),
                                   np.concatenate((ts, vs)))))
        if len(lst) == 0:
            # No model converged: flag as unusable.
            self.value = np.inf
            self.value_ts = 0
        else:
            # Pick the model with the best training fit (py2 map -> list).
            r = lst[np.argmin(map(lambda x: x[0], lst))]
            self.value = r[1]
            self.value_ts = r[0]

    def verify(self, ts, vs):
        """True when the forecast vs looks acceptable for series ts."""
        if ts.shape[0] + vs.shape[0] > 200:
            if vs.shape[0] > 190:
                raise Exception("At this moment the forecast cannot be\
 greater than 190")
            # Keep only the most recent points so ts+vs fits in 200.
            cnt = 200 - vs.shape[0]
            ts = ts[-cnt:]
        r = self.ratio(ts, vs)
        if np.isnan(r):
            r = np.inf
        if r > self._max_error:
            return False
        return True

    @staticmethod
    def line(x, a, b):
        return a*x + b

    @staticmethod
    def exp(x, a):
        return np.exp(a*x)

    @staticmethod
    def power(x, a, b, c):
        return a*x**b + c

    @staticmethod
    def n_mae(a, b):
        """MAE after affinely mapping a's range onto [0, 1]."""
        try:
            # Solve m*a.min()+c == 0, m*a.max()+c == 1.
            m, c = np.linalg.solve([[a.min(), 1], [a.max(), 1]],
                                   [0, 1])
        except np.linalg.LinAlgError:
            # Constant series: fall back to the identity scaling.
            m = 1
            c = 0
        ap = a * m + c
        bp = b * m + c
        return np.fabs(ap - bp).mean()
| Python |
# Copyright 2013 Mario Graff Guerrero
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from SimpleGP.forest import SubTreeXOPDE, SubTreeXO
class Classification(SubTreeXO):
    """Multiclass classification on top of SubTreeXO.

    Integer labels are one-hot encoded for training; prediction is the
    argmax over the per-class outputs, with rows containing non-finite
    values mapped to nan.
    """
    def train(self, x, f):
        """Train on inputs *x* and integer class labels *f*."""
        nclasses = np.unique(f).shape[0]
        onehot = np.zeros((f.shape[0], nclasses), dtype=self._dtype)
        onehot[np.arange(onehot.shape[0]), f.astype(np.int)] = 1
        super(Classification, self).train(x, onehot)
        return self

    def predict(self, X, ind=None):
        """Predicted class per row of *X*; nan where outputs are not finite."""
        outputs = super(Classification, self).predict(X, ind=ind)
        labels = outputs.argmax(axis=1).astype(self._dtype)
        bad = ~np.all(np.isfinite(outputs), axis=1)
        labels[bad] = np.nan
        return labels

    @classmethod
    def init_cl(cls, nrandom=0, **kwargs):
        """Factory: instance with no random constants unless overridden."""
        return cls(nrandom=nrandom, **kwargs)

    @staticmethod
    def BER(y, yh):
        """Balanced error rate (percentage) of prediction yh vs truth y."""
        classes = np.unique(y)
        err = 0
        for cl in classes:
            mask = y == cl
            err += (y[mask] != yh[mask]).sum() / float(mask.sum())
        return (err / float(classes.shape[0])) * 100.

    @staticmethod
    def success(y, yh):
        """Fraction of samples classified correctly."""
        return (y == yh).sum() / float(y.shape[0])
class ClassificationPDE(SubTreeXOPDE, Classification):
    """Classification variant that uses the PDE-based subtree crossover."""
| Python |
# Copyright 2013 Mario Graff Guerrero
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from SimpleGP.simplegp import GP
from SimpleGP.recursiveGP import RecursiveGP
from SimpleGP.utils import VerifyOutput
class TimeSeries(GP):
    """GP specialised for recursive multi-step time-series forecasting."""

    def __init__(self, nsteps=2, nlags=1, **kwargs):
        super(TimeSeries, self).__init__(**kwargs)
        self._nsteps = nsteps                  # points forecast ahead
        self._nlags = nlags                    # autoregressive window size
        self._verify_output = VerifyOutput()   # forecast sanity checker

    @property
    def nsteps(self):
        """Number of steps, i.e., points ahead"""
        return self._nsteps

    @property
    def nlags(self):
        """Number of Lags"""
        return self._nlags

    def test_f(self, r):
        # In addition to the base check, reject forecasts that fail the
        # VerifyOutput heuristic against the training series.
        flag = super(TimeSeries, self).test_f(r)
        if not flag:
            return flag
        return self._verify_output.verify(self._f, r)

    def predict(self, X, ind=None):
        """Forecast up to nsteps points, feeding each prediction back in.

        Temporarily overwrites row 0 of self._x for every step and restores
        the original matrix before returning.
        """
        end = self.nsteps
        if X.shape[1] > self.nlags and X.shape[0] < end:
            end = X.shape[0]
        if X.shape[0] < end:
            # Not enough rows: replicate the last one for every step.
            x = np.repeat(np.atleast_2d(X[-1]), end,
                          axis=0)
        else:
            x = X.copy()
        xorigin = self._x.copy()
        nlags = self.nlags
        pr = np.zeros(end, dtype=self._dtype)
        for i in range(end):
            self._x[0] = x[i]
            pr[i] = self.eval(ind)[0].copy()
            if i+1 < end:
                # Shift the lag window and insert the fresh prediction.
                x[i+1, 1:nlags] = x[i, :nlags-1]
                x[i+1, 0] = pr[i]
        self._x[:] = xorigin[:]
        return pr

    def predict_best(self, X=None):
        # Forecast with the best individual, defaulting to the last row
        # of the training inputs as the starting point.
        if X is None:
            X = np.atleast_2d(self._x[-1])
        return self.predict(X, ind=self.get_best())

    @classmethod
    def run_cl(cls, serie, y=None, test=None, nlags=None, max_length=None,
               **kwargs):
        """Convenience runner: builds the lag matrix when *serie* is 1-D."""
        if serie.ndim == 1:
            assert y is None
            if nlags is None:
                nlags = cls.compute_nlags(serie.shape[0])
            X, y = cls.create_W(serie, window=nlags)
            if max_length is None:
                max_length = X.shape[0] // 2
            if test is None:
                test = np.atleast_2d(X[-1].copy())
            return super(TimeSeries, cls).run_cl(X, y, nlags=nlags,
                                                 test=test,
                                                 max_length=max_length,
                                                 **kwargs)
        # 2-D input: caller must supply targets and the lag count.
        assert y is not None and nlags is not None
        if max_length is None:
            max_length = serie.shape[0] // 2
        if test is None:
            test = np.atleast_2d(serie[-1].copy())
        return super(TimeSeries, cls).run_cl(serie, y, nlags=nlags,
                                             test=test,
                                             max_length=max_length, **kwargs)

    @staticmethod
    def compute_nlags(size):
        # Default lag count grows logarithmically with the series length.
        return int(np.ceil(np.log2(size)))

    @staticmethod
    def create_W(serie, window=10):
        """Sliding-window design matrix and targets for an AR model.

        Returns (X, y) where each row of X holds the previous *window*
        values in most-recent-first order and y is the next value.
        """
        assert serie.shape[0] > window
        w = np.zeros((serie.shape[0] - window, window), dtype=int)
        w[:, :] = np.arange(window)
        w = w + np.arange(w.shape[0])[:, np.newaxis]
        return serie[w][:, ::-1], serie[window:]
class RTimeSeries(RecursiveGP, TimeSeries):
    """Time-series forecaster built on the recursive GP variant."""

    def train(self, x, f):
        # Mark the start of every nsteps-long segment in self._cases.
        index = np.arange(0, f.shape[0], self._nsteps)
        self._cases = np.zeros(x.shape[0], dtype=np.int)
        self._cases[index] = 1
        super(RTimeSeries, self).train(x, f)

    def predict_best(self, xreg=None):
        """Forecast nsteps values with the best individual.

        Builds a forecast input block seeded with the last nlags targets
        (optionally with exogenous columns *xreg*), evaluates, and then
        restores the original training data.
        """
        x, f = self._x, self._f
        xp = np.zeros((self._nsteps, x.shape[1]), dtype=self._dtype)
        # Most recent target first, matching the lag ordering.
        xp[0, :self._nlags] = f[-self._nlags:][::-1].copy()
        if xreg is not None:
            xp[:, self._nlags:] = xreg[:, :]
        # Temporarily swap in the forecast block, evaluate, swap back.
        self.train(xp, np.zeros(self._nsteps, dtype=self._dtype))
        pr = self.eval(self.get_best())
        self.train(x, f)
        self._xp = xp
        return pr
| Python |
# Copyright 2013 Mario Graff Guerrero
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import types
import numpy as np
from SimpleGP.Simplify_mod import Simplify
from SimpleGP.eval import Eval
from SimpleGP.tree import Tree
from SimpleGP.simplega import SimpleGA
class GP(SimpleGA):
"""
Steady state Genetic Programming system with tournament selection,
subtree crossover and mutation.
- It simplifies each new individual if do_simplify is set to True.
>>> import numpy as np
>>> from SimpleGP import GP
First let us create a simple regression problem
>>> _ = np.random.RandomState(0)
>>> x = np.linspace(0, 1, 100)
>>> pol = np.array([0.2, -0.3, 0.2])
>>> X = np.vstack((x**2, x, np.ones(x.shape[0]))).T
>>> f = (X * pol).sum(axis=1)
The objective is to find an expresion that approximate f
>>> s = GP.init_cl(max_length=100).train(x[:, np.newaxis], f)
>>> s.run()
True
The expression found is:
>>> ex = s.print_infix()
Eval the best expression found with a new inputs
>>> x1 = np.linspace(-1, 1, 100)
>>> pr = s.predict(x1[:, np.newaxis])
"""
def __init__(self,
func=['+', '-', '*', '/'],
mutation_depth=5, min_length=2,
nrandom=100, max_length=262143, verbose=False,
max_depth=7, max_length_subtree=np.inf,
min_depth=1, pgrow=0.5, pleaf=None,
verbose_nind=None, argmax_nargs=None,
do_simplify=True, max_n_worst_epochs=3, ppm=0.0,
type_xpoint_selection=0,
**kwargs):
super(GP, self).__init__(**kwargs)
self.individuals_params(do_simplify, min_depth,
max_depth, max_length, min_length,
max_length_subtree)
self.constants_params(nrandom)
self.genetic_operators_params(pgrow, pleaf, ppm,
mutation_depth)
self.st_params()
self.max_length_checks()
self.function_set(func, argmax_nargs)
self.format_params(verbose, verbose_nind)
self.eval_params(max_n_worst_epochs)
self.min_max_length_params()
self.tree_params(type_xpoint_selection)
def individuals_params(self, do_simplify, min_depth,
max_depth, max_length, min_length,
max_length_subtree):
self._ind_generated_c = None
self._do_simplify = do_simplify
self._simplify = None
self.nodes_evaluated = 0
self._min_depth = min_depth
self._max_depth = max_depth
self._max_length = max_length
self._min_length = min_length
self._max_length_subtree = max_length_subtree
self._doing_tree = 0
self._computing_fitness = 0
def max_length_checks(self):
if 2**self._min_depth < self._min_length:
self._min_depth = int(np.ceil(np.log2(self._min_length)))
try:
depth = int(np.ceil(np.log2(self._max_length)))
if depth < self._max_depth:
self._max_depth = depth
if depth < self._mutation_depth:
self._mutation_depth = depth
if self._mutation_depth <= self._min_depth:
self._mutation_depth = self._min_depth + 1
except OverflowError:
pass
def genetic_operators_params(self, pgrow, pleaf, ppm,
mutation_depth):
self._pgrow = pgrow
self._pleaf = pleaf
self._ppm = ppm
self._mutation_depth = mutation_depth
def constants_params(self, nrandom):
self._nrandom = nrandom
self.create_random_constants()
t = self._nrandom if self._nrandom >= 100 else 100
self._constants2 = np.zeros(t).astype(self._dtype)
def create_random_constants(self):
self._constants = np.random.uniform(-10,
10,
self._nrandom).astype(self._dtype)
def function_set(self, _func, argmax_nargs=None):
"""
This function set from all the functions available which ones
form the function set
"""
self._output = 0
self._output_pos = 15
func = ['+', '-', '*', '/', 'abs', 'exp', 'sqrt',
'sin', 'cos', 'sigmoid', 'if', 'max', 'min',
'ln', 'sq', 'output', 'argmax']
self._nop = np.array([2, 2, 2, 2, 1, 1, 1, 1, 1, 1,
3, 2, 2, 1, 1, -1, -1])
self._func = np.asarray(func)
self.__set_narg_to_argmax(argmax_nargs)
key = {}
[key.setdefault(v, k) for k, v in enumerate(self._func)]
_func = filter(lambda x: x in key, _func)
_func = filter(lambda x: x != 'output', _func)
self._func_allow = np.array(map(lambda x: key[x], _func),
dtype=np.int)
self._max_nargs = self._nop[self._func_allow].max()
if (self._func_allow == 16).sum() and self._nop[16] == -1:
raise Exception("It is not specified the number\
of argmax arguments")
def available_functions(self):
return self._func
def __set_narg_to_argmax(self, nargs):
"""Setting the number of arguments to argmax"""
if nargs is None:
return
self._nop[16] = nargs
def eval_params(self, max_n_worst_epochs):
self._eval = None
self._use_cache = False
self._max_n_worst_epochs = max_n_worst_epochs
def format_params(self, verbose, verbose_nind):
self._verbose = verbose
if self._stats:
self.length_per_gen = np.zeros(self._gens)
self._left_p = "("
self._right_p = ")"
if verbose_nind is None:
self._verbose_nind = self._popsize
else:
self._verbose_nind = verbose_nind
def st_params(self):
self._st = None
self._p_der_st = None
self._error_st = None
def min_max_length_params(self, minimum=None, maximum=None):
if minimum is not None:
self._min_length = minimum
if maximum is not None:
self._max_length = maximum
def tree_params(self, type_xpoint_selection=0):
self._tree_length = np.empty(self._max_length,
dtype=np.int)
self._tree_mask = np.empty(self._max_length,
dtype=np.int)
self._tree = Tree(self._nop,
self._tree_length,
self._tree_mask,
self._min_length,
self._max_length,
type_xpoint_selection=type_xpoint_selection)
@property
def popsize(self):
"""Population size"""
return self._popsize
@popsize.setter
def popsize(self, popsize):
"""Set the population size, it handles the case where the new
population size is smaller or larger than the current one
"""
if self._popsize == popsize:
return
if self._popsize > popsize:
index = self._fitness.argsort()[::-1][:popsize]
self._p = self._p[index]
self._fitness = self._fitness[index]
self._p_constants = self._p_constants[index]
else:
d = popsize - self._popsize
self._p.resize(popsize)
self._p_constants.resize(popsize)
for i, ind, cons in self.create_population_generator(d):
pos = i + self._popsize
self._p[pos] = ind
self._p_constants[pos] = cons
self._popsize = popsize
@property
def nfunc(self):
"""Number of function in the function set"""
return self._nop.shape[0]
def simplify(self, ind, constants=None):
k = ind
if isinstance(ind, types.IntType):
ind = self._p[k]
constants = self._p_constants[k]
if constants is None:
constants = self._constants
if not self._do_simplify:
c = np.where(ind >= (self._nop.shape[0] + self._x.shape[1]))[0]
cons = np.zeros(c.shape[0], dtype=self._dtype)
ncons = (self._nop.shape[0] + self._x.shape[1])
for _k, v in enumerate(c):
cons[_k] = constants[ind[v] - ncons]
ind[v] = _k + ncons
self._ind_generated_c = cons
if isinstance(k, types.IntType):
self._p[k] = ind
self._p_constants[k] = self._ind_generated_c
return ind
if ind.shape[0] >= self._constants2.shape[0]:
self._constants2 = np.zeros(ind.shape[0]).astype(self._dtype)
self._simplify.set_constants(self._constants2)
ind = self._simplify.simplify(ind, constants)
ncons = self._simplify.get_nconstants()
self._ind_generated_c = self._constants2[:ncons].copy()
if isinstance(k, types.IntType):
self._p[k] = ind
self._p_constants[k] = self._ind_generated_c
return ind
def train(self, x, f):
super(GP, self).train(x, f)
self._eval = Eval(0, self._x.shape[1],
self._nop, self._max_nargs)
self._st = None
self._p_der_st = None
self._error_st = None
self._simplify = Simplify(x.shape[1], self._nop)
self._simplify.set_constants(self._constants2)
self._tree.set_nvar(self._x.shape[1])
return self
def predict(self, X, ind=None):
if ind is None:
ind = self.get_best()
x = self._x.copy()
init, end = 0, None
Xs = X.shape[0]
xs = x.shape[0]
pr = None
npcon = np.concatenate
while end is None or init < Xs:
end = xs if (init + xs) < Xs else Xs - init
self._x[:end] = X[init:(end+init)]
init += xs
_pr = self.eval(ind).copy()
pr = _pr[:end] if pr is None else npcon((pr, _pr[:end]))
self._x[:] = x[:]
self.eval(ind)
return pr
def create_population_generator(self, popsize=None):
if popsize is None:
popsize = self._popsize
depth = self._max_depth
for i in range(popsize):
ind = self.create_random_ind(depth=depth)
cons = self._ind_generated_c
depth -= 1
if depth < self._min_depth:
depth = self._max_depth
yield (i, ind, cons)
def create_population(self):
if self._fname_best is not None and\
os.path.isfile(self._fname_best)\
and self.load_prev_run():
return
self._p_constants = np.empty(self._popsize, dtype=np.object)
self._p = np.empty(self._popsize, dtype=np.object)
self._fitness = np.zeros(self._popsize)
self._fitness[:] = -np.inf
for i, ind, cons in self.create_population_generator():
self._p[i] = ind
self._p_constants[i] = cons
def isfunc(self, a):
return a < self._nop.shape[0]
def isvar(self, a):
nfunc = self._nop.shape[0]
nvar = self._x.shape[1]
return (a >= nfunc) and (a < nfunc+nvar)
def isconstant(self, a):
nfunc = self._nop.shape[0]
nvar = self._x.shape[1]
return a >= nfunc+nvar
def any_constant(self, ind):
return self._tree.any_constant(ind)
def random_func(self, first_call=False):
k = np.random.randint(self._func_allow.shape[0])
return self._func_allow[k]
def random_leaf(self):
if np.random.rand() < 0.5 or self._nrandom == 0:
l = np.random.randint(self._x.shape[1])
return l + self._func.shape[0]
else:
l = np.random.randint(self._constants.shape[0])
return l + self._func.shape[0] + self._x.shape[1]
def create_random_ind_full(self, depth=3, first_call=True,
**kwargs):
res = self.create_random_ind_full_inner(depth=depth,
first_call=first_call)
if isinstance(res, types.ListType):
res = np.asarray(res)
else:
res = np.asarray([res])
ind = res
ind = self.simplify(ind)
return ind
def create_random_ind_full_inner(self, depth=3, first_call=False):
if depth == 0:
return self.random_leaf()
else:
op = self.random_func(first_call)
if isinstance(op, types.ListType):
return op
nargs = self._nop[op]
res = [op]
for i in range(nargs):
if first_call:
self._doing_tree = i
tmp = self.create_random_ind_full_inner(depth-1)
if isinstance(tmp, types.ListType):
res += tmp
else:
res.append(tmp)
return res
def create_random_ind_grow(self, depth=3,
first_call=True,
**kwargs):
res = self.create_random_ind_grow_inner(depth=depth,
first_call=first_call)
if isinstance(res, types.ListType):
res = np.asarray(res)
else:
res = np.asarray([res])
ind = res
ind = self.simplify(ind)
return ind
def create_random_ind_grow_inner(self, depth=3, first_call=False):
if depth == 0:
return self.random_leaf()
elif first_call or np.random.rand() < 0.5:
op = self.random_func(first_call)
if isinstance(op, types.ListType):
return op
nargs = self._nop[op]
res = [op]
for i in range(nargs):
if first_call:
self._doing_tree = i
tmp = self.create_random_ind_grow_inner(depth-1)
if isinstance(tmp, types.ListType):
res += tmp
else:
res.append(tmp)
return res
else:
return self.random_leaf()
def create_random_ind(self, depth=4, first_call=True):
ind = None
while (ind is None or ind.shape[0] > self._max_length
or ind.shape[0] < self._min_length):
if np.random.rand() < self._pgrow:
ind = self.create_random_ind_grow(depth=depth,
first_call=first_call)
else:
ind = self.create_random_ind_full(depth=depth,
first_call=first_call)
if depth > self._min_depth:
depth -= 1
return ind
def traverse(self, ind, pos=0):
return self._tree.traverse(ind, pos=pos)
def subtree_selection(self, father):
if father.shape[0] == 1:
return 0
if self._pleaf is None:
return np.random.randint(father.shape[0])
if np.random.rand() < 1 - self._pleaf:
points = np.where(father < self._func.shape[0])[0]
if points.shape[0] > 0:
point = np.random.randint(points.shape[0])
return points[point]
points = np.where(father >= self._func.shape[0])[0]
point = np.random.randint(points.shape[0])
return points[point]
def length(self, ind):
# l_p2 = np.zeros_like(ind)
p = self._tree.length(ind)
return self._tree_length[:p]
def crossover(self, father1, father2, p1=-1,
p2=-1):
"""
Performs subtree crossover. p1 and p2 are the crossing points
where -1 indicates that these points are computed in self._tree
"""
ncons = self._p_constants[self._xo_father1].shape[0]
ind = self._tree.crossover(father1,
father2,
ncons=ncons,
p1=p1, p2=p2)
if self._xo_father2 is not None:
c2 = self._p_constants[self._xo_father2]
else:
c2 = self._constants2
constants = np.concatenate((self._p_constants[self._xo_father1], c2))
ind = self.simplify(ind, constants)
if ind.shape[0] > self._max_length or ind.shape[0] < self._min_length:
return self.create_random_ind()
return ind
def point_mutation(self, father1):
try:
cl_nop = self._cl_nop
except AttributeError:
cl_nop = {}
for id in np.unique(self._nop):
cl_nop[id] = np.where(self._nop == id)[0]
self._cl_nop = cl_nop
ind = father1.copy()
ele = int(np.ceil(ind.shape[0] / 10.))
index = np.arange(ind.shape[0])
np.random.shuffle(index)
index = index[:ele]
for i in index:
op = ind[i]
if op < self._nop.shape[0]:
a = cl_nop[self._nop[op]]
np.random.shuffle(a)
ind[i] = a[0]
else:
ind[i] = self.random_leaf()
return ind
def mutation(self, father1):
if np.random.rand() < self._ppm:
return self.point_mutation(father1)
d = np.random.randint(self._min_depth,
self._mutation_depth)
father2 = self.create_random_ind(depth=d,
first_call=True)
self._xo_father2 = None
ind = self.crossover(father1, father2)
return ind
def set_extras_to_ind(self, k, *args, **kwargs):
pass
def eval(self, ind=None, **kwargs):
if ind is None:
ind = self.get_best()
self._computing_fitness = None
if isinstance(ind, types.IntType):
self._computing_fitness = ind
return self.eval_ind(self._p[ind],
pos=0,
constants=self._p_constants[ind],
**kwargs)
return self.eval_ind(ind, **kwargs)
def get_p_der_st(self, ind):
if self._p_der_st is None:
self._error_st = np.ones_like(self._st)
self._p_der_st = np.ones((self._st.shape[0],
self._st.shape[1]*self._max_nargs),
dtype=self._dtype,
order='C')
elif self._p_der_st.shape[0] < ind.shape[0]:
self._error_st.resize(self._st.shape)
self._p_der_st.resize((self._st.shape[0],
self._st.shape[1]*self._max_nargs))
self._error_st.fill(1)
self._p_der_st.fill(1)
return self._error_st, self._p_der_st
def get_st(self, ind):
if self._st is None:
self._st = np.empty((ind.shape[0], self._x.shape[0]),
dtype=self._dtype, order='C')
elif self._st.shape[0] < ind.shape[0]:
self._st.resize((ind.shape[0], self._x.shape[0]))
return self._st
def eval_ind(self, ind, pos=0, constants=None):
c = constants if constants is not None else self._constants
self.nodes_evaluated += ind.shape[0]
st = self.get_st(ind)
e = self._eval
e.set_pos(0)
e.eval_ind(ind,
self._x,
st,
c)
g = st[self._output].T
return g
def fitness(self, ind):
self._use_cache = False
self._computing_fitness = None
k = ind
if isinstance(ind, types.IntType):
self._computing_fitness = ind
if self._fitness[k] > -np.inf:
self._use_cache = True
return self._fitness[k]
constants = self._p_constants[ind]
ind = self._p[ind]
else:
constants = self._ind_generated_c
f = self.eval_ind(ind, pos=0, constants=constants)
f = - self.distance(self._f, f)
if np.isnan(f):
f = -np.inf
if isinstance(k, types.IntType):
self.set_extras_to_ind(k, ind=ind,
constants=constants)
self._fitness[k] = f
if self._best_fit is None or self._best_fit < f:
self._best_fit = f
self.new_best(k)
return self._best_fit
return f
def pre_crossover(self, father1=None, father2=None):
"""
This function is called before calling crossover, here
it is set _xo_father1 and _xo_father2 which contains
the position where the parents are.
"""
father1 = self.tournament() if father1 is None else father1
father2 = self.tournament() if father2 is None else father2
while not super(GP,
self).pre_crossover(father1,
father2):
father2 = self.tournament()
self._xo_father1 = father1
self._xo_father2 = father2
if self._tree.equal_non_const(self._p[father1],
self._p[father2]):
return False
return True
def genetic_operators(self):
if np.random.rand() < self._pxo and self.pre_crossover():
son = self.crossover(self._p[self._xo_father1],
self._p[self._xo_father2])
else:
self._xo_father1 = self.tournament()
son = self._p[self._xo_father1]
son = self.mutation(son)
return son
def stats(self):
i = self.gens_ind
if i - self._last_call_to_stats < self._verbose_nind:
return False
self._last_call_to_stats = i
if self._stats:
self.fit_per_gen[i/self._popsize] = self._fitness[self.get_best()]
i_pop = i / self._popsize
self.length_per_gen[i_pop] = np.asarray(map(lambda x: x.shape[0],
self._p)).mean()
if self._verbose:
bf = self._best_fit
if bf is None:
bf = -1.
print "Gen: " + str(i) + "/" + str(self._gens * self._popsize)\
+ " " + "%0.4f" % bf
return True
def kill_ind(self, kill, son):
super(GP, self).kill_ind(kill, son)
self._p_constants[kill] = self._ind_generated_c
self.set_extras_to_ind(kill, son, delete=True)
def run(self, exit_call=True):
self.length_per_gen = np.zeros(self._gens)
self.nodes_evaluated = 0
return super(GP, self).run(exit_call=exit_call)
def print_infix(self, ind=None, pos=0, constants=None):
if ind is None or isinstance(ind, types.IntType):
k = self.get_best() if ind is None else ind
ind = self._p[k]
if hasattr(self, '_p_constants'):
constants = self._p_constants[k]
if constants is None:
constants = self._constants
cdn, pos = self._print_infix(ind, pos=pos, constants=constants)
return cdn
def _print_infix(self, ind, pos=0, constants=None):
if ind[pos] < self._func.shape[0]:
func = self._func[ind[pos]]
nargs = self._nop[ind[pos]]
cdn = " "+func+" "
pos += 1
args = []
for i in range(nargs):
r, pos = self._print_infix(ind, pos, constants=constants)
args.append(r)
if nargs == 1:
cdn = cdn[1:-1]
cdn += self._left_p+args[0]+self._right_p
elif (nargs == 2 and func != 'max' and
func != 'min'):
cdn = cdn.join(args)
else:
cdn += self._left_p+",".join(args)+self._right_p
if nargs == 2:
return "("+cdn+")", pos
else:
return cdn, pos
elif ind[pos] < self._func.shape[0] + self._x.shape[1]:
return "X%s" % (ind[pos] - self._func.shape[0]), pos + 1
else:
c = ind[pos] - self._func.shape[0] - self._x.shape[1]
return str(constants[c]), pos + 1
def graphviz(self, ind=None, constants=None, fname=None,
var_names=None):
import StringIO
if ind is None or isinstance(ind, types.IntType):
k = self.get_best() if ind is None else ind
ind = self._p[k]
if hasattr(self, '_p_constants'):
constants = self._p_constants[k]
if constants is None:
constants = self._constants
self._g_pos = 0
if var_names is None:
self._var_names = map(lambda x: "X%s" % x, range(self._x.shape[1]))
else:
self._var_names = var_names
if isinstance(fname, types.FileType):
s = fname
elif isinstance(fname, StringIO.StringIO):
s = fname
elif fname is None:
s = StringIO.StringIO()
else:
s = open(fname, 'w')
s.write("digraph SimpleGP {\n")
s.write("""edge [dir="none"];\n""")
self._graphviz(ind, constants, s)
s.write("}\n")
if isinstance(s, StringIO.StringIO):
s.seek(0)
print s.read()
def _graphviz(self, ind, constants, fpt):
pos = self._g_pos
self._g_pos += 1
if ind[pos] < self._nop.shape[0]:
func = self._func[ind[pos]]
nargs = self._nop[ind[pos]]
fpt.write("""n%s [label="%s"];\n""" % (pos, func))
for i in range(nargs):
posd = self._graphviz(ind, constants, fpt)
fpt.write("""n%s -> n%s;\n""" % (pos, posd))
return pos
elif ind[pos] < self._func.shape[0] + self._x.shape[1]:
vn = self._var_names[ind[pos] - self._func.shape[0]]
fpt.write("""n%s [label="%s"];\n""" % (pos, vn))
return pos
else:
c = ind[pos] - self._func.shape[0] - self._x.shape[1]
c = constants[c]
fpt.write("""n%s [label="%0.4f"];\n""" % (pos, c))
return pos
def save(self, fname=None):
fname = fname if fname is not None else self._fname_best
if fname is None:
return False
with open(fname, 'w') as fpt:
np.save(fpt, self._p)
np.save(fpt, self._p_constants)
np.save(fpt, self._fitness)
np.save(fpt, self.gens_ind)
np.save(fpt, self.nodes_evaluated)
if self._stats:
np.save(fpt, self.fit_per_gen)
np.save(fpt, self.length_per_gen)
return True
def save_best(self, fname=None):
"""
Save only best individual
"""
bs = self.get_best()
mask = np.ones(self.popsize, dtype=np.bool)
mask[bs] = False
self.population[mask] = None
self._p_constants[mask] = None
return self.save(fname=fname)
def load_prev_run(self):
try:
fpt = open(self._fname_best)
self._p = np.load(fpt)
self._p_constants = np.load(fpt)
arr = filter(lambda x: self._p[x] is None,
range(self._p.shape[0]))
if len(arr):
for i, ind, c in self.create_population_generator(len(arr)):
_i = arr[i]
self.population[_i] = ind
self._p_constants[_i] = c
self._fitness = np.load(fpt)
self.gens_ind = int(np.load(fpt))
self.nodes_evaluated = np.load(fpt)
if self._stats:
self.fit_per_gen = np.load(fpt)
self.length_per_gen = np.load(fpt)
fpt.close()
if self._p.dtype == np.object\
and self._p.shape[0] == self._popsize:
return True
except IOError:
pass
return False
@classmethod
def init_cl(cls, argmax_nargs=2,
            func=["+", "-", "*", "/", 'abs', 'exp', 'sqrt',
                  'sin', 'cos', 'sigmoid', 'if', 'max', 'min',
                  'ln', 'sq', 'argmax'],
            seed=0, **kwargs):
    """Build an instance preconfigured with the full function set and
    an argmax of `argmax_nargs` arguments; extra keyword arguments are
    forwarded to the constructor untouched."""
    return cls(argmax_nargs=argmax_nargs,
               func=func, seed=seed, **kwargs)
@classmethod
def max_time_per_eval(cls, x, y,
                      popsize=1000,
                      max_length=1024,
                      seed=0,
                      **kwargs):
    """Estimate the worst-case wall-clock cost of one fitness
    evaluation: run a short evolution with maximal-size individuals
    while timing every fitness call, and return the largest time seen.
    """
    import time

    class Timed(cls):
        # Subclass that records the slowest fitness evaluation.
        def __init__(self, **kw):
            super(Timed, self).__init__(**kw)
            self.wstime = 0

        def fitness(self, ind):
            start = time.time()
            value = super(Timed, self).fitness(ind)
            elapsed = time.time() - start
            if self.wstime < elapsed:
                self.wstime = elapsed
            return value

    # Force full, fixed-depth individuals so the timing is a worst case.
    kwargs['func'] = ['+', '*', 'argmax']
    kwargs['nrandom'] = 0
    kwargs['argmax_nargs'] = 2
    kwargs['generations'] = 10
    max_depth = int(np.ceil(np.log2(max_length)))
    g = Timed.run_cl(x, y, max_length=max_length,
                     pgrow=0, verbose=True,
                     max_depth=max_depth,
                     min_depth=max_depth - 1,
                     seed=0, popsize=popsize,
                     **kwargs)
    return g.wstime
class GPMAE(GP):
    """
    This class exemplifies the change of the distance function.
    In the example, the distance is MAE then the derivative of this
    function is computed in the method compute_error_pr
    """

    def distance(self, y, yh):
        """Mean absolute error between target y and prediction yh."""
        return np.fabs(y - yh).mean()

    def compute_error_pr(self, ind, pos=0, constants=None, epoch=0):
        """Return (error derivative, prediction) for the individual
        currently being scored; on epoch 0 the cached evaluation is
        reused instead of re-evaluating the tree."""
        if epoch == 0:
            k = self._computing_fitness
            if k is None or (not hasattr(self, '_p_st')
                             or self._p_st[k] is None):
                g = self._st[self._output].T
            else:
                g = self._p_st[self._computing_fitness][self._output].T
        elif ind is None:
            g = self.eval(self._computing_fitness)
        else:
            g = self.eval_ind(ind, pos=pos, constants=constants)
        # d/dg |y - g| = -sign(y - g)
        return -1 * np.sign(self._f - g), g
class GPwRestart(GP):
    """GP with restarts: the evolution is re-run up to `ntimes` times,
    re-creating the population on every restart while seeding slot 0
    of the new population with the best individual found so far.
    """
    def __init__(self, ntimes=2, **kwargs):
        # ntimes <= 0 means restart indefinitely (until a timeout or a
        # signal stops the run).
        super(GPwRestart, self).__init__(**kwargs)
        self._ntimes = ntimes

    def create_population(self, flag=False):
        # Rebuild the population only on an explicit restart (flag=True)
        # or on the very first call; fitness is reset to -inf so every
        # individual is (re-)evaluated.
        if flag or not hasattr(self, '_p'):
            super(GPwRestart, self).create_population()
        self._fitness.fill(-np.inf)

    def stats(self):
        # Run the parent stats silently, then print our own progress
        # line that also shows the restart counter.
        verbose = self._verbose
        self._verbose = False
        flag = super(GPwRestart, self).stats()
        self._verbose = verbose
        if not flag:
            return flag
        if self._verbose:
            i = self.gens_ind
            bf = self._best_fit
            if bf is None:
                bf = -1.
            print "Gen: (" + str(self._cnt_ntimes) + ") " + str(i) + "/" +\
                str(self._gens * self._popsize)\
                + " " + "%0.4f" % bf
        return flag

    def run(self, exit_call=True):
        """
        This methods repeats the evolutionary process as many times as
        indicated in ntimes, the best individual found is kept during
        this process.
        """
        self._cnt_ntimes = 0
        ntimes = self._ntimes
        # NOTE(review): if this loop never executes (e.g. an immediate
        # timeout), nodes_evaluated/flag below are unbound -- confirm
        # callers never hit that path.
        while not self._timeout and (self._cnt_ntimes < ntimes or ntimes <= 0):
            self._cnt_ntimes += 1
            self.init()
            # Accumulate the evaluation count across restarts.
            nodes_evaluated = self.nodes_evaluated
            flag = super(GPwRestart, self).run(exit_call=False)
            nodes_evaluated += self.nodes_evaluated
            if not flag:
                self.nodes_evaluated = nodes_evaluated
                if exit_call:
                    self.on_exit()
                return False
            # Re-evaluate everyone (relies on Python 2's eager map for
            # its side effects), copy the best, then restart with a
            # fresh population seeded with that best individual.
            map(self.fitness, range(self._p.shape[0]))
            ind = self._p[self.get_best()].copy()
            cons = self._p_constants[self.get_best()].copy()
            fit = self.fitness(self.get_best())
            self.create_population(flag=True)
            self._p[0] = ind
            self._p_constants[0] = cons
            self._fitness[0] = fit
        if exit_call:
            self.on_exit()
        self.nodes_evaluated = nodes_evaluated
        return flag
if __name__ == '__main__':
    # Exercise the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| Python |
# Copyright 2013 Mario Graff Guerrero
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from SimpleGP.simplegp import GP
from SimpleGP.gppde import GPPDE
from SimpleGP.tree import Tree, SubTree, PDEXOSubtree
class GPForest(GP):
    """GP whose individuals are forests: one tree per output.

    The number of trees defaults to the number of classes (1-D target)
    or to the number of target columns, unless `ntrees` is given.
    """
    def __init__(self, ntrees=None, **kwargs):
        # Bug fix: the constructor used to discard its ntrees argument
        # (self._ntrees was unconditionally set to None); honor it so a
        # caller-supplied tree count actually takes effect in train().
        self._ntrees = ntrees
        super(GPForest, self).__init__(**kwargs)

    def train(self, x, f):
        """Set the training data, size the per-tree output table and
        make the root "output" function take one argument per tree.
        Returns self so calls can be chained."""
        super(GPForest, self).train(x, f)
        # np.int is a removed deprecated alias of the builtin int; the
        # dtype is identical.
        if self._ntrees is not None:
            self._output = np.empty(self._ntrees, dtype=int)
        else:
            if f.ndim == 1:
                # One tree per class label.
                self._output = np.empty(np.unique(f).shape[0], dtype=int)
            else:
                # One tree per target column.
                self._output = np.empty(f.shape[1], dtype=int)
        self._ntrees = self._output.shape[0]
        self._eval.set_output_function(self._output)
        self._nop[self._output_pos] = self._ntrees
        return self

    def min_max_length_params(self, minimum=None, maximum=None):
        """Update the minimum/maximum program length; a minimum of 0 is
        bumped to 1 because a program needs at least one node."""
        if minimum is not None:
            self._min_length = minimum
            if self._min_length == 0:
                self._min_length = 1
        if maximum is not None:
            self._max_length = maximum

    def tree_params(self, type_xpoint_selection=0):
        """Create the Tree helper used for crossover bookkeeping."""
        self._tree_length = np.empty(self._max_length, dtype=int)
        self._tree_mask = np.empty(self._max_length, dtype=int)
        self._tree = Tree(self._nop,
                          self._tree_length,
                          self._tree_mask,
                          self._min_length,
                          self._max_length,
                          select_root=0,
                          type_xpoint_selection=type_xpoint_selection)

    def random_func(self, first_call=False):
        """The root of every individual is forced to be the output
        function; deeper calls delegate to the parent."""
        if first_call:
            return self._output_pos
        return super(GPForest, self).random_func(first_call=first_call)
class SubTreeXO(GPForest):
    """GPForest variant whose crossover points are restricted to the
    same subtree of both parents (see tree.SubTree)."""
    def tree_params(self, type_xpoint_selection=0):
        """Create the SubTree helper used to pick crossover points."""
        # np.int is a removed deprecated alias of the builtin int; the
        # dtype is identical.
        self._tree_length = np.empty(self._max_length, dtype=int)
        self._tree_mask = np.empty(self._max_length, dtype=int)
        self._tree = SubTree(self._nop,
                             self._tree_length,
                             self._tree_mask,
                             self._min_length,
                             self._max_length,
                             select_root=0,
                             type_xpoint_selection=type_xpoint_selection)
class SubTreeXOPDE(GPPDE, GPForest):
    """Forest GP combining subtree-restricted crossover with
    PDE-guided crossing-point selection (see tree.PDEXOSubtree)."""
    def train(self, x, f):
        # NOTE(review): this duplicates the output-table setup that
        # GPForest.train already performs in the MRO; it is kept as-is
        # because the second pass is harmless and removing it would
        # change the super()-call interplay.
        super(SubTreeXOPDE, self).train(x, f)
        # np.int is a removed deprecated alias of the builtin int.
        if self._ntrees is not None:
            self._output = np.empty(self._ntrees, dtype=int)
        else:
            if f.ndim == 1:
                self._output = np.empty(np.unique(f).shape[0], dtype=int)
            else:
                self._output = np.empty(f.shape[1], dtype=int)
        self._ntrees = self._output.shape[0]
        self._eval.set_output_function(self._output)
        self._nop[self._output_pos] = self._ntrees
        return self

    def get_error(self, p1):
        """Sign of the error derivative at node p1 of the first father.

        Rebuilds the per-tree root offsets for father1 by traversing
        the encoded forest, back-propagates the output error with the
        PDE and returns the sign of the derivative at p1.
        """
        self._computing_fitness = self._xo_father1
        ind = self._p[self._xo_father1]
        pos = 1
        self._output[0] = pos
        for i in range(self._output.shape[0] - 1):
            pos = self._tree.traverse(ind, pos)
            self._output[i+1] = pos
        e, g = self.compute_error_pr(None)
        self._p_der[self._output] = e.T
        self._pde.compute(self._p[self._xo_father1], p1,
                          self._p_st[self._xo_father1])
        e = np.sign(self._p_der[p1])
        return e

    def tree_params(self, type_xpoint_selection=0):
        """Create the PDEXOSubtree helper for crossover selection."""
        self._tree_length = np.empty(self._max_length, dtype=int)
        self._tree_mask = np.empty(self._max_length, dtype=int)
        self._tree = PDEXOSubtree(self._nop,
                                  self._tree_length,
                                  self._tree_mask,
                                  self._min_length,
                                  self._max_length,
                                  select_root=0,
                                  type_xpoint_selection=type_xpoint_selection)
| Python |
# Copyright 2013 Mario Graff Guerrero
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import types
import os
import signal
class SimpleGA(object):
    """
    SimpleGA is a steady state genetic algorithm with tournament selection,
    uniform crossover and mutation.

    >>> import numpy as np
    >>> from SimpleGP import SimpleGA

    First let us create a simple regression problem

    >>> _ = np.random.RandomState(0)
    >>> x = np.linspace(0, 1, 100)
    >>> pol = np.array([0.2, -0.3, 0.2])
    >>> X = np.vstack((x**2, x, np.ones(x.shape[0]))).T
    >>> f = (X * pol).sum(axis=1)

    The objective is to find the coefficients 0.2, -0.3, and 0.2

    >>> s = SimpleGA.init_cl().train(X, f)
    >>> s.run()
    True
    """
    def __init__(self, popsize=1000, ppm=0.1, chromosome_length=3,
                 tournament_size=2, generations=50, seed=None, verbose=False,
                 pxo=0.9, pm=0.2, stats=False, fname_best=None,
                 walltime=None,
                 dtype=float):
        """popsize: population size; ppm: per-gene point-mutation
        probability; pxo/pm: crossover/mutation probabilities;
        fname_best: file used by save()/load_prev_run(); walltime:
        maximum number of seconds (enforced with SIGALRM).

        Note: dtype used to default to np.float, a removed deprecated
        alias of the builtin float; the value is identical.
        """
        self._popsize = popsize
        # Probability of *keeping* a gene during point mutation.
        self._ppm = 1 - ppm
        self._tsize = tournament_size
        self._gens = generations
        self._pxo = pxo
        self._pm = pm
        self._verbose = verbose
        self._chromosome_length = chromosome_length
        self.gens_ind = popsize
        self._dtype = dtype
        self._timeout = False
        self._stats = stats
        if stats:
            self.fit_per_gen = np.zeros(self._gens)
        self.set_seed(seed)
        self._best_fit = None
        self._fname_best = fname_best
        self._run = True
        self._last_call_to_stats = 0
        self._test_set = None
        # Persist the state on SIGTERM; optionally stop after walltime.
        signal.signal(signal.SIGTERM, self.on_exit)
        if walltime is not None:
            signal.signal(signal.SIGALRM, self.walltime)
            signal.alarm(walltime)

    @property
    def population(self):
        return self._p

    @property
    def popsize(self):
        """Population size"""
        return self._popsize

    @popsize.setter
    def popsize(self, popsize):
        """Set the population size, it handles the case where the new
        population size is smaller or larger than the current one
        """
        if self._popsize == popsize:
            return
        if self._popsize > popsize:
            # Shrink: keep the fittest individuals.
            index = self._fitness.argsort()[::-1][:popsize]
            self._p = self._p[index]
            self._fitness = self._fitness[index]
        else:
            # Grow: append random individuals.
            d = popsize - self._popsize
            cl = self._chromosome_length
            self._p.resize((popsize, cl))
            self._p[self._popsize:] = self.random_ind(size=((d, cl)))
        self._popsize = popsize

    @property
    def generations(self):
        """Number of generations"""
        return self._gens

    @generations.setter
    def generations(self, v):
        self._gens = v

    def test_f(self, x):
        """This method test whether the prediction is valid. It is called from
        new_best. Returns True when x is a valid prediction
        """
        return ((not np.any(np.isnan(x))) and
                (not np.any(np.isinf(x))))

    def new_best(self, k):
        """
        This method is called when the best so far is beaten by k.
        Here is verified that the best individual is capable of
        predicting the test set, in the case it is given.
        """
        if self._test_set is not None:
            x = self._test_set
            r = self.predict(x, k)
            if not self.test_f(r):
                # Invalid on the test set: discard this candidate.
                self._best_fit = None
                self._fitness[k] = -np.inf

    def init(self):
        """
        Setting some variables to the defaults values
        """
        self.gens_ind = 0
        self._run = True
        self._last_call_to_stats = 0
        self._best_fit = None

    def walltime(self, *args, **kwargs):
        """
        This method is called when the maximum number of seconds is reached.
        """
        self.on_exit(*args, **kwargs)
        self._timeout = True

    def on_exit(self, *args, **kwargs):
        """
        Method called at the end of the evolutionary process or when a
        signal is received
        """
        self.save()
        self._run = False

    def set_seed(self, seed):
        if seed is not None:
            np.random.seed(seed)

    def set_test(self, x):
        """
        x is the set test, this is used to test, during the evolution, that
        the best individual does not produce nan or inf
        """
        self._test_set = x.astype(self._dtype, copy=False, order='C')

    def train(self, x, f):
        """
        This is to set the training set.
        x and f are copy only if their types are not dtype
        """
        self._x = x.astype(self._dtype, copy=False, order='C')
        self._f = f.astype(self._dtype, copy=False, order='C')
        return self

    def crossover(self, father1, father2):
        """
        crossover performs an uniform crossover
        """
        # np.bool is a removed deprecated alias of the builtin bool.
        mask = np.random.binomial(1, 0.5, self._p.shape[1]).astype(bool)
        return father1 * mask + father2 * ~mask

    def random_ind(self, size=None):
        """
        Create a random individual
        """
        size = size if size is not None else self._p.shape[1]
        return np.random.uniform(-1, 1, size)

    def mutation(self, father1):
        """
        Mutation performs an uniform mutation with point mutation probability
        set by ppm
        """
        father2 = self.random_ind()
        # Keep each gene with probability 1 - ppm (np.bool removed alias).
        mask = np.random.binomial(1, self._ppm,
                                  self._p.shape[1]).astype(bool)
        return father1 * mask + father2 * ~mask

    def selection(self, *args, **kwargs):
        """
        Select a individual from the population.
        """
        return self.tournament(*args)

    def tournament(self, neg=False):
        """
        Tournament selection, it also performs negative tournament selection if
        neg=True
        """
        if not neg:
            func_cmp = lambda x, y: x < y
        else:
            func_cmp = lambda x, y: x > y
        best = np.random.randint(self._popsize) if self._popsize > 2 else 0
        for i in range(self._tsize-1):
            comp = np.random.randint(self._popsize) if self._popsize > 2 else 1
            while comp == best:
                comp = np.random.randint(self._popsize)
            if func_cmp(self.fitness(best), self.fitness(comp)):
                best = comp
        return best

    def load_prev_run(self):
        """
        Method used to load a previous run. It returns False if fails
        """
        try:
            # save() writes binary np.save data, so read binary too.
            fpt = open(self._fname_best, 'rb')
            self._p = np.load(fpt)
            self._fitness = np.load(fpt)
            self.gens_ind = np.load(fpt)
            fpt.close()
            if self._p.ndim == 2 and self._p.shape[0] == self._popsize \
                    and self._p.shape[1] == self._chromosome_length:
                return True
        except IOError:
            pass
        return False

    def create_population(self):
        """
        Create the initial population. It first called load_prev_run if
        this method returns False then it creates the population.
        """
        if self._fname_best is not None \
                and os.path.isfile(self._fname_best) \
                and self.load_prev_run():
            return
        self._p = self.random_ind(size=(self._popsize,
                                        self._chromosome_length))
        self._fitness = np.zeros(self._popsize)
        self._fitness[:] = -np.inf

    def eval(self, ind):
        """
        Evaluate a individual it receives the actual individual, i.e., the
        chromosomes
        """
        return (self._x * ind).sum(axis=1)

    def predict(self, X, ind=None):
        """
        Outputs the evaluation of the (ind)-th individual when the
        features are X
        """
        if ind is None:
            ind = self.get_best()
        return (X * self._p[ind]).sum(axis=1)

    def distance(self, y, hy):
        """
        Sum of squares errors
        """
        return ((y - hy)**2).mean()

    def fitness(self, ind):
        """
        Computes the fitness of ind. If ind is an integer, then it
        computes the fitness of the (ind)-th individual only if it has
        not been previously computed.
        """
        k = ind
        # Accept numpy integer indices as well: tournament() returns
        # numpy ints, which must hit the cache like plain ints do
        # (types.IntType was the Python 2-only spelling of int).
        if isinstance(ind, (int, np.integer)):
            if self._fitness[k] > -np.inf:
                return self._fitness[k]
            ind = self._p[ind]
        f = self.eval(ind)
        f = - self.distance(self._f, f)
        if np.isnan(f):
            f = -np.inf
        if isinstance(k, (int, np.integer)):
            self._fitness[k] = f
            if self._best_fit is None or self._best_fit < f:
                self._best_fit = f
                self.new_best(k)
            return self._best_fit
        return f

    def get_best(self):
        """
        Get the position of the best individual
        """
        return int(self._fitness.argmax())

    def pre_crossover(self, father1, father2):
        """
        This function is call before calling crossover, the idea
        is to test that the fathers are different.
        It returns True when the fathers are different
        """
        return not (father1 == father2)

    def genetic_operators(self):
        """
        Perform the genetic operations.
        """
        son = None
        if np.random.rand() < self._pxo:
            father1 = self.tournament()
            father2 = self.tournament()
            while not self.pre_crossover(father1, father2):
                father2 = self.tournament()
            son = self.crossover(self._p[father1], self._p[father2])
        if np.random.rand() < self._pm:
            son = son if son is not None else self._p[self.tournament()]
            son = self.mutation(son)
        son = son if son is not None else self.random_ind()
        return son

    def kill_ind(self, kill, son):
        """
        Replace the (kill)-th individual with son
        """
        self._p[kill] = son
        self._fitness[kill] = -np.inf

    def stats(self):
        """This function is call every time an offspring is created. The
        original idea is to print only statistics of the evolutionary process;
        however, at this stage is also used to verify the memory in GPPDE.
        This function is executed at the end of each generation and it returns
        False if this is not the case, otherwise returns True.
        """
        i = self.gens_ind
        if i - self._last_call_to_stats < self._popsize:
            return False
        self._last_call_to_stats = i
        if self._stats:
            # Integer division: a float index breaks on Python 3.
            self.fit_per_gen[i//self._popsize] = self._fitness[self.get_best()]
        if self._verbose:
            # Single parenthesized expression: prints identically on
            # Python 2 and Python 3.
            print("Gen: " + str(i) + "/" + str(self._gens * self._popsize) +
                  " " + "%0.4f" % self._fitness[self.get_best()])
        return True

    def run(self, exit_call=True):
        """
        Steady state genetic algorithm. Returns True if the evolution
        ended because the number of evaluations is reached. It returns False
        if it receives a signal or finds a perfect solution.
        The flag self._run is used to stop the evolution.
        """
        self.create_population()
        while (not self._timeout and
               self.gens_ind < self._gens*self._popsize and self._run):
            try:
                son = self.genetic_operators()
                kill = self.tournament(neg=True)
                self._kill_ind = kill
                self.kill_ind(kill, son)
                self.gens_ind += 1
                self.stats()
            except KeyboardInterrupt:
                if exit_call:
                    self.on_exit()
                return False
        flag = True
        if not self._run:
            flag = False
        if exit_call:
            self.on_exit()
        return flag

    def save(self, fname=None):
        """
        Save the population to fname if fname is None the save in
        self._fname_best. If both are None then it does nothing.
        """
        fname = fname if fname is not None else self._fname_best
        if fname is None:
            return
        # np.save writes binary data; the file must be opened binary.
        with open(fname, 'wb') as fpt:
            np.save(fpt, self._p)
            np.save(fpt, self._fitness)
            np.save(fpt, self.gens_ind)
            if self._stats:
                np.save(fpt, self.fit_per_gen)

    @classmethod
    def init_cl(cls, generations=10000,
                popsize=3, pm=0.1, pxo=0.9, seed=0,
                **kwargs):
        """
        Create a new instance of the class.
        """
        # Bug fix: pm was accepted but never forwarded, so the class
        # default (0.2) silently overrode the caller's value.
        ins = cls(generations=generations,
                  popsize=popsize,
                  seed=seed,
                  pxo=pxo,
                  pm=pm,
                  **kwargs)
        return ins

    @classmethod
    def run_cl(cls, x, f, test=None, ntries=10,
               **kwargs):
        """
        Returns a trained system that does not output nan or inf neither
        in the training set (i.e., x) or test set (i.e., test).
        """
        if 'seed' in kwargs:
            seed = kwargs['seed']
            if seed is not None:
                seed = int(seed)
        else:
            seed = 0
        test_f = lambda x: ((not np.any(np.isnan(x))) and
                            (not np.any(np.isinf(x))))
        kwargs['seed'] = seed
        for i in range(ntries):
            ins = cls.init_cl(**kwargs).train(x, f)
            if test is not None:
                ins.set_test(test)
            ins.run()
            r = ins.predict(x)
            if test_f(r):
                if test is not None:
                    if test_f(ins.predict(test)):
                        return ins
                else:
                    return ins
            # Retry with a fresh (unseeded) random state.
            kwargs['seed'] = None
        return None
if __name__ == "__main__":
    # Exercise the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| Python |
# Copyright 2013 Mario Graff Guerrero
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from SimpleGP.simplegp import GP
from SimpleGP.pde import PDE
from SimpleGP.tree import PDEXO
from SimpleGP.Rprop_mod import RPROP2
class GPPDE(GP):
    """GP whose crossover points are chosen with partial-derivative
    information (PDE): the output error of the first father is
    back-propagated through its evaluated tree and used to pick a
    promising crossing point in the second father.
    """
    def __init__(self, max_mem=300.0,
                 update_best_w_rprop=False,
                 **kwargs):
        """max_mem: soft cap (MB) on the cached per-individual
        evaluation states (see free_mem); update_best_w_rprop: tune the
        constants of every new best individual with RPROP."""
        super(GPPDE, self).__init__(**kwargs)
        self._max_mem = max_mem
        self._update_best_w_rprop = update_best_w_rprop
        # Cached evaluation state of each individual (np.object is a
        # removed deprecated alias of the builtin object).
        self._p_st = np.empty(self._popsize, dtype=object)
        self._used_mem = 0

    def new_best(self, k):
        """Optionally refine the new best individual k with RPROP."""
        super(GPPDE, self).new_best(k)
        fit = self._best_fit
        if not self._update_best_w_rprop or fit is None:
            return None
        self.rprop(k)
        if self._fitness[k] > fit:
            self._best_fit = self._fitness[k]
        return super(GPPDE, self).new_best(k)

    def stats(self):
        # Piggy-back the periodic memory check on the stats callback.
        flag = super(GPPDE, self).stats()
        self.free_mem()
        return flag

    def update_mem(self, d, sign=1):
        """Add (sign=1) or subtract (sign=-1) d's size (in MB) from the
        running memory accounting; None is ignored."""
        if d is not None:
            d = d.nbytes / 1024. / 1024.
            self._used_mem += (d * sign)

    def max_mem_per_individual(self, xs=None):
        """Return (MB of the shared derivative buffer, MB of one
        individual's evaluation cache) for xs training samples."""
        if xs is None:
            xs = self._x.shape[0]
        p_st = np.empty((self._max_length, xs),
                        dtype=self._dtype, order='C').nbytes
        p_der_st = np.ones((self._max_length,
                            xs),
                           dtype=self._dtype,
                           order='C').nbytes
        return (p_der_st / 1024. / 1024.,
                p_st / 1024. / 1024.)

    def train(self, x, f):
        """Set the training set and (re)allocate the shared buffer that
        holds the error derivative per tree node."""
        super(GPPDE, self).train(x, f)
        self.free_mem()
        self._p_der = np.empty((self._max_length, self._x.shape[0]),
                               dtype=self._dtype)
        self._pde = PDE(self._tree, self._p_der)
        return self

    def load_prev_run(self):
        r = super(GPPDE, self).load_prev_run()
        if r:
            # The evaluation caches are not persisted, so re-evaluate
            # everybody without advancing the generation counter.
            self._fitness.fill(-np.inf)
            gens_ind = self.gens_ind
            for i in range(self._popsize):
                self.fitness(i)
            self.gens_ind = gens_ind
        return r

    def mem(self):
        """
        Memory used
        """
        return self._used_mem

    def free_mem(self):
        """
        This method free the memory when the memory used is more than
        self._max_mem
        """
        if self.mem() < self._max_mem:
            return None
        for i in range(self._popsize):
            self.update_mem(self._p_st[i], -1)
            self._p_st[i] = None
            # Without its cache the stored fitness is stale.
            if hasattr(self, '_fitness'):
                self._fitness[i] = -np.inf

    def mutation(self, father1):
        """Mutation: replace a negative-tournament loser with a fresh
        random individual and cross father1 with it."""
        kill = self.tournament(neg=True)
        while kill == self._xo_father1:
            kill = self.tournament(neg=True)
        d = np.random.randint(self._min_depth,
                              self._mutation_depth)
        son = self.create_random_ind(depth=d,
                                     first_call=True)
        self.kill_ind(kill, son)
        self._xo_father2 = kill
        self.fitness(kill)
        ind = self.crossover(father1, son)
        return ind

    def tree_params(self, type_xpoint_selection=0):
        """Create the PDEXO helper used to select crossover points."""
        # np.int is a removed deprecated alias of the builtin int.
        self._tree_length = np.empty(self._max_length, dtype=int)
        self._tree_mask = np.empty(self._max_length, dtype=int)
        self._tree = PDEXO(self._nop,
                           self._tree_length,
                           self._tree_mask,
                           self._min_length,
                           self._max_length,
                           type_xpoint_selection=type_xpoint_selection)

    def get_error(self, p1):
        """Sign of the error derivative at node p1 of the first father,
        obtained by back-propagating the output error with the PDE."""
        self._computing_fitness = self._xo_father1
        e, g = self.compute_error_pr(None)
        self._p_der[self._output] = e.T
        self._pde.compute(self._p[self._xo_father1], p1,
                          self._p_st[self._xo_father1])
        e = np.sign(self._p_der[p1])
        return e

    def crossover(self, father1, father2, p1=-1, p2=-1,
                  force_xo=False):
        """Crossover in which father2's crossing point is guided by the
        error derivative at father1's crossing point."""
        if p1 == -1:
            p1 = self._tree.father1_crossing_point(father1)
        if p2 == -1:
            e = self.get_error(p1)
            s = self._p_st[self._xo_father2]
            p = self._p_st[self._xo_father1][p1]
            self._tree.father2_xp_extras(e, p, s)
            p2 = self._tree.father2_crossing_point(father1, father2, p1)
        return super(GPPDE, self).crossover(father1, father2,
                                            p1, p2)

    def get_st(self, ind):
        """Return (allocating or growing as needed) the evaluation
        buffer for `ind`: the shared buffer when no individual is being
        scored, otherwise the cached per-individual one."""
        if self._computing_fitness is None:
            if self._st is None:
                self._st = np.empty((ind.shape[0], self._x.shape[0]),
                                    dtype=self._dtype, order='C')
            elif self._st.shape[0] < ind.shape[0]:
                self._st.resize((ind.shape[0], self._x.shape[0]))
            return self._st
        else:
            k = self._computing_fitness
            l = ind.shape[0]
            if self._p_st[k] is None:
                self._p_st[k] = np.empty((ind.shape[0], self._x.shape[0]),
                                         dtype=self._dtype, order='C')
                self.update_mem(self._p_st[k])
            elif self._p_st[k].shape[0] < l:
                self.update_mem(self._p_st[k], -1)
                self._p_st[k].resize(l, self._x.shape[0])
                self.update_mem(self._p_st[k])
            return self._p_st[k]

    def compute_error_pr(self, ind, pos=0, constants=None, epoch=0):
        """Return (derivative of the squared error w.r.t. the output,
        prediction); epoch 0 reuses the cached evaluation."""
        if epoch == 0:
            g = self._p_st[self._computing_fitness][self._output].T
        else:
            if ind is None:
                g = self.eval(self._computing_fitness)
            else:
                g = self.eval_ind(ind, pos=pos, constants=constants)
        # e = - 2 * ( self._f - g)
        e = 2 * (g - self._f)
        return e, g

    def rprop(self, k, epochs=10000):
        """Update the constants of the tree using RPROP"""
        self._computing_fitness = k
        ind = self._p[k]
        constants = self._p_constants[k]
        self._computing_fitness = k
        if not self.any_constant(ind):
            return None
        best_cons = constants.copy()
        fit_best = self._fitness[k]
        epoch_best = 0
        rprop = RPROP2(ind, constants,
                       self._p_der, self._tree)
        e, g = self.compute_error_pr(None)
        self._p_der[self._output] = e.T
        for i in range(epochs):
            if i > 0:
                self.gens_ind += 1
            self._pde.compute_constants(ind, self._p_st[k])
            rprop.update_constants_rprop()
            e, g = self.compute_error_pr(None, epoch=i)
            fit = - self.distance(self._f, g)
            if fit > fit_best and not np.isnan(fit):
                fit_best = fit
                best_cons = constants.copy()
                epoch_best = i
            if i < epochs - 1:
                self._p_der[self._output] = e.T
            # Early stop after too many epochs without improvement.
            if i - epoch_best >= self._max_n_worst_epochs:
                break
        constants[:] = best_cons[:]
        self._fitness[k] = fit_best
        # Leave the cached evaluation consistent with the best constants.
        e, g = self.compute_error_pr(None, epoch=i)

    @classmethod
    def init_cl(cls, training_size=None,
                max_length=1024, max_mem=500, argmax_nargs=2,
                func=["+", "-", "*", "/", 'abs', 'exp', 'sqrt', 'sin',
                      'cos', 'sigmoid', 'if', 'max', 'min', 'ln', 'sq',
                      'argmax'], seed=0, **kwargs):
        """Create an instance; when training_size is given, shrink the
        population (raising the number of generations to keep
        popsize*generations constant) so the evaluation caches fit in
        max_mem."""
        ins = cls(max_mem=max_mem, max_length=max_length,
                  argmax_nargs=argmax_nargs, func=func, seed=seed,
                  **kwargs)
        if training_size is None:
            return ins
        base, pr = ins.max_mem_per_individual(training_size)
        if (pr * ins._popsize) + base > ins._max_mem:
            mm = ins._max_mem - base
            assert mm > 0
            # np.float/np.int are removed deprecated aliases of the
            # builtin float/int.
            popsize = np.floor(mm / float(pr)).astype(int)
            nind = ins._gens * ins._popsize
            # Largest population size within the memory bound that
            # divides the total number of individuals (a list is
            # required on Python 3, where filter is lazy).
            popsize = [x for x in range(2, popsize + 1)
                       if (nind % x) == 0][-1]
            ins._gens = np.floor(nind / popsize).astype(int)
            ins._popsize = popsize
        return ins

    @classmethod
    def run_cl(cls, x, f, test=None, ntries=10, pgrow=0.0,
               **kwargs):
        """
        Returns a trained system that does not output nan or inf neither
        in the training set (i.e., x) or test set (i.e., test).

        NOTE(review): ntries and pgrow are currently unused here --
        confirm whether retries were intended as in SimpleGA.run_cl.
        """
        if 'seed' in kwargs:
            seed = kwargs['seed']
            if seed is not None:
                seed = int(seed)
        else:
            seed = 0
        kwargs['seed'] = seed
        kwargs['training_size'] = x.shape[0]
        ins = cls.init_cl(**kwargs).train(x, f)
        if test is not None:
            ins.set_test(test)
        ins.run()
        test_f = lambda x: ((not np.any(np.isnan(x))) and
                            (not np.any(np.isinf(x))))
        a = test_f(ins.predict(x))
        if test is not None:
            b = test_f(ins.predict(test))
        else:
            b = True
        if a and b:
            return ins
        return None
| Python |
# Copyright 2013 Mario Graff Guerrero
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = '0.4.0'
from .simplega import SimpleGA
from .simplegp import GP, GPwRestart, GPMAE
from .gppde import GPPDE
from .forest import GPForest, SubTreeXO, SubTreeXOPDE
from .classification import Classification, ClassificationPDE
from .time_series import TimeSeries, RTimeSeries
from .recursiveGP import RecursiveGP, RGP
from .eval import Eval
from .Rprop_mod import RPROP
# Explicit relative import, consistent with the lines above (the
# implicit relative form only worked on Python 2).
from .utils import Pool, VerifyOutput
# __all__ must hold *names* (strings), not the objects themselves;
# with a list of classes `from SimpleGP import *` raises TypeError.
__all__ = ['SimpleGA', 'TimeSeries', 'GP', 'GPPDE',
           'GPForest', 'SubTreeXO', 'SubTreeXOPDE', 'Classification',
           'RecursiveGP', 'RGP', 'RTimeSeries', 'GPwRestart', 'GPMAE',
           'Eval', 'RPROP', 'VerifyOutput', 'ClassificationPDE',
           'Pool']
| Python |
# Copyright 2013 Mario Graff Guerrero
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from SimpleGP.RecursiveGP_mod import Recursive
from SimpleGP.simplegp import GP
class RecursiveGP(GP):
    """GP whose programs are evaluated recursively over lagged values
    of the series (see RecursiveGP_mod.Recursive)."""
    def __init__(self, nlags=1, cases=None, **kwargs):
        # nlags: number of lagged inputs fed back into the program.
        # np.int is a removed deprecated alias of the builtin int.
        super(RecursiveGP, self).__init__(**kwargs)
        self._nlags = nlags
        self._cases = np.empty(0, dtype=int) if cases is None else cases

    def train(self, x, f):
        """Set the training data and build the recursive evaluator.

        Bug fix: now returns self, like GP.train and SimpleGA.train,
        so chained calls such as `cls.init_cl(**kw).train(x, f)` (used
        by run_cl) no longer receive None.
        """
        super(RecursiveGP, self).train(x, f)
        self._g = np.zeros(self._x.shape[0], dtype=self._dtype)
        self.__r = Recursive(self._nop, self._nop.shape[0], self._x,
                             self._mem, self._g, self._cases,
                             nlags=self._nlags)
        return self

    def eval_ind(self, ind, pos=0, constants=None):
        """Evaluate an individual over the whole series; the result is
        accumulated in (and returned from) the shared buffer self._g."""
        self._mem.fill(0)
        constants = constants if constants is not None else self._constants
        self.__r.eval_ind_inner_iter(ind,
                                     constants,
                                     pos)
        return self._g


# Short, historical alias.
RGP = RecursiveGP
| Python |
import SimpleGP
reload(SimpleGP)
from SimpleGP import GP
import numpy as np
class GP2(GP):
    """This class exemplifies the change of the distance function.
    In the example, the distance is MAE then the derivative of this
    function is computed in the method compute_error_pr"""

    def distance(self, y, yh):
        """Mean absolute error between target y and prediction yh."""
        return np.fabs(y - yh).mean()

    def compute_error_pr(self, ind, pos=0, constants=None, epoch=0):
        """Return (error derivative, prediction); epoch 0 reuses the
        cached evaluation stored in self._st."""
        if epoch == 0:
            g = self._st[self._output].T
        else:
            g = self.eval_ind(ind, pos=pos, constants=constants)
        # d/dg |y - g| = -sign(y - g)
        return -1 * np.sign(self._f - g), g
def run(seed=0, pxo=0.9):
    """Demo: build a small polynomial regression problem, seed
    individual 0 with a hand-crafted genome encoding the (negated)
    target polynomial, and show its fitness before and after tuning
    the constants with rprop. Returns the GP2 instance."""
    # seed = 0 # if len(sys.argv) == 1 else int(sys.argv[1])
    x = np.linspace(0, 1, 100)
    pol = np.array([0.2, -3.2, 1.3])
    X = np.vstack((x**2, x, np.ones(x.shape[0])))
    y = (X.T * pol).sum(axis=1)
    gp = GP2(popsize=1000,
             generations=50,
             verbose=True,
             verbose_nind=100,
             min_length=1,
             pleaf=None,
             func=["+", "-", "*", "/", 'abs', 'exp', 'sqrt',
                   'sin', 'cos', 'sigmoid', 'max', 'min',
                   'ln', 'sq'],
             min_depth=0, fname_best=None,
             seed=seed, nrandom=100, pxo=pxo, pgrow=0.5, walltime=None)
    gp.create_random_constants()
    x = x[:, np.newaxis]
    gp.train(x, y)
    gp.create_population()
    # Hand-crafted genome: terminals past the function table encode the
    # input variable (nvar) and three constants (nvar+1..nvar+3), whose
    # values are stored (negated) in _p_constants[0].
    nvar = gp._func.shape[0]
    gp._p[0] = np.array([0, 2, 14, nvar, nvar+1, 0, 2, nvar, nvar+2, nvar+3])
    gp._p_constants[0] = pol * -1
    print gp._max_nargs
    print pol
    print "Fit", gp.distance(gp._f, gp.eval(0))
    print gp.print_infix(0)
    # Tune the constants of individual 0 with resilient propagation.
    gp.rprop(0)
    print "Fit", gp.distance(gp._f, gp.eval(0))
    print gp.print_infix(0)
    # print gp._func[gp._func_allow]
    # gp.run()
    return gp


gp = run(0)
| Python |
import numpy as np
from SimpleGP import GPPDE
from multiprocessing import Process, cpu_count
import time

# Shared inputs for every worker: a fixed grid on [-1, 1] and one
# target function per row of the rational-problems benchmark file.
x = np.arange(-1, 1.1, 0.1)[:, np.newaxis]
l = np.array(map(lambda x: map(float, x.split()),
                 open('data/rational-problems.txt').readlines()))
def run(pr, seed):
    """Train one GPPDE instance on benchmark problem `pr` with `seed`.

    NOTE(review): nothing is saved (fname_best=None) and nothing is
    returned -- presumably only the run's console output matters;
    confirm before relying on this worker for results.
    """
    print 'Haciendo r-%s-%s.npy' % (pr, seed)
    f = l[pr]
    gp = GPPDE(popsize=1000, generations=50, pxo=1.0,
               fname_best=None, max_mem=500,
               seed=seed)
    gp.train(x, f)
    gp.run()
# Build the full (problem, seed) work list -- 30 seeds per problem --
# and process it keeping at most cpu_count() workers alive at a time.
lst = []
for i in range(l.shape[0]):
    for j in range(30):
        lst.append((i, j))
lst.reverse()
process_lst = []
while len(lst):
    # Drop finished workers, then top the pool up to cpu_count().
    process_lst = filter(lambda x: x.is_alive(), process_lst)
    for i in range(len(process_lst), cpu_count()):
        if len(lst) == 0:
            continue
        p = Process(target=run, args=lst.pop())
        p.start()
        process_lst.append(p)
    time.sleep(5)
| Python |
from SimpleGP import Classification
import numpy as np

# data/iris.npy stores two stacked arrays: the features first, then
# the class labels.
fpt = open('data/iris.npy')
X = np.load(fpt)
cl = np.load(fpt)
fpt.close()
gp = Classification(popsize=1000, generations=50, verbose=True,
                    verbose_nind=1000,
                    func=["+", "-", "*", "/", 'abs', 'exp', 'sqrt',
                          'sin', 'cos', 'sigmoid', 'if', 'max', 'min',
                          'ln', 'sq'],
                    fname_best=None,
                    seed=0, nrandom=0,
                    pxo=0.9, pgrow=0.5, walltime=None)
gp.train(X, cl)
gp.run()
# Report the best individual's fitness, training accuracy and infix form.
pr = gp.predict(X)
print gp.fitness(gp.get_best())
print (pr == cl).sum() / float(cl.shape[0])
print gp.print_infix()
| Python |
import numpy as np
from SimpleGP import TimeSeries, GPPDE
class TPDE(TimeSeries, GPPDE):
    """Time-series GP driven by the PDE crossover machinery."""
def rse(x, y):
    """Relative squared error of prediction y against target x: the
    squared error normalized by the variance-like spread of x."""
    residual = ((x - y) ** 2).sum()
    spread = ((x - x.sum() / x.size) ** 2).sum()
    return residual / spread
# Train on the laser time series (data/A.txt) and score a 100-step
# forecast against the continuation file using the relative squared
# error.
ts = np.array(map(float, open('data/A.txt').readlines()))
nsteps = 100
gp = TPDE.run_cl(ts, nsteps=nsteps, verbose=True)
d = np.loadtxt('data/A.cont.txt')
print rse(d[:100], gp.predict_best())
| Python |
import SimpleGP
reload(SimpleGP)
from SimpleGP import GP
import numpy as np

# Quadratic regression demo: recover pol from noiseless samples.
x = np.linspace(-10, 10, 100)
pol = np.array([0.2, -0.3, 0.2])
X = np.vstack((x**2, x, np.ones(x.shape[0])))
y = (X.T * pol).sum(axis=1)
x = x[:, np.newaxis]
gp = GP(verbose=True, max_length=1000).train(x, y)
gp.run()
# Report the best individual's fitness and infix representation.
print gp.fitness(gp.get_best())
print gp.print_infix()
| Python |
from SimpleGP import GPPDE
import numpy as np
class Cl2(GPPDE):
    # GPPDE whose individuals are all rooted at function index 16.
    def random_func(self, first_call=False):
        if first_call:
            # this is the argmax
            # NOTE(review): 16 is hard-coded as the index of the
            # argmax/output function for the 16-entry function set
            # configured below -- confirm it still matches before
            # changing the func list.
            return 16
        return super(Cl2, self).random_func(first_call=first_call)
# data/iris.npy stores two stacked arrays: features first, labels second.
fpt = open('data/iris.npy')
X = np.load(fpt)
cl = np.load(fpt)
fpt.close()
# argmax gets one argument per class so the forest outputs a label.
gp = GPPDE(popsize=1000, generations=50, verbose=True,
           verbose_nind=1000,
           func=["+", "-", "*", "/", 'abs', 'exp', 'sqrt',
                 'sin', 'cos', 'sigmoid', 'if', 'max', 'min',
                 'ln', 'sq', 'argmax'],
           fname_best=None,
           seed=0, nrandom=0,
           max_mem=500,
           argmax_nargs=np.unique(cl).shape[0],
           pxo=0.9, pgrow=0.5, walltime=None)
# Test
# cl = np.ones_like(cl) + 1
# gp.train(X, cl)
# gp.create_population()
# nfunc = gp._nop.shape[0]
# nvar = gp._x.shape[1]
# ind = np.array([nfunc-1, nfunc+nvar, nfunc+nvar+1, nfunc+nvar+2])
# gp._p[0] = ind
# gp._p_constants[0] = np.array([2.1, 18.5, 14.2])
# print gp.print_infix(0), gp.eval(0)[0]
# print gp.fitness(0)
# gp.rprop(0)
# print gp.print_infix(0)
# print gp.fitness(0)
gp.train(X, cl)
gp.run()
# Accuracy of the best individual, predictions rounded to the nearest
# class label.
clh = np.round(gp.eval(gp.get_best()))
print gp.print_infix()
print gp.fitness(gp.get_best())
print (clh == cl).sum() / float(cl.shape[0])
| Python |
import numpy as np
import SimpleGP
reload(SimpleGP)
from SimpleGP import RGP, If, Sigmoid
class GP2(RGP):
    """Recursive GP whose constant pool is drawn from U(-10, 10)."""

    def create_random_constants(self):
        """Fill self._constants with self._nrandom uniform samples."""
        draws = np.random.uniform(-10, 10, self._nrandom)
        self._constants = draws.astype(self._dtype)
if __name__ == '__main__':
    seed = 1 # if len(sys.argv) == 1 else int(sys.argv[1])
    # one observation per line in data/A.txt
    ts = np.array(map(float, open('data/A.txt').readlines()))
    # number of lagged inputs grows logarithmically with the series length
    nlags = int(np.ceil(np.log2(ts.shape[0])))
    gp = GP2(popsize=10, generations=5000, verbose=True, verbose_nind=1000,
             func=[np.add, np.subtract, np.multiply, np.divide, np.fabs,
                   np.exp, np.sqrt, np.sin, np.cos, Sigmoid, If()],
             min_depth=1, nlags=nlags, fname_best=None,
             seed=seed, max_length=ts.shape[0]*10, nrandom=100, pxo=0.2,
             pgrow=0.5, walltime=None)
    gp.create_random_constants()
    # design matrix: the first row seeds the lag window (newest first);
    # the last column is the time index
    x = np.zeros((ts.shape[0]-nlags, nlags+1))
    x[0, :nlags] = ts[:nlags][::-1]
    x[:, -1] = np.arange(ts.shape[0]-nlags)
    gp.train(x, ts[nlags:])
    gp.run()
| Python |
import numpy as np
from SimpleGP import SubTreeXO
from sklearn.metrics.pairwise import euclidean_distances
class GPPG(SubTreeXO):
    """Nearest-prototype classifier evolved with GP.

    Each individual encodes one prototype (tree) per class; a sample is
    labelled with the index of its nearest prototype.
    """
    def train(self, x, f):
        # the base class expects the transposed design matrix
        super(GPPG, self).train(x.T, f)
        return self
    def eval_ind_X(self, ind, **kwargs):
        # evaluate the prototype trees; invalid (NaN/inf) outputs -> None
        trees = super(GPPG, self).eval_ind(ind, **kwargs)
        if np.any(np.isnan(trees)) or np.any(np.isinf(trees)):
            return None
        return trees
    def eval_ind(self, ind, x=None, **kwargs):
        """Eval a tree when the tree is a set of prototypes,
        one prototype per class"""
        trees = self.eval_ind_X(ind, **kwargs)
        if trees is None:
            # invalid individual: all-inf output yields the worst fitness
            r = np.empty_like(self._f)
            r.fill(np.inf)
            return r
        x = self._x.T if x is None else x
        # predicted class = index of the closest prototype column
        d = euclidean_distances(x, trees.T)
        return d.argmin(axis=1)
    def distance(self, t, f):
        # classification error rate (1 - accuracy) between target t and
        # prediction f
        cl = np.where(t == f, 1, 0).sum()
        return 1 - cl / float(t.shape[0])
class GPPG2(GPPG):
    def __init__(self, **kwargs):
        super(GPPG2, self).__init__(**kwargs)
        # per-class index arrays of training samples; filled in train()
        self._type_terminals = None
    def train(self, x, f):
        """Each terminal is associated to the class, consequently,
        the prototype can only be built using the values that belong
        to the corresponding class"""
        super(GPPG2, self).train(x, f)
        self._ntrees_per_class_mod = np.unique(f).shape[0]
        self._type_terminals = np.zeros(self._ntrees_per_class_mod,
                                        dtype=np.object)
        for i in range(self._ntrees_per_class_mod):
            # indices of the training samples that belong to class i
            self._type_terminals[i] = np.where(self._f == i)[0]
        return self
    def random_leaf(self):
        """Choose a leaf depending on the i-th argument of root, i.e.,
        Output function """
        if np.random.rand() < 0.5 or self._nrandom == 0:
            # pick a variable terminal restricted to the class of the
            # prototype tree currently being built
            i = self._doing_tree
            cnt = self._type_terminals[i].shape[0]
            l = np.random.randint(cnt)
            l = self._type_terminals[i][l]
            return l + self._func.shape[0]
        else:
            # otherwise pick one of the random constants; constant opcodes
            # come after functions and variables in the encoding
            l = np.random.randint(self._constants.shape[0])
            return l + self._func.shape[0] + self._x.shape[1]
def run():
    """Train a GPPG2 prototype classifier on the iris data and return it."""
    handle = open('data/iris.npy')
    features = np.load(handle)
    labels = np.load(handle)
    handle.close()
    classifier = GPPG2(popsize=1000, generations=50,
                       verbose=True).train(features, labels)
    classifier.run()
    return classifier
if __name__ == '__main__':
    # train the classifier when executed as a script (result discarded)
    run()
| Python |
# Manual exercise of GP's simplification machinery: build a tiny GP
# instance, feed hand-encoded prefix programs to gp.simplify, and print
# the infix form before and after each simplification.
from SimpleGP import GP
import numpy as np
seed = 0 # if len(sys.argv) == 1 else int(sys.argv[1])
# target: y = 0.2*x**2 - 0.3*x + 0.2 sampled on [0, 1]
x = np.linspace(0, 1, 100)
pol = np.array([0.2, -0.3, 0.2])
X = np.vstack((x**2, x, np.ones(x.shape[0])))
y = (X.T * pol).sum(axis=1)
gp = GP(popsize=10,
        generations=100000,
        verbose=True,
        verbose_nind=1000,
        min_length=1,
        do_simplify=True,
        func=["+", "-", "*", "/", 'abs', 'exp', 'sqrt',
              'sin', 'cos', 'sigmoid', 'if', 'max', 'min',
              'ln', 'sq'],
        min_depth=0, fname_best='regression.npy',
        seed=seed, nrandom=100, pxo=0.2, pgrow=0.5, walltime=None)
gp.create_random_constants()
x = x[:, np.newaxis]
gp.train(x, y)
gp.create_population()
# nvar = number of function opcodes; in these encodings it appears to be
# the opcode of the first terminal (variable X0) -- TODO confirm
nvar = gp._nop.shape[0]
ind = np.array([2, 3, 0, 0, nvar, nvar, 1, nvar, nvar,
                0, 1, nvar, nvar, 2, nvar, nvar, 1, 3,
                nvar, nvar, 3, nvar, nvar], dtype=np.int)
print gp.print_infix(ind)
ind2 = gp.simplify(ind)
print gp.print_infix(ind2, constants=gp._ind_generated_c)
ind = np.array([1, 0, 3, nvar, nvar, 1, nvar, nvar,
                3, 2, nvar, nvar, 2, nvar, nvar], dtype=np.int)
print gp.print_infix(ind)
ind2 = gp.simplify(ind)
print gp.print_infix(ind2, constants=gp._ind_generated_c)
print ind2
ind = np.array([13, 5, 2, nvar, nvar], dtype=np.int)
print gp.print_infix(ind, constants=gp._ind_generated_c)
ind2 = gp.simplify(ind)
print gp.print_infix(ind2, constants=gp._ind_generated_c)
ind = np.array([5, 13, 2, nvar, nvar], dtype=np.int)
print gp.print_infix(ind, constants=gp._ind_generated_c)
ind2 = gp.simplify(ind)
print gp.print_infix(ind2, constants=gp._ind_generated_c)
ind = np.array([5, 13, 2, nvar, nvar], dtype=np.int)
print gp.print_infix(ind, constants=gp._ind_generated_c)
ind2 = gp.simplify(ind)
print gp.print_infix(ind2, constants=gp._ind_generated_c)
# in-place simplification of population slot 0: both encodings below are
# expected to reduce to "(X0 * 1.4)"
gp._p[0] = np.array([0, 2, nvar, nvar+2, nvar+1], dtype=np.int)
gp._p_constants[0] = np.array([0, 1.4])
print gp.print_infix(0)
gp.simplify(0)
print gp.print_infix(0) == "(X0 * 1.4)"
gp._p[0] = np.array([0, nvar+1, 2, nvar, nvar+2], dtype=np.int)
gp._p_constants[0] = np.array([0, 1.4])
print gp.print_infix(0)
gp.simplify(0)
print gp.print_infix(0) == "(X0 * 1.4)"
gp._p[0] = np.array([1, 0, 2, nvar, nvar+2, nvar+1,
                     2, nvar, nvar+2], dtype=np.int)
gp._p_constants[0] = np.array([0, 1.4])
print gp.print_infix(0)
gp.simplify(0)
print gp.print_infix(0)
| Python |
import SimpleGP
reload(SimpleGP)
from SimpleGP import GPPDE
import numpy as np
# regression smoke test for GPPDE on y = 0.2*x**2 - 0.3*x + 0.2
x = np.linspace(-10, 10, 100)
pol = np.array([0.2, -0.3, 0.2])
X = np.vstack((x**2, x, np.ones(x.shape[0])))
y = (X.T * pol).sum(axis=1)
x = x[:, np.newaxis]
gp = GPPDE(generations=30, verbose=True,
           # max_mem=10, verbose_nind=10,
           # update_best_w_rprop=True,
           max_length=1000).train(x, y)
gp.run()
# regression guard: fitness of the best individual must not degrade
# below this previously observed value (fitness is negative error)
assert gp.fitness(gp.get_best()) >= -2.2399825722547702e-06
print gp.fitness(gp.get_best())
print gp.print_infix()
| Python |
import SimpleGP
reload(SimpleGP)
from SimpleGP import GPRPropU
import numpy as np
# regression smoke test for GPRPropU (GP with RProp constant updates)
x = np.linspace(0, 10, 100)
pol = np.array([0.2, -0.3, 0.2])
X = np.vstack((x**2, x, np.ones(x.shape[0])))
y = (X.T * pol).sum(axis=1)
gp = GPRPropU(popsize=1000,
              generations=50*5,
              verbose=True,
              verbose_nind=1000,
              do_simplify=True,
              func=['+', '-', '*', "/", 'abs', 'exp', 'sqrt',
                    'sin', 'cos', 'sigmoid', 'if', 'max', 'min',
                    'ln', 'sq'],
              min_length=2,
              pleaf=None,
              min_depth=0, fname_best=None,
              seed=0, nrandom=100, pxo=1.0, pgrow=0.5, walltime=None)
x = x[:, np.newaxis]
gp.train(x, y)
gp.run()
print gp.fitness(gp.get_best())
print gp.print_infix()
| Python |
# Copyright 2013 Mario Graff Guerrero
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Build script for the SimpleGP package: compiles the Cython extension
# modules, stamps __version__ in SimpleGP/__init__.py from the VERSION
# file, and registers the package metadata with setuptools.
import numpy
from setuptools import setup
from setuptools import Extension
from Cython.Distutils import build_ext
from distutils import sysconfig
import os
from os.path import join
# -mno-fused-madd
# Strip the clang-only '-mno-fused-madd' flag from the build
# configuration; it breaks compilation with other compilers.
lst = ['CFLAGS', 'CONFIG_ARGS', 'LIBTOOL', 'PY_CFLAGS']
for k, v in zip(lst, sysconfig.get_config_vars(*lst)):
    if v is None:
        continue
    os.environ[k] = v.replace('-mno-fused-madd', '')
ext_modules = [Extension("SimpleGP.EA_aux_functions",
                         [join("SimpleGP", "aux_functions.pyx")],
                         libraries=["m"],
                         include_dirs=[numpy.get_include()]),
               Extension("SimpleGP.Simplify_mod",
                         [join("SimpleGP", "simplify.pyx")],
                         libraries=["m"],
                         include_dirs=[numpy.get_include()]),
               Extension("SimpleGP.Rprop_mod",
                         [join("SimpleGP", "rprop.pyx")],
                         libraries=["m"],
                         include_dirs=[numpy.get_include()]),
               Extension("SimpleGP.RecursiveGP_mod",
                         [join("SimpleGP", "recursive.pyx")],
                         libraries=["m"],
                         include_dirs=[numpy.get_include()]),
               Extension("SimpleGP.eval",
                         [join("SimpleGP", "eval.pxd"),
                          join("SimpleGP", "eval.pyx")],
                         libraries=["m"],
                         include_dirs=[numpy.get_include()]),
               Extension("SimpleGP.tree",
                         [join("SimpleGP", "tree.pyx")],
                         libraries=["m"],
                         include_dirs=[numpy.get_include()]),
               Extension("SimpleGP.pde",
                         [join("SimpleGP", "pde.pxd"),
                          join("SimpleGP", "pde.pyx")],
                         libraries=["m"],
                         include_dirs=[numpy.get_include()])]
# Read the released version and rewrite SimpleGP/__init__.py so that
# __version__ always matches the VERSION file.  File handles are now
# closed deterministically via context managers (the original leaked
# them), and the redundant lstrip().rstrip() chain is a single strip().
with open("VERSION") as fpt:
    version = fpt.readline().strip()
init_path = join("SimpleGP", "__init__.py")
with open(init_path) as fpt:
    lst = fpt.readlines()
for k in range(len(lst)):
    if lst[k].count("__version__"):
        lst[k] = "__version__ = '%s'\n" % version
with open(init_path, "w") as fpt:
    fpt.write("".join(lst))
setup(
    name="SimpleGP",
    description="""A GP systems for symbolic regression and
    auto-recursive regression""",
    version=version,
    url='http://dep.fie.umich.mx/~mgraffg',
    author="Mario Graff",
    author_email="mgraffg@dep.fie.umich.mx",
    cmdclass={"build_ext": build_ext},
    ext_modules=ext_modules,
    packages=['SimpleGP'],
    install_requires=['cython >= 0.19.2', 'numpy >= 1.6.2']
)
| Python |
import RDF
from flyingfish.utils import modeldump, sparql, str_scalar_or_list
from flyingfish.client import RDFconnection
print "start"
connection = RDFconnection(model="sample_storage", storage="postgresql", startnew=True,
options = "host='127.0.0.1',database='ffish',user='root','password=''")
print "connection created"
client = connection.client()
print "connected"
#test_file='./flyingfish/sample-data/tim.rdf'
#uri=RDF.Uri(string="file:"+test_file)
#parser=RDF.Parser('raptor')
#for s in parser.parse_as_stream(uri,uri):
# client.model.add_statement(s)
print modeldump(client.model)
| Python |
import RDF
from flyingfish.utils import modeldump, sparql, str_scalar_or_list
from flyingfish.client import RDFconnection
print "start"
connection = RDFconnection(model="sample_storage", storage="postgresql", startnew=True,
options = "host='127.0.0.1',database='ffish',user='root','password=''")
print "connection created"
client = connection.client()
print "connected"
#test_file='./flyingfish/sample-data/tim.rdf'
#uri=RDF.Uri(string="file:"+test_file)
#parser=RDF.Parser('raptor')
#for s in parser.parse_as_stream(uri,uri):
# client.model.add_statement(s)
print modeldump(client.model)
| Python |
import RDF
from flyingfish.utils import modeldump, sparql, str_scalar_or_list
from flyingfish.client import RDFconnection
print "start"
connection = RDFconnection(model="sample_storage", storage="postgresql", startnew=False,
options = "host='127.0.0.1',database='ffish',user='root','password=''")
client = connection.client()
#print modeldump(client.model)
tags = client.query('ext:Tag', '?uri ext:owner "1"', ['ext'])
print tags
| Python |
import RDF
from flyingfish.utils import modeldump, sparql, str_scalar_or_list
from flyingfish.client import RDFconnection
connection = RDFconnection(model="sample_storage", storage="postgresql", startnew=False,
options = "host='127.0.0.1',database='ffish',user='root','password=''")
client = connection.client()
print modeldump(client.model)
| Python |
import RDF
import string
def modeldump(model):
    """Return a textual dump of every statement in *model*, one per line."""
    lines = ["Statement: %s\n" % stmt for stmt in model.as_stream()]
    return "".join(lines)
def sparql(model, sparql):
    """Run a SPARQL query against *model* and return the bindings formatted
    as brace-delimited "key = value" blocks."""
    query = RDF.Query(sparql, query_language="sparql")
    chunks = []
    for result in query.execute(model):
        chunks.append("{\n")
        for k in result:
            chunks.append("\t%s = %s\n" % (k, result[k]))
        chunks.append("}\n")
    return "".join(chunks)
def str_scalar_or_list(var):
    """Render *var* as a string.

    Lists are joined as 'a, b, c'; anything else is passed through str().

    Fix: a stray trailing comma after the list-branch return expression
    made it return a one-element *tuple* instead of a string.
    """
    if isinstance(var, list):
        return ', '.join(map(str, var))
    return str(var)
| Python |
from constants import NAMESPACES, DICT_DELIMITER, ATTR_DELIMITER
class ListString(list):
    """ Modified List
    You could do with this=ListString():
    - refer this as usual List
    - call str(this), unicode(this) with better formating
    - this.as_string() same to str(this)
    - this.as_list() same to this (!!! Dummy method)
    - this.as_is() will return None|Object|List depending on content
    - this.is_empty() - True if we have no elements
    - this.is_string() - True if we have exactly 1 item
    - this.is_list() - True if we have more than 1 item
    """
    def __call__(self, *args):
        # calling the object is a no-op returning itself (keeps Django
        # template variable resolution happy)
        return self #list(self)
    def __str__(self):
        """ With some better output formating """
        #could be better, without copying ...
        data = list(self)
        result = ""
        splitter = ", "
        empty = "-"
        for item in data:
            if not item is None:
                result+= str(item) + splitter
            else:
                # None entries are rendered as "-"
                result+= empty + splitter
        # drop the trailing ", " separator
        return result[:-len(splitter)]
    def __unicode__(self):
        return unicode(self.__str__())
    def __repr__(self):
        return self.__str__()
    def as_is(self):
        """ will return None|Object|List depending on content """
        if len(self) == 0:
            return None
        elif len(self) == 1:
            return list(self)[0]
        else:
            return self
    def as_string(self):
        """ same to str(this) """
        return self.__unicode__()
    def as_list(self):
        """ same to self (!!! Dummy method) """
        return list(self)
    def is_empty(self):
        """ True if we have no elements """
        if len(self) == 0:
            return True
        else:
            return False
    def is_string(self):
        """ True if we have exactly 1 item """
        if len(self) == 1:
            return True
        else:
            return False
    def is_list(self):
        """ True if we have more than 1 item """
        if len(self) > 1:
            return True
        else:
            return False
    def __nonzero__(self):
        """ bool(self) returns True only if we have at last 1 non-zero length element
        it helps to send non-empty list into Django-template ("",) and show filled inputs,
        but still check it for existence {%if a %}
        """
        if len(self.__str__())==0:
            return False
        else:
            return True
    def __getitem__(self, uri):
        """ results["ext:Tag#1213213"]
        To select exact result from list
        """
        # NOTE(review): this shadows list.__getitem__, so numeric indexing
        # (xs[0]) also goes through this URI comparison -- confirm intended.
        # local import avoids a circular import with the model module
        from model import shortcut_to_uri
        for item in self:
            if item==uri or item==shortcut_to_uri(uri):
                return item
        return
    def __getattr__(self, predicate):
        ''' results.foaf_knows '''
        # map attribute style (foaf_knows) onto dict style (foaf:knows)
        predicate_delimited = predicate.replace(ATTR_DELIMITER, DICT_DELIMITER, 1)
        return self.__getitem__(predicate_delimited)
import RDF
from model import RDFmodel, shortcut_to_uri
from constants import NAMESPACES
from utilListString import ListString
class RDFconnection:
    """Holds a Redland storage plus the model that lives on top of it."""
    def __init__(self, model, storage, startnew, options):
        # Redland expects the "new" flag appended to the options string
        flag = "yes" if startnew else "no"
        opts = "%s,new='%s'" % (options, flag)
        self.storage = RDF.Storage(storage_name=storage, name=model,
                                   options_string=opts)
        self.model = RDF.Model(self.storage)
    def client(self):
        """Return an RDFclient bound to this connection's model."""
        return RDFclient(self.model)
class RDFclient:
    """Query facade over a Redland RDF model.

    Results are wrapped in RDFmodel objects (collected in a ListString
    for the query_* helpers) so callers can keep navigating the graph.
    """
    def __init__(self, model):
        self.model = model
    def get(self, root_URI, cache = False):
        """Map the resource at *root_URI* onto an RDFmodel (optionally
        pre-cached)."""
        return RDFmodel(self.model, root_URI, cache)
    def type(self, root_type):
        """Return RDFmodel wrappers for every subject whose rdf:type is
        *root_type* (shortcut or full URI); None when root_type is empty."""
        if not root_type:
            return None
        root_type = shortcut_to_uri(root_type)
        qstr = "- [http://www.w3.org/1999/02/22-rdf-syntax-ns#type] [%s]" % (root_type)
        #print "URI %s, query %s" % (self.root_type, qstr)
        qset = RDF.Query(qstr, query_language="triples")
        result = []
        for statement in qset.execute(self.model).as_stream():
            result.append(RDFmodel(self.model, statement.subject))
        return result
    def query_sparql(self, sparql):
        ''' SPARQL query must return set of "uri" variables '''
        qset = RDF.Query(sparql, query_language="sparql")
        result = ListString()
        for item in qset.execute(self.model):
            result.append(RDFmodel(self.model, item['uri']))
        return result
    def query(self, root_type, where, prefixes = []):
        """Build and run a SELECT ?uri query filtered by rdf:type
        *root_type* plus the extra *where* pattern.

        Raises KeyError for a prefix that is unknown or maps to an empty
        namespace.  (The original raised the undefined name `Error`,
        which would surface as a NameError.)
        """
        sparql = ""
        for pfx in prefixes:
            ns_uri = NAMESPACES.get(pfx)
            if not ns_uri:
                raise KeyError("unknown namespace prefix: %s" % pfx)
            sparql += "PREFIX %s: <%s>\n" % (pfx, ns_uri)
        sparql += "SELECT ?uri WHERE { ?uri <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> %s . %s }" % (root_type, where)
        #print sparql
        return self.query_sparql(sparql)
    def query_triple(self, triple, element = 'object'):
        """ triple - it's a list from 3 elements - subject, predicate, object

        Empty elements become wildcards ('-'); the chosen *element*
        (subject/predicate/object) of each matching statement is returned.
        """
        # build the query terms in a new list instead of mutating the
        # caller's triple in place (the original overwrote its argument)
        terms = []
        for term in triple:
            if len(term) == 0:
                terms.append("-")
            else:
                terms.append("[" + shortcut_to_uri(term) + "]")
        qstr = " ".join(terms)
        results = ListString()
        try:
            qset = RDF.Query(qstr, query_language="triples")
            for statement in qset.execute(self.model).as_stream():
                results.append(RDFmodel(self.model, getattr(statement, element)))
        except Exception:
            # best-effort: malformed queries yield an empty result set
            return results
        return results
#Model options
# Prefix -> namespace URI table used to expand/compress shortcuts such as
# "foaf:knows" throughout the flyingfish package.
NAMESPACES = {
    'dc' : 'http://purl.org/dc/elements/1.1/',
    'foaf': 'http://xmlns.com/foaf/0.1/',
    'rdfs': 'http://www.w3.org/2000/01/rdf-schema#',
    'rdf' : 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
    'rss' : 'http://purl.org/rss/1.0/',
    'ext' : 'http://extmem.com/rdfs/ext/'
}
# delimiter used in dict-style keys ("foaf:knows")
DICT_DELIMITER = ":"
# delimiter used in attribute-style names ("foaf_knows")
ATTR_DELIMITER = "_"
import RDF
from constants import NAMESPACES, DICT_DELIMITER, ATTR_DELIMITER
from utilListString import ListString
def uri_to_shortcut(uri):
    """Compress a full URI into shortcut form such as foaf:knows.

    URIs under an unknown namespace are returned unchanged.
    """
    text = str(uri)  # RDF.Node instances must be stringified first
    for prefix, ns_uri in NAMESPACES.iteritems():
        if text.startswith(ns_uri):
            return prefix + DICT_DELIMITER + text[len(ns_uri):]
    return text
def shortcut_to_uri(shortcut):
    """Expand a shortcut such as foaf:knows into its full URI.

    None maps to the empty string; unknown prefixes pass through.
    """
    if shortcut is None:
        return ''
    for prefix, ns_uri in NAMESPACES.iteritems():
        marker = prefix + DICT_DELIMITER
        if shortcut.startswith(marker):
            return ns_uri + shortcut[len(marker):]
    return shortcut
class RDFmodel:
    ''' Type-less RDF mapper class.

    Wraps one subject URI of a Redland model and exposes its predicates
    through dict-style ("foaf:knows") and attribute-style (foaf_knows)
    access.  An optional per-object cache dumps all statements of the
    subject into a python dictionary.
    '''
    # All properties should be declared beforehand - to avoid superimpose by RDF-predicates
    model = None
    root_URI = None
    cache = None
    cached = False
    def __init__(self, model, root_URI, cache = False):
        ''' takes Redland model and URI of RDF object to map. '''
        self.model = model
        if type(root_URI) is RDF.Node and root_URI.is_resource(): #a trick :\ literal type shouldn't be here
            self.root_URI = str(root_URI.uri)
        else:
            self.root_URI = str(root_URI)
        if cache:
            self.cache_checkup()
    def cache_object(self):
        ''' it's preliminary cache - dumps RDF-object to python dictionary:
        all predicates&object to given root_URI subject
        useful only for objects up to hundreds relations, should be substituted in future'''
        if self.root_URI is None: #can not query blank nodes
            return
        try:
            qstr = "[%s] - -" % (self.root_URI) #prepare triple-type query
            #print "URI %s, query %s" % (self.root_URI, qstr)
            qset = RDF.Query(qstr, query_language="triples")
            #construct cache - dictionary with a predicates as keys, values are lists for multiply statements
            self.cache = {}
            for statement in qset.execute(self.model).as_stream():
                #need to convert from RDF.Node to string shortcuts - we cann't lookup dictionary by it
                predicate = uri_to_shortcut(statement.predicate.uri) #(statement.predicate)
                #if self.cache.has_key(predicate):
                #    if type(self.cache[predicate]) is list:
                #        self.cache[predicate].append(RDFmodel(self.model,statement.object)) #do not cache derivative models
                #    else:
                #        self.cache[predicate] = [self.cache[predicate], RDFmodel(self.model,statement.object)]
                #else:
                #    self.cache[predicate] = RDFmodel(self.model,statement.object)
                # every value is a ListString, even for a single statement
                if self.cache.has_key(predicate):
                    self.cache[predicate].append(RDFmodel(self.model,statement.object))
                else:
                    self.cache[predicate] = ListString()
                    self.cache[predicate].append(RDFmodel(self.model,statement.object))
            self.cached = True #set up cache
        except: #bad query
            # NOTE(review): bare except silently swallows all errors here,
            # leaving self.cached False -- confirm this is intentional
            return
    def cache_checkup(self):
        '''silly cache-controller'''
        if not self.cached:
            self.cache_object()
    def cache_introspect(self, INDENT = 40):
        ''' to introspect what is inside in the cached RDF-object
        INDENT - the constant to format left-column(predicates) width'''
        self.cache_checkup()
        output = "%s {\n" % self.root_URI
        if not self.cached: return output + "}"
        for p, o in self.cache.iteritems():
            indent = INDENT-len(str(p))
            output += "%s:%s\t" % (p, " "*indent)
            if type(o) is list:
                output += "( "
                for item in o:
                    output += "%s, " % item
                output += ")\n"
            else:
                output += "%s\n" % o
        output += "\n}"
        return output
    def predicates(self):
        ''' returns a list of all predicates'''
        self.cache_checkup()
        return self.cache.keys()
    def __len__(self):
        # total number of objects over all predicates
        self.cache_checkup()
        objects = 0
        for p, o in self.cache.iteritems():
            if type(o) is list: objects += len(o)
            else: objects += 1
        return objects
    def __getitem__(self, predicate):
        ''' rmodel["foaf:knows"] '''
        ####HACK FOR DJANGO! - it looks in dict before attr, need to convert:
        ####HACK#2 - predicate could be unicode - convert to sting
        predicate = str(predicate).replace(ATTR_DELIMITER, DICT_DELIMITER, 1)
        #print "GETTING %s" % predicate
        if self.cached:
            # served straight from the predicate cache
            if not self.cache.has_key(predicate):
                #print "CACHE - no hit: %s" % predicate
                return None #stay quiet #raise AttributeError, predicate
            #print "CACHE %s => %s" % (predicate, self.cache[predicate])
            return self.cache[predicate]
        else:
            # no cache: issue a fresh triple query per access
            #print "REQUEST"
            qstr = "[%s] [%s] -" % (self.root_URI, shortcut_to_uri(predicate))
            results = ListString()
            try:
                qset = RDF.Query(qstr, query_language="triples")
                for statement in qset.execute(self.model).as_stream():
                    results.append(RDFmodel(self.model, statement.object))
                    #print RDFmodel(self.model, statement.object)
            except:
                #print qstr
                return results #stay quiet #raise AttributeError, predicate
            return results
    def __getattr__(self, predicate):
        ''' rmodel.foaf_knows '''
        # attribute style (foaf_knows) maps onto dict style (foaf:knows)
        predicate_delimited = predicate.replace(ATTR_DELIMITER, DICT_DELIMITER, 1)
        return self.__getitem__(predicate_delimited)
    def has_attribute(self, predicate):
        ''' quite silly so far - only with cache '''
        self.cache_checkup()
        if self.cache.has_key(predicate):
            return True
        return False
    def __setitem__(self, predicate, object):
        ''' rmodel["foaf:knows"] = object '''
        #need better check between URI and Literal
        # if the shortcut expansion is a no-op the value is treated as a
        # literal, otherwise as a resource URI
        if shortcut_to_uri(object) == object:
            node_object = RDF.Node(object)
        else:
            node_object = RDF.Node(uri_string=shortcut_to_uri(object))
        statement = RDF.Statement(RDF.Node(uri_string=self.root_URI),
                                  RDF.Node(uri_string=shortcut_to_uri(predicate)),
                                  node_object)
        if not self.model.contains_statement(statement):
            self.model.append(statement)
            #print 'Appended: ', repr(statement)
        self.cached = False #silly cache control
    def __setattr__(self, predicate, object):
        ''' rmodel.foaf_knows = object '''
        #do not use self.__dict__ - it'll show only run-time created attrs
        # names declared on the class (model, cache, ...) are real
        # attributes; everything else becomes an RDF statement
        if RDFmodel.__dict__.has_key(predicate):
            self.__dict__[predicate] = object
        else:
            predicate_delimited = predicate.replace(ATTR_DELIMITER, DICT_DELIMITER, 1)
            self.__setitem__(predicate_delimited, object)
    def __delitem__(self, predicate):
        ''' del rmodel["foaf:knows"] '''
        predicate = shortcut_to_uri(predicate)
        qstr = "[%s] [%s] -" % (self.root_URI, shortcut_to_uri(predicate))
        qset = RDF.Query(qstr, query_language="triples")
        for statement in qset.execute(self.model).as_stream():
            del self.model[statement]
        self.cached = False #silly cache control
    def __delattr__(self, predicate):
        ''' del rmodel.foaf_knows '''
        predicate_delimited = predicate.replace(ATTR_DELIMITER, DICT_DELIMITER, 1)
        self.__delitem__(predicate_delimited)
    def delete(self):
        ''' Delete all object statements. Redland doesn't delete literals/URIs, only connections '''
        for p in self.predicates():
            self.__delitem__(p)
    def __str__(self):
        return str(self.root_URI)
    def __unicode__(self):
        return unicode(self.root_URI)
    def __repr__(self):
        return self.cache_introspect()
    #Dirty hacks for Django template system. It needs this variable, it need this call! It's the way templates resolve variables
    alters_data=False #The template system wont execute a method if the method has alters_data=True set
    def __call__(self):
        #print "Executing %s" % self.root_URI
        return self
    def __eq__(self, value):
        # NOTE(review): __eq__ without __hash__ makes hashing inconsistent
        # with equality (Python 2 keeps the default hash) -- confirm ok
        if str(self.root_URI) == str(value):
            return True
        else:
            return False
    def keys(self):
        self.cache_checkup()
        return self.cache.keys()
    def items(self):
        self.cache_checkup()
        return self.cache.items()
    def values(self):
        self.cache_checkup()
        return self.cache.values()
    #TODO: is_list_value as_str as_list
'''
This module is home to the ScrapeEngine class.
@author: Cory Banack
'''
import clr
import resources
import log
from utils import sstr
from configuration import Configuration
from comicform import ComicForm
from seriesform import SeriesForm, SeriesFormResult
from issueform import IssueForm, IssueFormResult
from progressbarform import ProgressBarForm
from searchform import SearchForm, SearchFormResult
import utils
import db
import bookutils
from welcomeform import WelcomeForm
from finishform import FinishForm
clr.AddReference('System.Windows.Forms')
from System.Windows.Forms import Application, MessageBox, \
MessageBoxButtons, MessageBoxIcon
# =============================================================================
class ScrapeEngine(object):
'''
This class contains the main processing loop for the Comic Vine Scraper
script. Once initialized, you pass a collection of books to the
ScrapeEngine via the 'scrape' method.
Those books will be processed one at a time, with windows and dialogs
popping up to interact with the user as needed (including a single
ComicForm window, which is present the during the entire scrape to show
the user the status of the ScrapeEngine.)
'''
# ==========================================================================
def __init__(self, comicrack):
'''
Initializes this ScrapeEngine. It takes the ComicRack Application
object as it's only parameter.
'''
# the Configuration details for this ScrapeEngine. used everywhere.
self.config = Configuration()
# the ComicRack application object, i.e. the instance of ComicRack that
# is running this script. used everywhere.
self.comicrack = comicrack
# a list of methods that will each be fired whenever the 'scrape'
# operation begins processing/scraping a new book. these methods should
# look like:
# start_scrape(book, num_remaining)
#
# where 'book' is the new book being scraped and 'num_remaining' is the
# number of books left to scrape, including the one currently starting
self.start_scrape_listeners = []
# a list of no-argument methods that will each be fired once
# when (and if) the scrape operation gets cancelled.
self.cancel_listeners = []
# this variable can be set by calling the 'cancel' method. when it is
# set to True, it indicates that the entire script should be cancelled as
# soon as possible.
self.__cancelled_b = False
# a list of two values, the first value tells how many books this
# scrape engine has scraped, the second tells how many it has skipped.
# it becomes valid as soon as the main processing loop starts running.
self.__status = [0,0]
# ==========================================================================
def cancel(self):
'''
This method cancels the ScrapeEngine's current scrape operation,
and causes the main processing loop to exit on the next iteration;
all ComicBooks that haven't yet been scraped will be skipped.
This method is thread safe.
'''
if not self.__cancelled_b:
def delegate():
if not self.__cancelled_b:
self.__cancelled_b = True;
for cancel_listener in self.cancel_listeners:
cancel_listener()
utils.invoke(self.comicrack.MainWindow, delegate, False)
# ==========================================================================
def scrape(self, books):
'''
This is the entry-point to the ScraperEngine's main processing loop.
A typical invocation of the scraper script will create a new ScraperEngine
object and then call this method on it ONCE, passing it a list of all the
ComicBook objects that need to be scraped.
'''
try:
# a litte bit of logging to help make our debug logs more useful
log.debug()
log.debug("-"*80)
log.debug("CV Scraper Version: ", resources.SCRIPT_VERSION)
log.debug("Comic Rack Version: ", self.comicrack.App.ProductVersion)
log.debug("Cache Directory: ", resources.LOCAL_CACHE_DIRECTORY)
log.debug("Settings File: ", resources.SETTINGS_FILE)
log.debug("-"*80)
log.debug()
# do the main part of the script
if books:
# this populates the "status" variable, and the "config" variable
self.__scrape(books)
log.debug("Scraper terminated normally (scraped {0}, skipped {1})."\
.format(self.__status[0], self.__status[1]))
except Exception, ex:
log.handle_error(ex)
finally:
if self.config.summary_dialog_b:
try:
# show the user a dialog describing what was scraped
with FinishForm(self, self.__status) as finish_form:
finish_form.show_form()
except Exception, ex:
log.handle_error(ex)
# ==========================================================================
def __scrape(self, books):
'''
The private implementation of the 'scrape' method.
This method returns a list containing two integers. The first integer
is the number of books that were scraped, the second is the number that
were skipped over.
'''
# initialize the status member variable, and then keep it up-to-date
# from now on (so that it can be used to report the status of this
# scrape, even if an error occurs.)
self.__status = [0, len(books)];
# 1. show the welcome form. in addition to being a friendly summary of
# what's about to happen, it loads (and allows the user to tweak)
# the Configuration that we'll use for the remainder of this operation.
with WelcomeForm(self, books) as welcome_form:
self.__cancelled_b = not welcome_form.show_form()
self.config = Configuration()
self.config.load_defaults()
if self.__cancelled_b:
# 2a. user cancelled the scrape
return
else:
# 2b. print the entire configuration to the debug stream
log.debug(self.config)
log.debug()
# 2. sort the ComicBooks in the order that we're gonna loop them in
# (sort AFTER config is loaded cause config affects the sort!)
books = self.__sort_books(books)
# 3. display the ComicForm dialog. it is a special dialog that stays
# around for the entire time that the this scrape operation is running.
comic_form = ComicForm.show_threadsafe(self)
try:
# this caches the scraped data we've accumulated as we loop
scrape_cache = {}
# 4. start the "Main Processing Loop".
# notice the list of books can get longer while we're looping,
# if we choose to delay processing a book until the end.
i = 0;
orig_length = len(books)
while i < len(books):
if self.__cancelled_b: break
book = books[i]
# 4a. notify 'start_scrape_listeners' that we're scraping a new book
log.debug("======> scraping next comic book: '",book.filename_s,"'")
num_remaining = len(books) - i
for start_scrape in self.start_scrape_listeners:
start_scrape(book, num_remaining)
# 4b. ...keep trying to scrape that book until either it is scraped,
# the user chooses to skip it, or the user cancels altogether.
manual_search_b = self.config.specify_series_b
fast_rescrape_b = self.config.fast_rescrape_b and i < orig_length
bookstatus = self._BookStatus.UNSCRAPED
while bookstatus == self._BookStatus.UNSCRAPED \
and not self.__cancelled_b:
bookstatus = self.__scrape_book(book, scrape_cache,
manual_search_b, fast_rescrape_b)
if bookstatus == self._BookStatus.UNSCRAPED:
# this return code means 'no series could be found using
# the current (automatic or manual) search terms'. when
# that happens, force the user to chose the search terms.
manual_search_b = True
elif bookstatus == self._BookStatus.SCRAPED:
# book was scraped normally, all is good, update status
self.__status[0] += 1;
self.__status[1] -= 1;
elif bookstatus == self._BookStatus.SKIPPED:
# book was skipped, status is already correct for that book
pass;
elif bookstatus == self._BookStatus.DELAYED:
# put this book into the end of the list, where we can try
# rescraping (with fast_rescrape_b set to false this time)
# after we've handled the ones that we can do automatically.
books.append(book)
log.debug()
log.debug()
i = i + 1
finally:
self.comicrack.MainWindow.Activate() # fixes issue 159
if comic_form: comic_form.close_threadsafe()
# ==========================================================================
def __scrape_book(self, book, scrape_cache,
      manual_search_b, fast_rescrape_b):
   '''
   This method is the heart of the Main Processing Loop. It scrapes a single
   ComicBook object by first figuring out which issue entry in the database
   matches that book, and then copying those details into the ComicBook
   object's metadata fields.

   The steps involved are:
      1. Come up with search terms for the given 'book'
         - if 'manual_search_b' then ask the user to provide search terms
         - else guess the terms based on the book's series name
      2. Search database for all comic series that match those search terms.
      3. Ask the user which of the resulting series is the correct one
      4a. If the user picks a series:
         - we guess which issue in that series matches our ComicBook, OR
         - we ask the user to specify the correct issue (if we can't guess)
      4b. Else the user might decide to skip scraping this book.
      4c. Else the user might decide to start over with new search terms
      4d. Else the user might choose to specify the correct issue manually
      4e. Else the user might cancel the entire operation

   Throughout this process, the 'scrape_cache' (a map, empty at first) is
   used to speed things up.  It caches details from previous calls to this
   method, so if this method is called repeatedly, the same scrape_cache
   should be passed in each time.

   Iff 'fast_rescrape_b' is set to true, this method will attempt to find
   and use any database key that was written to the book during a previous
   scrape.  This key allows us to instantly identify a comic, thus skipping
   the steps described above.  If no key is available, just fall back to
   the user-interactive method of identifying the comic.

   RETURN VALUES
   _BookStatus.UNSCRAPED: if the book wasn't scraped, either because
      the search terms yielded no results, or the user opted to specify
      new search terms
   _BookStatus.SKIPPED: if this one book was skipped over by the user, or
      if the user cancelled the entire current scrape operation (check the
      status of the ScrapeEngine).
   _BookStatus.SCRAPED: if the book was scraped successfully, and now
      contains updated metadata.
   _BookStatus.DELAYED: if we attempted to do a fast_rescrape on the book,
      but failed because the database key was invalid.  the book has not
      been scraped successfully.
   '''
   # WARNING: THE CODE IN THIS METHOD IS EXTREMELY SUBTLE.
   # Be sure you understand EVERYTHING that's going on and why before you
   # try to change anything in here.  You've been warned!

   # let pending WinForms events run, and bail out early on cancellation
   Application.DoEvents()
   if self.__cancelled_b: return self._BookStatus.SKIPPED

   # 1. if this book is being 'rescraped', sometimes it already knows the
   #    correct issue_ref from a previous scrape.  METHOD EXIT: if that
   #    rescrape issue_ref is available, we use it immediately and exit.  if
   #    the issue_ref is the string "skip", we skip this book.
   issue_ref = bookutils.extract_issue_ref(book)
   if issue_ref == 'skip':
      log.debug("found SKIP tag, so skipping the scrape for this book.")
      return self._BookStatus.SKIPPED
   if issue_ref and fast_rescrape_b:
      log.debug("found rescrape tag in book, " +
         "scraping details directly: " + sstr(issue_ref));
      try:
         issue = db.query_issue(issue_ref)
         book.save_issue(issue, self)
         return self._BookStatus.SCRAPED
      except:
         # the stored key didn't resolve (e.g. network error or stale key);
         # DELAYED pushes the book to the end of the list for a manual retry
         log.debug_exc("Error rescraping details:")
         log.debug("we'll retry scraping this book again at the end.")
         return self._BookStatus.DELAYED

   # 2. search for all the series in the database that match the current
   #    book.  if info for this book's series has already been cached, we
   #    can skip this step.  METHOD EXIT: if we show the user the 'search'
   #    dialog, she may use it to skip this book or cancel the whole scrape.
   log.debug("no CVDB tag found in book, beginning search...")
   search_terms_s = None
   series_refs = None
   key = book.unique_series_s()
   if key in scrape_cache and not self.config.scrape_in_groups_b:
      # uncaching this key forces the scraper to treat this comic series
      # as though this was the first time we'd seen it
      del scrape_cache[key]
   if key not in scrape_cache:
      # get search terms for the book that we're scraping
      search_terms_s = book.series_s
      if manual_search_b or not search_terms_s:
         # show dialog asking the user for the right search terms
         search_terms_s = self.__choose_search_terms(search_terms_s)
         if search_terms_s == SearchFormResult.CANCEL:
            self.__cancelled_b = True
            return self._BookStatus.SKIPPED
         elif search_terms_s == SearchFormResult.SKIP:
            return self._BookStatus.SKIPPED
      # query the database for series_refs that match the search terms
      series_refs = self.__query_series_refs(search_terms_s)
      if self.__cancelled_b:
         return self._BookStatus.SKIPPED
      if not series_refs:
         MessageBox.Show(self.comicrack.MainWindow,
         "Couldn't find any comic books that match the search terms:\n\n"\
         " '" + search_terms_s + "'\n\n"\
         "Be sure that these search terms are spelled correctly!\n\n"\
         "Searches should include part (or all) of a comic book's "\
         "title,\nbut NOT its issue number, publisher, publication "\
         "date, etc.",
         "Search Failed", MessageBoxButtons.OK, MessageBoxIcon.Warning)
         return self._BookStatus.UNSCRAPED

   # 3. now that we have a set of series_refs that match this book,
   #    show the user the 'series dialog' so they can pick the right one.
   #    put the chosen series into the cache so the user won't have to
   #    pick it again for any future books that are in this book's series.
   #    METHOD EXIT: while viewing the series dialog, the user might skip,
   #    request to re-search, or cancel the entire scrape operation.
   while True:
      force_issue_dialog_b = False
      if key not in scrape_cache:
         if not series_refs or not search_terms_s:
            return self._BookStatus.UNSCRAPED # rare but possible, bug 77
         result = self.__choose_series_ref(book, search_terms_s, series_refs)
         if SeriesFormResult.CANCEL==result.get_name() or self.__cancelled_b:
            self.__cancelled_b = True
            return self._BookStatus.SKIPPED # user says 'cancel'
         elif SeriesFormResult.SKIP == result.get_name():
            return self._BookStatus.SKIPPED # user says 'skip this book'
         elif SeriesFormResult.SEARCH == result.get_name():
            return self._BookStatus.UNSCRAPED # user says 'search again'
         elif SeriesFormResult.SHOW == result.get_name() or \
               SeriesFormResult.OK == result.get_name(): # user says 'ok'
            scraped_series = self._ScrapedSeries()
            scraped_series.series_ref = result.get_ref()
            # 'SHOW' means the user wants to see the issue dialog even if
            # an issue could be matched automatically
            force_issue_dialog_b = SeriesFormResult.SHOW == result.get_name()
            scrape_cache[key] = scraped_series

      # one way or another, the chosen series is now in the cache.  get it.
      scraped_series = scrape_cache[key]

      # 4. now that we know the right series for this book, query the
      #    database for the issues in that series.  then try to pick one,
      #    either automatically, or by showing the user the "issues dialog".
      #    also, cache the issue data, so we don't have to query again if we
      #    scrape another book from this series.  METHOD EXIT: if the user
      #    sees the query dialog, she may skip, cancel the whole scrape,
      #    go back to the series dialog, or actually choose an issue.
      log.debug("searching for the right issue in '",
         scraped_series.series_ref, "'")

      # get the issue refs for our chosen series (lazily, once per series)
      if not scraped_series.issue_refs:
         scraped_series.issue_refs = \
            self.__query_issue_refs(scraped_series.series_ref)
         if self.__cancelled_b:
            return self._BookStatus.SKIPPED

      # choose the issue that matches the book we are scraping
      result = self.__choose_issue_ref( book, scraped_series.series_ref,
         scraped_series.issue_refs, force_issue_dialog_b)

      if result.get_name() == IssueFormResult.CANCEL or self.__cancelled_b:
         self.__cancelled_b = True
         return self._BookStatus.SKIPPED
      elif result.get_name() == IssueFormResult.SKIP:
         if force_issue_dialog_b:
            # the user clicked 'show issues', then 'skip', so we have to
            # ignore his previous series selection.
            del scrape_cache[key]
         return self._BookStatus.SKIPPED
      elif result.get_name() == IssueFormResult.BACK:
         # ignore user's previous series selection and loop back around
         # to show the series dialog again
         del scrape_cache[key]
      else:
         # we've got the right issue!  copy its data into the book.
         log.debug("querying comicvine for issue details...")
         issue = db.query_issue( result.get_ref() )
         book.save_issue(issue, self)
         return self._BookStatus.SCRAPED
   raise Exception("should never get here")
# ==========================================================================
def __sort_books(self, books):
   '''
   Examines the given list of ComicBook objects, and returns a new list
   that contains the same comics, but sorted in order of increasing series
   name, and where the series names are the same, in order of increasing
   issue number.  Comics for which an IssueRef can be instantly generated
   (comics that have been scraped before) will automatically be sorted to
   the beginning of the list.
   '''
   # pads an issue number string (based on its numeric value) so that a
   # plain string comparison orders numbers sensibly, i.e. '2' before '11'
   def pad(numstr):
      try:
         value = float(numstr.lower().strip('abcdefgh'))
      except:
         return numstr # not numeric at all; compare the raw string
      if value < 10: return "000" + numstr
      if value < 100: return "00" + numstr
      if value < 1000: return "0" + numstr
      return numstr

   # the comparator: series name first, then (padded) issue number
   def compare_books(book1, book2):
      outcome = book1.unique_series_s().CompareTo(book2.unique_series_s())
      if outcome != 0:
         return outcome
      num1 = book1.issue_num_s if book1.issue_num_s else ''
      num2 = book2.issue_num_s if book2.issue_num_s else ''
      return pad(num1).CompareTo(pad(num2))

   # split the books into ones that will scrape quickly (because they are
   # rescrapes) and ones that have never been scraped before.  sort each
   # group separately, then put the fast group first, so that all of the
   # user interaction happens at the end of the scrape.  (see issue 161.)
   fast_scrape_books = []
   slow_scrape_books = []
   if self.config.fast_rescrape_b:
      for book in books:
         group = fast_scrape_books if bookutils.extract_issue_ref(book) \
            else slow_scrape_books
         group.append(book)
   else:
      slow_scrape_books = list(books)
   slow_scrape_books.sort(cmp=compare_books)
   fast_scrape_books.sort(cmp=compare_books)
   return fast_scrape_books + slow_scrape_books
# ==========================================================================
def __choose_search_terms(self, initial_search_terms=""):
   '''
   Displays a dialog asking the user for search terms.  The given search
   terms are used to pre-populate the dialog.

   Returns a non-empty string containing the user's specified search terms,
   or SearchFormResult.CANCEL if the user cancelled the scrape operation, or
   SearchFormResult.SKIP if the user wants to skip the current book.
   '''
   log.debug('asking user for series search terms...')
   with SearchForm(self, initial_search_terms) as search_form:
      terms = search_form.show_form() # blocks until the dialog closes
      if terms == SearchFormResult.CANCEL:
         log.debug("...but the user clicked 'cancel'")
      elif terms == SearchFormResult.SKIP:
         log.debug("...but the user clicked 'skip'")
      else:
         log.debug("...and the user provided: '", terms, "'")
      return terms
# ==========================================================================
def __choose_series_ref(self, book, search_terms_s, series_refs):
   '''
   Displays the SeriesForm, a dialog that shows all of the SeriesRefs from
   a database query and asks the user to choose one.

   'book' -> the book that we are currently scraping
   'search_terms_s' -> the search terms we used to find the SeriesRefs
   'series_refs' -> a set of SeriesRefs; the results of the search

   Returns a SeriesFormResult object (from the SeriesForm).  When there
   are no series_refs at all, the default result is 'SEARCH' (i.e. ask
   the user for new search terms).
   '''
   outcome = SeriesFormResult(SeriesFormResult.SEARCH) # default
   if series_refs:
      log.debug('displaying the series selection dialog...')
      with SeriesForm(self, book, series_refs, search_terms_s) as sform:
         outcome = sform.show_form()
      log.debug(' ...user chose to ', outcome.get_debug_string())
   return outcome
# ==========================================================================
def __choose_issue_ref(self, book, series_ref, issue_refs, force_b):
   '''
   This method chooses the IssueRef that matches the given book from among
   the given set of IssueRefs.  It may do this automatically if it can, or
   it may display the IssueForm, a dialog that displays the IssueRefs and
   asks the user to choose one.

   'book' -> the book that we are currently scraping
   'series_ref' -> the SeriesRef for the given set of issue refs
   'issue_refs' -> a set of IssueRefs; the ones we're choosing from
   'force_b' -> whether we should force the IssueForm to be shown, or
      only show it when we have no choice.

   This method returns an IssueFormResult object (from the IssueForm).
   '''
   result = None; # the return value; must start out null
   series_name_s = series_ref.series_name_s
   issue_num_s = '' if not book.issue_num_s else book.issue_num_s

   # 1. try to find the issue number directly in the given issue_refs.
   if issue_num_s:
      counts = {}
      for ref in issue_refs:
         counts[ref.issue_num_s] = counts.get(ref.issue_num_s, 0) + 1
      if issue_num_s in counts and counts[issue_num_s] > 1:
         # the same issue number appears more than once! user must pick.
         # (note: 'result' stays None here; we merely narrow issue_refs down
         # to the duplicates, and fall through to the dialog in step 4.)
         log.debug("found more than one issue number ", issue_num_s, )
         issue_refs = \
            [ref for ref in issue_refs if ref.issue_num_s == issue_num_s]
      else:
         for ref in issue_refs:
            # strip leading zeroes (see issue 81)
            if ref.issue_num_s.lstrip('0') == issue_num_s.lstrip('0'):
               result = IssueFormResult(IssueFormResult.OK, ref) # found it!
               log.debug("found info for issue number ", issue_num_s, )
               break

   # 2. if we don't know the issue number, and there is only one issue in
   # the series, then it is very likely that the database simply has no issue
   # *number* for the book (this happens a lot).  the user has already seen
   # the cover for this issue in the series dialog and chosen it, so no
   # point in making them choose it again...just use the one choice we have
   if len(issue_refs) == 1 and not issue_num_s and not force_b:
      result = IssueFormResult(IssueFormResult.OK, list(issue_refs)[0])

   # 3. if there are no issue_refs, that's a problem; tell the user
   if len(issue_refs) == 0:
      MessageBox.Show(self.comicrack.MainWindow,
      "You selected '" + series_name_s + "'.\n\n"
      "This series cannot be displayed because it does not \n"
      "contain any issues in the Comic Vine database.\n\n"
      "You can add missing issues at: http://comicvine.com/",
      "Series has No Issues", MessageBoxButtons.OK, MessageBoxIcon.Warning)
      result = IssueFormResult(IssueFormResult.BACK)
      log.debug("no issues in this series; forcing user to go back...")
   elif force_b or not result:
      # 4. if we are forced to, or we have no result yet, display IssueForm.
      # any automatically-matched issue from step 1 is passed along as a
      # 'hint' so the dialog can pre-select it.
      forcing_s = ' (forced)' if force_b else ''
      hint = result.get_ref() if result else None
      log.debug("displaying the issue selection dialog", forcing_s, "...")
      with IssueForm(self, hint, issue_refs, series_name_s) as issue_form:
         result = issue_form.show_form()
         result = result if result else IssueFormResult(IssueFormResult.BACK)
      log.debug(' ...user chose to ', result.get_debug_string())
   return result # will not be None now
# ==========================================================================
def __query_series_refs(self, search_terms_s):
   '''
   This method queries the online database for a set of SeriesRef objects
   that match the given (non-empty) search terms.  It will return a set
   of SeriesRefs, which may be empty if no matches could be found (or if
   the user cancels the search via the progress dialog).
   '''
   if not search_terms_s:
      raise Exception("cannot query for empty search terms")

   with ProgressBarForm(self.comicrack.MainWindow, self, 1) as progbar:
      # this function gets called each time a series_ref is obtained
      def callback(num_matches_n, expected_callbacks_n):
         if not self.__cancelled_b:
            if not progbar.Visible:
               # first callback: size the progress bar, then show it
               progbar.prog.Maximum = expected_callbacks_n
               progbar.show_form()
            if progbar.Visible and not self.__cancelled_b:
               progbar.prog.PerformStep()
               progbar.Text = 'Searching Comic Vine (' + \
                  sstr(num_matches_n) + ' matches)'
            Application.DoEvents()
         # returning True tells the database query to cancel itself
         return self.__cancelled_b

      log.debug("searching for series that match '", search_terms_s, "'...")
      series_refs = db.query_series_refs(search_terms_s, callback)
   if len(series_refs) == 0:
      log.debug("...no results found for this search.")
   else:
      log.debug("...found {0} results".format(len(series_refs)))
   return series_refs
# ==========================================================================
def __query_issue_refs(self, series_ref):
   '''
   This method queries the online database for a set of IssueRef objects
   that match the given SeriesRef.  The returned set may be empty if no
   matches were found (or if the user cancels via the progress dialog).
   '''
   log.debug("finding all issues for '", series_ref, "'...")
   with ProgressBarForm(self.comicrack.MainWindow, self, 1) as progform:
      # this function gets called each time another issue_ref is obtained
      def callback(complete_ratio_n):
         # clamp the reported progress ratio into [0.0, 1.0]
         complete_ratio_n = max(0.0, min(1.0, complete_ratio_n))
         if complete_ratio_n < 1.0 and not progform.Visible\
               and not self.__cancelled_b:
            # first meaningful callback: initialize and show the bar
            progform.prog.Maximum = 100
            progform.prog.Value = complete_ratio_n * 100
            progform.show_form()
         if progform.Visible and not self.__cancelled_b:
            progform.prog.Value = complete_ratio_n * 100
            progform.Text = 'Loading Series Details (' + \
               sstr((int)(complete_ratio_n * 100)) + "% complete)"
         Application.DoEvents()
         # returning True tells the database query to cancel itself
         return self.__cancelled_b
      return db.query_issue_refs(series_ref, callback)
# ==========================================================================
class _ScrapedSeries(object):
   '''
   Holds the scraped information for one particular comic book series:
   the SeriesRef identifying the series, plus all of the IssueRefs that
   belong to that series (None until they have actually been queried).
   '''
   def __init__(self):
      # both attributes start out unset; the scrape loop fills them in
      self.series_ref = None
      self.issue_refs = None
# ==========================================================================
class _BookStatus(object):
   '''
   Constants used to represent the various states that a book can be in
   while the scraper is running or finished.
   '''
   # the book was successfully scraped; its metadata has been updated
   SCRAPED = "scraped"
   # the user chose to skip this book (or cancelled the whole scrape)
   SKIPPED = "skipped"
   # the book hasn't been scraped yet
   UNSCRAPED = "unscraped"
   # a fast rescrape failed; the book is re-queued to try again later
   DELAYED = "delayed"
| Python |
###############################################################################
#
# ComicVineScraper.py
#
# This is the 'entry point' into the Comic Vine Scraper add-on for
# ComicRack. This script requires the latest version of ComicRack in order
# to run.
#
#
# Credits: - written and maintained by Cory Banack
# - based on the ComicVineInfo script started by wadegiles
# and perezmu (from the ComicRack forum)
# - xml2py.py and ipypulldom.py modules (c) DevHawk.net
# - ComicVine API (c) whiskeymedia.com (http://api.comicvine.com)
#
# This software is licensed under the Apache 2.0 software license.
# http://www.apache.org/licenses/LICENSE-2.0.html
#
###############################################################################
# coryhigh: FIX THE ABOVE COMMENT!!
import clr
import log
import re
from scrapeengine import ScrapeEngine
from utils import sstr
from comicbook import ComicBook
clr.AddReference('System')
from System.Threading import ThreadExceptionEventHandler
clr.AddReference('System.Windows.Forms')
from System.Windows.Forms import Application, MessageBox, \
MessageBoxButtons, MessageBoxIcon
if False:
   # this gets rid of a stubborn compiler warning: 'ComicRack' is never
   # imported in this script (presumably it is provided by the ComicRack
   # host at runtime -- TODO confirm), so this never-executed assignment
   # just declares the name for static analysis tools
   ComicRack = None
# ============================================================================
# Don't change this comment; it's needed to integrate into ComicRack!
#
#@Name Comic Vine Scraper...
#@Image comicvinescraper.png
#@Key comic-vine-scraper-cbanack
#@Hook Books, Editor
# ============================================================================
def ComicVineScraper(books):
   '''
   The entry point that ComicRack invokes for this add-on.  Installs the
   logging system and a WinForms exception handler, validates the runtime
   environment, and then scrapes the given collection of ComicRack book
   objects with a ScrapeEngine.  Always tears the logging/handler state
   back down before returning.
   '''
   # keep a reference to the installed delegate, so the finally clause can
   # (a) remove the exact delegate we added and (b) safely do nothing if
   # setup failed before the handler was installed.  (the old code built a
   # second delegate for removal and referenced 'exception_handler'
   # unconditionally, which raised NameError if log.install() threw.)
   handler = None
   try:
      # fire up the debug logging system
      log.install(ComicRack.MainWindow)

      # install a handler to catch uncaught Winforms exceptions
      def exception_handler(sender, event):
         log.handle_error(event.Exception)
      handler = ThreadExceptionEventHandler(exception_handler)
      Application.ThreadException += handler

      # see if we're in a valid environment
      if __validate_environment() and books:
         # create a Scraping Engine and use it to scrape the given books
         engine = ScrapeEngine(ComicRack)
         comic_books = [ ComicBook(book, engine) for book in books ]
         engine.scrape(comic_books)
   finally:
      # make sure the Winform exception handler is removed, if it was added
      if handler is not None:
         Application.ThreadException -= handler
      # shut down the logging system
      log.uninstall()
# ============================================================================
def __validate_environment():
   '''
   Checks to see if the current environment is valid to run this script in.
   If it is not, an error message is displayed to explain the problem.

   Returns True if the current environment is valid, False if it is not.
   '''
   # the minimum ComicRack version required for a valid environment
   REQUIRED_MAJOR=0
   REQUIRED_MINOR=9
   REQUIRED_BUILD=129

   valid_environment = True
   try:
      version = re.split(r'\.', ComicRack.App.ProductVersion)

      # compare versions as (major, minor, build) tuples.  the old float
      # based scheme ("major*5000+minor" + ".build") mis-ordered builds with
      # different digit counts (e.g. it treated 0.9.13 as newer than
      # 0.9.129, because 45.13 > 45.129).  tuple comparison is exact.
      # (also, the old helper shadowed the builtin 'hash'.)
      def version_key(major, minor, build):
         return (major, minor, build)

      valid_environment = \
         version_key(int(version[0]), int(version[1]), int(version[2])) >= \
         version_key(REQUIRED_MAJOR, REQUIRED_MINOR, REQUIRED_BUILD)

      if not valid_environment:
         log.debug("WARNING: script requires ComicRack ", REQUIRED_MAJOR, '.',
            REQUIRED_MINOR, '.', REQUIRED_BUILD, ' or higher.  Exiting...')
         MessageBox.Show( ComicRack.MainWindow,
            'This script requires a newer version of ComicRack in order to\n' +
            'run properly.  Please download and install the latest version\n' +
            'from the ComicRack website, and then try again.',
            'ComicRack Update Required',
            MessageBoxButtons.OK, MessageBoxIcon.Warning)
   except:
      # deliberately broad: version validation must never crash the script.
      # if the version string can't be parsed, assume the environment is fine.
      log.debug_exc("WARNING: couldn't validate comicrack version")
      valid_environment = True
   return valid_environment
| Python |
#corylow: comment and cleanup this file
###############################################################################
# Created on Feb 7, 2010
# @author: Cory Banack
################################################################################
import clr;
clr.AddReference('System.Drawing')
from System.Drawing import Image
# corylow: make this into a static class rather than module variables
# the directory that this script lives in, produced by slicing this module's
# filename off of __file__ (so it keeps its trailing path separator)
_SCRIPT_DIRECTORY = __file__[:-len('resources.py')]
# on-disk locations for the scraper's cache and persisted settings
LOCAL_CACHE_DIRECTORY = _SCRIPT_DIRECTORY + 'localCache/'
SETTINGS_FILE = _SCRIPT_DIRECTORY + 'settings.dat'
GEOMETRY_FILE = _SCRIPT_DIRECTORY + 'geometry.dat'

# do NOT change the following lines. they are modified by the build process!
SCRIPT_VERSION = "!DEV!"
if SCRIPT_VERSION.startswith("!"):
   # the build never substituted a real version; we're in a dev checkout
   SCRIPT_VERSION = "0.0.0"
def createComicVineLogo():
   '''
   Loads a new .NET Image object containing the ComicVine logo, read from
   the image file that ships alongside this script.  The caller is
   responsible for Dispose()ing the returned Image when done with it.
   '''
   # use the module's precomputed _SCRIPT_DIRECTORY instead of re-deriving
   # the path from __file__ and __name__ (equivalent value, but consistent
   # with the other paths above, and avoids shadowing the builtin 'dir')
   return Image.FromFile( _SCRIPT_DIRECTORY + 'comicvinelogo.png')
'''
@author: Cory Banack
'''
#corylow: comment and cleanup this file
from utils import sstr
class IssueRef(object):
   '''
   A lightweight, hashable reference to a single comic book issue in the
   database: the issue's number (a string, possibly empty) plus its unique
   database key.  Instances compare and hash solely on the key.
   '''
   def __init__(self, issue_num_s, issue_key):
      # issue_num_s may be '' or any other string;
      # issue_key must be truthy ('not issue_key' == False)
      if not issue_key or issue_num_s is None:
         raise Exception()
      self.issue_key = issue_key
      self.issue_num_s = issue_num_s.strip()
      # all comparisons and hashing go through this key string
      self._cmpkey_s = sstr(self.issue_key)

   def __str__(self):
      return "Issue #" + sstr(self.issue_num_s) \
         + " (" + sstr(self.issue_key) + ")"

   def __cmp__(self, other):
      # anything that isn't comparable sorts before us
      if not hasattr(other, "_cmpkey_s"):
         return -1
      if self._cmpkey_s == other._cmpkey_s:
         return 0
      return -1 if self._cmpkey_s < other._cmpkey_s else 1

   def __hash__(self):
      return self._cmpkey_s.__hash__()
class SeriesRef(object):
   '''
   A lightweight, hashable reference to a single comic book series in the
   database.  Carries the series' unique key, plus a few details (name,
   start year, publisher, issue count, thumbnail url) for display purposes.
   Instances compare and hash solely on their series_key.
   '''
   def __init__(self, series_key, series_name_s, start_year_s, publisher_s,
         issue_count_s, thumb_url_s):
      '''
      'series_key' -> must be truthy; uniquely identifies the series
      'series_name_s' -> if empty, a name is synthesized from the key
      'start_year_s' -> '' or a string of digits
      'publisher_s' -> may be ''
      'issue_count_s' -> '' or a string of digits (stored as an int)
      'thumb_url_s' -> None, or a thumbnail url address
      '''
      if not series_key:
         raise Exception()
      self.series_key = series_key
      # unescape xml-escaped ampersands in the series name.  (the previous
      # code replaced '&' with '&', which was a no-op -- presumably a
      # mangled '&amp;' -> '&' replacement.)
      self.series_name_s = ("Series " + sstr(series_key)) if not series_name_s\
         else series_name_s.strip().replace('&amp;', '&')
      self.start_year_s = '' if not start_year_s or not start_year_s.isdigit() \
         else start_year_s.strip()
      self.publisher_s = '' if not publisher_s else publisher_s.strip()
      self.issue_count_n=0 if not issue_count_s or not issue_count_s.isdigit() \
         else int(issue_count_s)
      self.thumb_url_s = None if not thumb_url_s else thumb_url_s.strip()
      # used only for comparisons and hashing
      self._cmpkey_s = sstr(self.series_key)

   def __str__(self):
      return sstr(self.series_name_s) + " (" + sstr(self.series_key) + ")"

   def __cmp__(self, other):
      # anything that isn't comparable sorts before us
      if not hasattr(other, "_cmpkey_s"):
         return -1
      if self._cmpkey_s < other._cmpkey_s:
         return -1
      else:
         return 1 if self._cmpkey_s > other._cmpkey_s else 0

   def __hash__(self):
      return self._cmpkey_s.__hash__()
class Issue(object):
   '''
   A simple mutable data container holding all of the details that were
   scraped for a single comic book issue.  Every field starts out empty
   (''/0/[]); the database layer fills the fields in as it parses the
   issue's record.
   '''
   def __init__(self):
      # identity
      self.issue_key = ''          # unique database key for this issue
      self.issue_num_s = ''        # the issue number (a string; may be '')
      self.title_s = ''
      self.series_name_s = ''
      self.publisher_s = ''
      self.imprint_s = ''
      self.alt_series_name_s = ''
      self.summary_s = ''
      # publication date details
      self.month_s = ''
      self.year_s = ''
      self.start_year_s = ''       # the year the series started
      # story contents
      self.characters_s = ''
      self.teams_s = ''
      self.locations_s = ''
      # creator credits
      self.writer_s = ''
      self.penciller_s = ''
      self.inker_s = ''
      self.cover_artist_s = ''
      self.editor_s = ''
      self.colorist_s = ''
      self.letterer_s = ''
      # miscellaneous
      self.webpage_s = ''
      self.rating_n = 0            # community rating; MUST be a float
      self.image_urls = []         # cover art urls (a fresh list per issue)

   def __str__(self):
      return "Issue #" + sstr(self.issue_num_s) \
         + " (" + sstr(self.issue_key) + ")"
# =============================================================================
class DatabaseConnectionError(Exception):
   '''
   A special exception that gets thrown anytime there is a network error
   while trying to contact the scraper database.  This is normally because
   the database is down or unresponsive, or the user's internet connection
   is down.
   '''
   # ==========================================================================
   def __init__(self, database_name_s, url_s, underlying):
      '''
      database_name_s -> the name of the database that raised this error
      url_s -> the url that caused the problem
      underlying -> the underlying io exception or error
      '''
      # delegate properly to the parent class.  (the old code called
      # super(Exception, self).__init__, which skips Exception in the MRO.)
      super(DatabaseConnectionError, self).__init__(sstr(database_name_s) +
         " database could not be reached\n"\
         "url: " + url_s + "\nCAUSE: " +
         sstr(underlying).replace('\r','') ) # strip .NET-style newlines
      self.__database_name_s = sstr(database_name_s)

   # ==========================================================================
   def db_name_s(self):
      ''' Returns the name of the database that raised this exception. '''
      return self.__database_name_s
'''
This module is the gateway to whatever "database" this script uses to
obtain information about comic books. The exact nature of this database is
intentionally vague in order to remain modular and interchangeable. In other
words, the behaviour of this "comic book database" can be understood entirely
by knowing the contract described by the public functions in this module.
While no promises about the underlying implementation are made, you should
expect that this implementation accesses data from a remote source, and
therefore may be quite slow!
@author: Cory Banack
'''
import cvdb
import utils
# a module-level cache for storing the results of series reference searches;
# maps 'search terms string' -> 'list of SeriesRef objects'.  cleared
# whenever it grows past 10 entries (see query_series_refs).
__series_ref_cache = {}
# =============================================================================
def query_series_refs(search_terms_s, callback_function=lambda x,y : False):
   '''
   This method takes some search terms (space separated words) and uses them
   to query the database for comic book series that match those words.
   Each matching series is encoded as a SeriesRef object; this method returns
   a set of them.  The set may be empty if no series matches the search, or
   if the search is cancelled (see below).

   You can pass in an optional callback function, which MAY be called
   periodically while the search is accumulating results.  This function takes
   two arguments:
      an integer: how many matches have been found so far
      an integer: how many times the callback is expected to be called
   The function must also return a boolean indicating whether or not to CANCEL
   the search.  If this returned value is ever true, this query should
   stop immediately and return an empty set of results.
   '''
   # results are cached for faster repeat lookups
   global __series_ref_cache
   if search_terms_s in __series_ref_cache:
      # cache hit: return a copy, so callers can't mutate the cached list
      return list(__series_ref_cache[search_terms_s])
   else:
      series_refs = cvdb._query_series_refs(search_terms_s, callback_function)
      if len(__series_ref_cache ) > 10:
         __series_ref_cache = {} # keep the cache from ever getting too big
      __series_ref_cache[search_terms_s] = list(series_refs)
      return series_refs
# =============================================================================
def query_issue_refs(series_ref, callback_function=lambda x : False):
   '''
   This method takes a SeriesRef object (not None) and uses it to
   query the database for all comic book issues in that series.
   Each issue is encoded as an IssueRef object; this method returns
   a set of them.  The set may be empty if the series has no
   issues, or if the query is cancelled (see below).

   You can pass in an optional callback function, which MAY be called
   periodically while the IssueRefs are accumulating.  This function takes
   one float argument: the percentage (between 0.0 and 1.0) of the available
   IssueRefs that have been read in so far.
   The function must also return a boolean indicating whether or not to cancel
   the query.  If this returned value is true, the query should stop
   immediately and return an empty set of results.
   '''
   # NOTE: the default callback takes exactly ONE argument, matching the
   # contract documented above.  (it used to be 'lambda x,y : False', which
   # would raise TypeError whenever the database invoked the default.)
   return cvdb._query_issue_refs(series_ref, callback_function)
# =============================================================================
def query_issue(issue_ref):
   '''
   Queries the database for the full details of the given issue (an
   IssueRef object, not None), and returns those details as a new
   Issue object.
   '''
   return cvdb._query_issue(issue_ref)
# =============================================================================
def query_image(ref):
   '''
   This method takes either an IssueRef object, a SeriesRef object, or a direct
   URL string, and queries the database for a single associated cover image.
   If no image can be found, if an error occurs, or if the given ref is None,
   this method will return None.

   Note that the returned Image object (if there is one) is a .NET Image object,
   which must be explicitly Disposed() when you are done with it, in order
   to prevent memory leaks.
   '''
   # the raw image is post-processed by utils.strip_back_cover before being
   # returned -- presumably to trim off an attached back-cover half of a
   # scan; see that function for the exact behaviour
   return utils.strip_back_cover( cvdb._query_image(ref) )
# encoding: utf-8
'''
This module is used to find imprint names for publishers in the ComicVine
database.
Since ComicVine doesn't directly store imprint information, we are forced to
maintain a internal tables of known imprints and their publishers in order to
do this.
If you run this module, it will query the ComicVine database and report the
discrepancies between the publishers in ComicVine, and the ones in our
internal tables (i.e. when a new publisher appears, you can easily find out
about it and add it to the internal tables.)
@author: Cory Banack
'''
import clr
import log
import re
from utils import sstr
clr.AddReference('System')
from System import Text
from System.IO import StreamReader, StringWriter
clr.AddReference('System.Net')
from System.Net import WebException, WebRequest
clr.AddReference('System.Web')
from System.Web import HttpUtility
# =============================================================================
def find_parent_publisher(imprint_s):
    '''
    Takes a publisher string that might be an imprint of another publisher.
    If it is a known imprint, returns the publisher string for that
    imprint's parent publisher; otherwise returns the (stripped) string
    that was passed in.

    Both the given and returned strings should EXACTLY match their
    corresponding values in the ComicVine database (i.e. case,
    punctuation, etc.)
    '''
    cleaned_s = imprint_s.strip()  # the lookup table's keys are stripped, too
    return __imprint_map.get(cleaned_s, cleaned_s)
# the publishers that we know about that have at least one imprint
# (each value must EXACTLY match that publisher's name in the ComicVine
# database, including case and punctuation -- see find_parent_publisher)
__MARVEL = "Marvel"
__DC = "DC Comics"
__DARKHORSE = "Dark Horse Comics"
__MALIBU = "Malibu"
__AMRYL = "Amryl Entertainment"
__AVATAR = "Avatar Press"
__WIZARD = "Wizard"
__TOKYOPOP = "Tokyopop"
__DYNAMITE = "Dynamite Entertainment"
__IMAGE = "Image"
__HEROIC = "Heroic Publishing"
__PENGUIN = "Penguin Group"
__HAKUSENSHA = "Hakusensha"
__APE = "Ape Entertainment"
__NBM = "Nbm"
__RADIO = "Radio Comix"
__SLG = "Slg Publishing"
__TOKUMA = "Tokuma Shoten"

# the mapping of imprint names to their parent publisher names.  keys are
# stripped of surrounding whitespace (find_parent_publisher strips its
# argument before looking it up here), and must exactly match ComicVine.
__imprint_map = {
    "2000AD": __DC,
    "Adventure": __MALIBU,
    "America's Best Comics": __DC, # originally image
    "Wildstorm": __DC,
    "Antimatter": __AMRYL,
    "Apparat": __AVATAR,
    "Black Bull": __WIZARD,
    "Blu Manga": __TOKYOPOP,
    "Chaos! Comics": __DYNAMITE,
    "Cliffhanger": __DC,
    "CMX": __DC,
    "Dark Horse Manga": __DARKHORSE,
    "Desperado Publishing": __IMAGE,
    "Epic": __MARVEL,
    "Focus": __DC,
    "Helix": __DC,
    "Hero Comics": __HEROIC,
    "Homage comics": __DC, # i.e. wildstorm
    "Hudson Street Press": __PENGUIN,
    "Icon Comics": __MARVEL,
    "Impact": __DC,
    "Jets Comics": __HAKUSENSHA,
    "KiZoic": __APE,
    "Marvel Digital Comics Unlimited" : __MARVEL,
    "Marvel Knights": __MARVEL,
    "Marvel Music": __MARVEL,
    "Marvel UK": __MARVEL,
    "Max": __MARVEL,
    "Milestone": __DC,
    "Minx": __DC,
    "Papercutz": __NBM,
    "Paradox Press": __DC,
    "Piranha Press": __DC,
    "Razorline": __MARVEL,
    "ShadowLine": __IMAGE,
    "Sin Factory Comix" : __RADIO,
    "Slave Labor": __SLG,
    "Soleil": __MARVEL,
    "Tangent Comics": __DC,
    "Tokuma Comics": __TOKUMA,
    "Ultraverse": __MALIBU,
    "Vertigo": __DC,
    "Zuda Comics": __DC,
}
# a set of all non-imprint publishers in the comic vine database
# (used only for the imprint search script below)
__other_publishers = frozenset([
__MARVEL,
__DC,
__DARKHORSE,
__MALIBU,
__AMRYL,
__AVATAR,
__WIZARD,
__TOKYOPOP,
__DYNAMITE,
__IMAGE,
__HEROIC,
__PENGUIN,
__HAKUSENSHA,
__APE,
__NBM,
__RADIO,
__SLG,
__TOKUMA,
"01Comics.com",
"12 Gates Productions",
"12-Gauge Comics",
"1st Amendment Publishing",
"2-D Graphics",
"20000 Leagues",
"215 INK",
"21st Century Sandshark Studios",
"24 Hour Cynic",
"2Werk",
"3 Finger Prints",
"3-D Zone",
"3-M",
"360ep",
"3DO Comics",
"3H Productions",
"3ntini Editore",
"4 Winds",
"5th Panel Comics",
"801 Media",
"803 Studios",
"88 MPH",
"8th Day Entertainment",
"9th Circle Studios",
"A",
"A D Vision",
"A Division Of Malibu Graphics",
"A List Comics",
"A Silent Comics Inc",
"A Wave Blue World",
"A-Plus",
"A.M. Works",
"A.R.C.",
"A10 Comics",
"AAA Milwaukee",
"AAA Pop",
"AC",
"ADV Manga",
"AKA Comics",
"ANIA",
"APComics",
"AS Film Inform",
"Aardvark",
"Aardwolf Productions",
"Aazurn",
"Abaculus",
"Abacus Press",
"Abbeville Press",
"Aberration Press",
"Abiogenesis Press",
"Ablaze Media",
"Abnormal Fun",
"About Comics",
"Abril",
"Absence Of Ink",
"Absolute Blue Graphics",
"Absolute Tyrant",
"Abstract Studio",
"Ac Collector Classics",
"Academy Comics Ltd",
"Acclaim",
"Ace Comics",
"Ace Magazines",
"Ace Publications Inc",
"Acetylene",
"Acg",
"Acid Rain Studios",
"Acme",
"Across The Pond Studios",
"Action Folksinger",
"Action Planet Comics",
"Active Images",
"Active Synapse",
"Adhesive Comics",
"Adhouse Books",
"Adversary Comix",
"Aeon",
"Aerosol Press",
"After Hours Press",
"Afterburn Comics",
"Aftershock Comics",
"Age Of Heroes",
"AiT/Planet Lar",
"Airbrush",
"Aircel Publishing",
"Airship Entertainment",
"Aja Blu Comix",
"Ajax",
"Ajax-Farrell",
"Akita Shoten",
"Al Fago Magazines",
"Alamat Comics",
"Albert Bonniers F",
"Albin Michel",
"Alchemy Studios",
"Alchemy Texts",
"Alfabeta",
"Alias Enterprises",
"Aliwan Comics",
"All Thumbs Press",
"All-Negro Comics",
"Allers",
"Allers Forlag",
"Allied Comics",
"Almighty Publishing",
"Alpha Productions",
"Alpha/Streck Enterprises",
"Alpine Underground",
"Alterna Comics",
"Alternative Press",
"Alvglans",
"Amalgam Comics", # this is marvel AND dc? what to do?
"Amalgamated Press",
"Amazing Aaron Productions",
"Amazing Comics",
"Amazing Creations Ink",
"Ambrosia Publishing",
"American Cancer Society",
"American Comics Group",
"American Friends Service Committee",
"American Mule",
"American Red Cross",
"American Visuals Corporation",
"Americanime",
"Americomics",
"Amerotica",
"Anarchy Studios",
"Andrews And Mcmeel",
"Angel Entertainment",
"Angel Gate Press",
"Angry Viking Press",
"Anime Works",
"Another Rainbow",
"Antarctic Press",
"Anti-Ballistic Pixelations",
"Anubis Press",
"Anvil",
"Apocalypse",
"Apple",
"Application Security, Inc.",
"Approbation Comics",
"Aragon",
"Arcade Comics",
"Arcana Studio",
"Archaia Studios Press",
"Archangel Studios",
"Archie",
"Archie Adventure Series",
"Ardden Entertainment",
"Argo Publications",
"Aria Press",
"Aries Publications",
"Arleigh",
"Armada",
"Armageddon Press Inc.",
"Arnoldo Mondadori Editore",
"Arrache Coeur",
"Arrow",
"Arsenic Lullaby Publishing",
"Askild",
"Aspen MLT",
"Asplunds",
"Astonish Comics",
"Astorina",
"Astronaut Ink",
"Asuka Comics",
"Asylum Press",
"Atari",
"Atlantic F",
"Atlantis Studios",
"Atlas",
"Atlas Comics",
"Atomeka Press",
"Atomic Basement",
"August House",
"Austintations",
"Avalon Communications",
"Aviation Comics",
"Avon",
"Awesome",
"Azteca Productions",
"B",
"B & H Publishing Group",
"B&D Pleasures",
"BBC Books",
"BBC Magazines",
"BC",
"BKR Comics",
"BSV - Williams",
"Baby Tattoo Books",
"Bad Dog Books",
"Bad Habit Books",
"Bad Karma Productions",
"Bad Press Ltd",
"Badroach Publications",
"Bagheera",
"Bailey Publishing Co",
"Bakh",
"Bald Guy Studios",
"Baldini Castoldi Dalai Editore",
"Ball Publishing",
"Ballantine Books",
"Bam",
"Banana Tales Press",
"Bandai Entertainment",
"Bang! Entertainment",
"Bantam Books",
"Barbour Christian Comics",
"Basement Comics",
"Bastei Verlag",
"Battlebooks Incorporated",
"Baychild Productions",
"Beanworld Press",
"Beckett",
"Belif",
"Bell Features",
"Berghs",
"Berkley Books",
"Berserker Comics",
"Best Destiny",
"Best Friends Publication",
"Beta 3",
"Betel",
"Beyond Time Comic",
"Big Balloon",
"Big Bang Comics",
"Big City Comics",
"Big Dog Ink",
"Big Head Press",
"Big Shot Comics",
"Big Umbrella",
"Bioroid Studios",
"Bishop Press",
"Black Boar Press",
"Black Cat Comics",
"Black Coat Comics",
"Black Diamond Effect Inc.",
"Black Eye",
"Black Library",
"Black Rock Design",
"Black ball Comics",
"Blackline Studios",
"Blacklist Studios",
"Blackmore",
"Blackout",
"Blackthorne",
"Bladkompaniet As",
"Blind Ferret",
"Blind Wolf",
"Bliss on Tap Publishing",
"Bloodfire Studios",
"Bloody Mary Comics",
"Blue Comet Press",
"Blue King Studios",
"Bluewater Productions",
"Bob Ross",
"Bodog",
"Boemerang",
"Bokf",
"Bokfabriken",
"Bokomotiv",
"Bompiani",
"Bones",
"Boneyard Press",
"Bongo",
"Bonnier Carlsen",
"Bonniers Juniorf",
"Boom! Studios",
"Boundless Comics",
"Brain Scan Studios",
"Brainstorm",
"Brave New Words",
"Brett's Comic Pile Publishing",
"Brick Computer Science Institute",
"Broadsword Comics",
"Broadway",
"Broken Halos",
"Broken Tree Comics",
"Broken Voice Comics",
"Brown Shoe Company",
"Bruce Hershenson",
"Bubblehead Publishing",
"Budget Books",
"Buffalo Books",
"Bulletproof Comics",
"Burlyman Entertainment",
"Buymetoys.com",
"C.A.M. Press",
"C.C.A.S. Publication",
"C.M.I. Corporativo Mexicano de Impresión",
"CARNIVAL COMICS",
"CBG",
"CEA Casa Editrice Astoria",
"CFW Enterprises",
"Cackling Imp Press",
"Cafe Digital",
"Cahaba Producttions",
"Caliber Comics",
"California Comics",
"Cambridge House Publishers",
"Campus Editions",
"Candle Light Press",
"Candlewick Press",
"Canew Ideas",
"Capcom",
"Capital Comics",
"Capitol Stories",
"Capstone Press",
"Captain Clockwork",
"Caption Comics",
"Carabosse Comics",
"Carbon-Based Comics",
"Carlsen Comics",
"Carlton Publishing",
"Carnal Comics",
"Carnopolis",
"Carol Ediciones S.A. de C.V.",
"Cartoon Art",
"Cartoon Books",
"Casa Editrice Dardo",
"Casa Editrice Universo",
"Casterman",
"Castle Rain",
"Catalan Communications",
"Catastrophic Comics",
"Catwild Publications",
"Celebrity",
"Cellar Door Publishing",
"Centaur",
"Central Park Media",
"Century Publications",
"Cepim",
"Cge From",
"Channel M",
"Chanting Monks Studios",
"Chaotic Unicorn Press",
"Charlton",
"Checker Book Publishing Group",
"Cherry",
"Cherry Comics",
"Chicago Mail Order Comics",
"Chick Publications",
"Children",
"Chronicle Books",
"Chuang Yi",
"Cinebooks",
"Circle Media",
"Cirkelf",
"Claypool Comics",
"Club 408 Graphics",
"Coffin Comics",
"Colburn Comics",
"Colour Comics Pty Ltd",
"Columbia Comics",
"Com.X",
"Comely Comix",
"Comic Art",
"Comic Book Legal Defense Fund",
"Comic Legends Legal Defense Fund",
"Comic Media",
"Comic Shop News Inc.",
"Comico",
"Comics Interview",
"Comics Unlimited Inc.",
"Comicsonair Publications",
"Commercial Comics",
"Commercial Signs Of Canada",
"Committed Comics",
"Company & Sons",
"Comunicaciones Graficas Comgraf",
"Condor Verlag",
"Coniglio Editore",
"Conquest Press",
"Continuity",
"Continum",
"Conundrum Press",
"Corgi",
"Corriere Della Sera",
"Crack",
"Cracked Pepper Productions",
"Craf Publishers",
"CrankLeft",
"Creative Impulse Publishing",
"Creative One",
"Creators Edge Press",
"Creston Publishing Corporation",
"Critical Mass",
"Cross Plains Comics",
"Cross Publications",
"Crossgen",
"Crown Publishers",
"Croydon Publishing",
"Crusade",
"Crush Dice Comics Company",
"Cry For Dawn Productions",
"Cryptic Press",
"Crystal Comics",
"Ctrl Alt Del",
"Cult Press",
"Cupples & Leon",
"Cyberosia Publishing",
"Cyclone Comics",
"D. S. Publishing",
"D.C. Thomson & Co.",
"DK Publishing",
"DQU COMICS",
"DWAP Productions",
"Dab Enterprises",
"Dabel Brothers Productions",
"Daewon C.I.",
"Dagens Nyheters F",
"Dagger Enterprises",
"Daim Press",
"Dakuwaka Productions",
"Dancing Elephant Press",
"Danity Kane Comics",
"Dare Comics",
"Dargaud",
"Dark Elf Designs",
"Dark Fantasy Production",
"Dark Ocean Studios",
"Darkchylde Entertainment",
"Darkmatter",
"David Mckay",
"David Miller Studios",
"Dead Dog",
"Dead Numbat Productions",
"DeadBox Art Studio",
"Deadline Publications Ltd.",
"Def Con Comics",
"Defiant",
"Del Rey",
"Dell",
"Delta Verlag",
"Deluxe",
"Deluxe Comics",
"Dengeki",
"Dennis F",
"Der Freibeuter",
"Determined Productions, Inc.",
"Devil's Due",
'Dial "C" for Comics',
"DigiCube",
"Digital Manga Distribution",
"Digital Webbing",
"Dimension Graphics",
"Dimestore Productions",
"Dino Comics",
"Disney",
"Diva Graphix",
"Do Gooder Press",
"Dobry Komiks",
"Dolmen Publishing",
"Dork Storm",
"Double A Comics",
"Double Edge Publications",
"Dover Publications",
"Dr Master Productions",
"DrMaster Publications Inc.",
"Dragon Candy Productions",
"Dragon Comics",
"Dragon Lady Press",
"Dramenon Studios",
"Drawn",
"Drawn & Quarterly",
"Dreamwave Productions",
"Drumfish Productions",
"Dupuis",
"Dynamic Forces",
"Dynamite",
"E",
"Eagle Comics",
"Eastern Color",
"Eastern Comics",
"Ec",
"Echo 3 Worldwide",
"Eclectic Comix",
"Eclipse",
"Eddie Campbell Comics",
"Eden",
"Edgewater Comics",
"Ediciones B",
"Ediciones José G. Cruz",
"Ediciones La Cúpula S.L.",
"Ediciones Latinoamericanas",
"Ediciones de la Flor",
"Edifumetto",
"Ediperiodici",
"Edition C",
"Editions First - Gründ - Dragon d'Or",
"Éditions Glénat",
"Editions La Joie de Lire",
"Editor",
"Editora Trama",
"Editora Vord",
"Editorial Alfaguara",
"Editorial Ejea",
"Editorial Greco (Grupo Editorial Colombiano)",
"Editorial Icavi Ltda.",
"Editorial Juventud",
"Editorial Manuel del Valle",
"Editorial Novaro",
"Editorial OEPISA",
"Editorial Rodriguez",
"Editorial Televisa",
"Editorial Toukan",
"Editorial Tucuman",
"Editoriale Corno",
"Editoriale Mercury",
"Editormex Mexicana",
"Editrice Cenisio",
"Editrice Puntozero",
"Edizioni Alpe",
"Edizioni Araldo",
"Edizioni Audace",
"Edizioni BD",
"Edizioni del Vascello",
"Edizioni San Paolo",
"Edizioni Star Comics",
"Edizioni d’Arte “Lo Scarabeo”",
"Educomics",
"Edwin Aprill",
"Eerie Publications",
"Egmont",
"Ehapa Verlag",
"El Capitan",
"El Mundo",
"Electric Spaghetti Comics",
"Elevenstone Studios",
"Elite Comics",
"Elvifrance",
"Empresa Editora Zig Zag S.A.",
"Endless Horizons Entertainment",
"Enemy Transmission",
"Enigma Comics",
"Enterbrain",
"Entity",
"Entity Comics",
"Enwill Associates",
"Epix",
"Eros Comix",
"Esteem Comics",
"Etc",
"Eternal",
"Eternity",
"Eura Editoriale",
"Eurotica",
"Event Comics",
"Everyman Studios",
"Evil Twin Comics",
"Evolution Comics",
"Excellent Publications",
"Exploding Albatross Funnybooks",
"Express",
"Extreme",
"F",
"FC9",
"Fabel",
"Factoid Books",
"False Idol Studios",
"Famous Funnies",
"Fandom House",
"Fangoria",
"Fantaco",
"Fantaco Enterprises",
"Fantagor Press",
"Fantagraphics",
"Farrar, Straus, and Giroux",
"Fathom Press",
"Fawcett Publications",
"Feest Comics",
"Felix Comics Inc.",
"Femme Fatales Comics",
"Fenickx Productions",
"Ferret Press",
"Fiasco Comics",
"Fiction House",
"Fiery Studios",
"Filmation",
"Fireman Press LTD.",
"First",
"First Salvo",
"First Second Books",
"Fishtales Inc Productions",
"Fishwrap Production",
"Fitzgerald Publishing Company",
"Flaming Face Productions",
"Fleetway",
"Fluid Friction",
"Fluide Glacial",
"Forbidden Fruit",
"Formatic",
"Fortress Publishing",
"Four Star Publications",
"Fox",
"Fox Atomic Comics",
"Foxtrot",
"Fragile Press",
"Franco Cosimo Panini",
"Frew Publications",
"Friendly",
"Frightworld",
"Full Bleed Studios",
"Full Circle Publications",
"Full Impact Comics",
"Full Stop Media",
"Fun Publications",
"Funk-O-Tron",
"Funnies Inc",
"Furio Viano Editore",
"Futabasha Publishers Ltd.",
"Future Comics",
"FutureQuake Press",
"Futuropolis",
"G & T Enterprises",
"G. Vincent Edizioni",
"GG Studio",
"Galassia",
"Galaxinovels",
"Galaxy Publishing",
"Game Players Comics",
"Games Workshop",
"Gangan Comics",
"Garage Graphix",
"Gary Philips",
"Gauntlet Comics",
"Gaviota",
"Gearbox Press",
"Gebers",
"Gem Publications",
"Gemestone Publ.",
"Gemstone",
"General Electric Company",
"Generation Comics",
"Genesis West",
"Genome Studios",
"George A. Pflaum",
"Georgia Straight",
"Giant in the Playground",
"Gilberton Publications",
"Gilmor",
"Gladstone",
"Glenat Italia",
"Globe Communications",
"Gold Key",
"Gold Star Publications Ltd.",
"Golden Press",
"Golfing / McCombs",
"Good Comics, Inc",
"Gotham Entertainment Group",
"Granata Press",
"Grand Central Publishing",
"Graphic Arts Service, Inc.",
"Graton Editeur",
"Gratuitous Bunny Comix",
"Great Big Comics",
"Great Publications",
"Great Smoky Mountains Historical Association",
"Greater Mercury",
"Green Man Press",
"Green Publishing",
"Grosset And Dunlap, Inc.,",
"Ground Zero Comics",
"Grupo Editorial Vid",
"Guild Publications",
"Gutsoon",
"H. C. Blackerby",
"H. H. Windsor",
"HB Comics",
"HELOCK COMICS",
"HM Communications",
"Hachette",
"Hall Of Heroes",
"Hallden",
"Halloween",
"Hamilton Comics",
"Hammarstr",
"Hamster Press",
"Hand Of Doom Publications",
"Handelsanst",
"Happy Comics Ltd.",
"Harpercollins",
"Harperperennial",
"Harrier",
"Harris Comics",
"Harry A. Chesler/Dynamic",
"Harry N. Abrams",
"Harvey",
"Harvey Pekar",
"Hasbro",
"Hays Entertainment",
"Headless Shakespeare Press",
"Heavy Metal",
"Helsinki Media",
"Hemmets Journal Ab",
"Heretic Press",
"Hero Initiative",
"Hero Universe",
"Heroscribe Comics!",
"Hershenson",
"Hi No Tori Studio",
"High Impact Entertainment",
"High Top",
"Highwater",
"Highway 62 Press",
"Hillman",
"Holyoke",
"Hot Comics",
"Hound Comics",
"Howard, Ainslee & Co.",
"Hugh Lauter Levin Associates",
"Humanoids",
"Hurricane Entertainment",
"Hyperwerks",
"Hyperwrench Productions",
"I.C.E. Comics",
"I.W. Publishing",
"IDW Publishing",
"INFINITY Publishing",
"INNFUSION",
"IPC Magazines Ltd.",
"Icarus Publications",
"Ice Kunion",
"Ideals Publishing",
"If Edizioni",
"Iguana Comics",
"Illustrated Humor Inc",
"Immortelle Studios",
"Imperium Comics",
"In the Public Domain",
"Industrial Services",
"Infinity Comics",
"Infinity Studios",
"Infocom",
"Innovation",
"Insight Editions",
"Insomnia Press",
"International Presse Magazine",
"Iron Circus Comics",
"Islas Filipinas Publishing Co.",
"J",
"JGM Comics",
"Jabberwocky Graphix",
"Jack Rabbit Stewdios",
"Jademan",
"Jemi F",
"Jetpack Press",
"Jeunesse Joyeuse",
"Jochen Enterprises",
"Joe Deagnon",
"John Andersson",
"Jump Back Productions",
"Jump Comics",
"JuniorPress BV",
"Juvee Comics",
"K",
"Kadokawa Shoten",
"Kana",
"Kappa Edizioni",
"Kathang Indio",
"Kean Soo",
"Keenspot Entertainment",
"Ken Pierce Inc.",
"Kenzer And Company",
"Key Publications",
"Kickstart Comics",
"King Comics",
"King Features Syndicate",
"King Hell",
"Kirby Publishing Company",
"Kitchen Sink",
"Kitty Publications",
"Kk",
"Knockabout",
"Known Associates Press",
"Kodansha",
"Krause Publications",
"Kyle Baker Publishing",
"L",
"L. Miller & Son, Ltd",
"L.F.P.",
"LFB Luigi F. Bona",
"La Musardine",
"La Repubblica / L'Espresso",
"Lab Rat Productions",
"Last Gasp",
"Layne Morgan Media",
"Le Lombard",
"Leadbelly Publications",
"Leader Enterprises",
"Leading Edge Comics",
"Lee Beardall",
"Legion Of Evil Press",
"Lego",
"Lerner Publishing Group",
"Les Editions Dargaud",
"Lev Gleason",
"Liar Comics",
"Lightning Comics",
"Lightspeed Press",
"Lindblads F",
"Lindqvists",
"Linsner.com",
"Lion King",
"Lion Library",
"Literacy Volunteers Of Chicago",
"Literary Enterprises",
"Little, Brown & Co.",
"Lodestone",
"Lohman Hills",
"London Night Studios",
"Lone Star Press",
"Lost Cause Productions",
"Lubrix",
"Lucky Dragon Comics",
"Lumen",
"M. F. Enterprises",
"MAD Books",
"MDS Studios",
"MJF Books",
"MVCreation",
"Mad Dog Graphics",
"Mad Love Publishing",
"Mad Monkey Press",
"Maerkle Press",
"Magazine Enterprises",
"Magazine Management",
"Magazzini Salani",
"Magic Press",
"Magnus & Bunker",
"Majestic Entertainment",
"Major Magazines",
"Malmborg",
"Manga 18",
"Mango Comics",
"Mansion Comics",
"Manuscript Press",
"Maple Leaf Publishing",
"Mark",
"Markosia",
"Marsu Productions",
"Martin L. Greim",
"Max Bunker Press",
"Maximum Press",
"McCain Ellio's Comics",
"McK Publishing",
"McMann & Tate",
"Mcfarland",
"Media Press",
"Media Works",
"Megaton Comics",
"Memory Lane Publication",
"Mercury Comics",
"Metro Comics",
"Metropolitan Books",
"Midnight Sons",
"Mighty Comics",
"Mighty Pumpkin",
"Millennium Publications",
"Milson",
"Mina Editores",
"Mindgame Press Page One, Inc.",
"Mirage",
"Modern",
"Mojo Press",
"Mondial",
"Mondo Bizzarro",
"Monkeysuit Press",
"Monster Comics",
"Monsterverse",
"Moonface Press",
"Moonstone",
"Morning Star Productions",
"Mosaik Steinchen F",
"Motorcycleboy Comics",
"Mr. Comics",
"Mu Press",
"Mulehide Graphics",
"Murray Comics",
"NFPA",
"Naked City",
"Narwain",
"Nate Butler",
"Nation-Wide comics",
"National Comics Publication",
"National Comics Publications",
"National Comics Publishing",
"National Periodical Publications",
"National Periodical Publications Inc",
"Nationella Trafiks",
"Nedor",
"Neko Press",
"NeoSun",
"Neuer Tessloff Verlag",
"New American Library",
"New Comics Group",
"New England Comics",
"New Media Publications",
"New Universe",
"New Worlds",
"Newsbook Publishing",
"Newspaper: Funny Pages",
"Nickel Editions",
"Nicotat",
"Night Wynd Enterprises",
"No Mercy",
"Noble Comics",
"Norbert Hethke Verlag",
"Norma Editorial",
"Normans",
"Northstar",
"Nostalgia Press",
"Novaris Entertainment",
"Novedades Editores",
"Novelty Press",
"Now",
"Ocean Comics",
"Odhams Press",
"Odyssey Comics",
"Ok",
"Oktomica",
"Olio",
"Olympian Publishing",
"Olyoptics",
"Oni Press",
"Opal",
"Orang Utan Comics",
"Ordfront",
"Orin Books",
"Oscar Caesar",
"Oval Projects Limited",
"P.F. Volland Company",
"PSG Publishing House",
"Pacific",
"Pacific Comics",
"Palisades Press",
"Palliard Press",
"Pan",
"Pandora Press",
"Panini Comics",
"Pantheon Books",
"Paper Dragonz",
"Paper Street Comics",
"Paper Tiger Comics",
"Paper Tiger Comix",
"Papyrus Comics",
"Paquet",
"Parents",
"Parker Editore",
"Parody Press",
"Penny Farthing",
"Pentagon Publishing Co",
"Penthouse Comics",
"Peregrine Entertainment",
"Personality",
"Phelps Publishing",
"Phi3 Comics",
"Philomel Books",
"Phoenix Fire Studios",
"Picturebox",
"Pied Piper",
"Pines Comics",
"Pingvinf",
"Pinnacle Comics",
"Pioneer Books Inc.",
"Planet Comics",
"Planet Publishing",
"Planeta DeAgostini",
"Platinum Studios Comics",
"Play Press",
"Playboy Press",
"Pocket Books",
"Point G Comics",
"Politisk Revy",
"Polystyle",
"Pop Comics",
"Popular Press",
"Poseur Ink",
"Possum Press",
"Power Comics",
"Power Records",
"Praxis Comics",
"Premier Magazines",
"Pride Comics",
"Primal Paper Comics",
"Print Mint",
"Prize",
"Progressive",
"Promotora K",
"Publicaciones Herrerias",
"Publication Enterprises",
"Publistrip",
"Pughouse Press",
"Pulp Theatre",
"Pure Imagination",
"Purrsia",
"Pyramid Communications",
"Q Comics",
"Quality Comics",
"Quality Periodicals",
"RAK Graphics",
"RCS MediaGroup",
"RDS Comics",
"RSquared Studios",
"Rab",
"Rabbit Valley",
"Radbu Productions",
"Radical Publishing",
"Radio Comics",
"Rainbow Media",
"Raj Comics",
"Ralston-Purina Company",
"Random House",
"Rat Race Comix",
"Real",
"Real Free Press",
"Realistic",
"Realm Press",
"Rebel Studios",
"Rebellion",
"Recollections",
"Red 5 Comics",
"Red Circle",
"Red Clown",
"Red Eagle Entertainment",
"Red Top",
"Redbud Studio",
"Redhead Comics",
"Regor Company",
"Renaissance Press",
"Renegade",
"Reprodukt",
"Revolutionary",
"Rheem Water Heating",
"Richters F",
"Rip Off Press",
"Rippu Shobo",
"Robert laffont",
"Rocket North Publishing",
"Roger Corman's Cosmic Comics",
"Ronin Studios",
"Rooster Teeth Productions",
"Rorschach Entertainment",
"Rotopress",
"Rude Dude Productions",
"Running Press",
"Rural Home",
"Russ Cochran",
"Rutledge Hill Press",
"S",
"SANS Entertainment Comics",
"SNK Playmore",
"SQP",
"Saalfield Publishing Company",
"SaberCat Comics",
"Sackmann Und H",
"Sacred Mountain",
"Saint James",
"Salvatore Taormina editore",
"Sanoma Uitgevers",
"Saxon",
"Say/Bart Productions",
"Scar Comics",
"Schibsted",
"Scholastic Book Services",
"Schultz",
"Scrap Pictures",
"Se-Bladene",
"Seaboard Publishing",
"Second To Some",
"Semic As",
"Semic Española de Ediciones",
"Semic International",
"Sergio Bonelli Editore",
"Seven Seas Entertainment",
"Shanda Fantasy Arts",
"Shibalba Press",
"Shinshokan",
"Shivae Studios",
"Shogakukan",
"Shōnen Gahōsha",
"Shooting Star",
"Showcase Publications",
"Shueisha",
"Sig Feuchtwanger",
"Signet Books",
"Silent Devil Productions",
"Silent Nemesis Workshop",
"SilverWolf",
"Silverline",
"Simon And Schuster",
"Sirius Entertainment",
"Skandinavisk Press",
"Skatoon Productions",
"Skywald",
"Sleeping Giant Comics",
"Sm",
"Smithsonian Institution",
"Softbank Creative",
"Solson Publications",
"Sombrero",
"Son of Sam Productions",
"Sound & Vision International",
"Space Goat Productions",
"Spacedog",
"Spark Publications",
"Speakeasy Comics",
"Special Action Comics",
"Special Studio",
"Spectrum Comics",
"Spilled Milk",
"Spire Christian Comics",
"Splitter",
"Spotlight Comics",
"Spotlight Publishers",
"St",
"St. Johns Publishing Co.",
"Stampa Alternativa / Nuovi Equilibri",
"Stanhall",
"Stanley Publications",
"Stanmor Publications",
"Star",
"Star Publications",
"Star Reach Publications",
"Starhead Comix",
"Stats Etc",
"Sterling",
"Stopdragon",
"Storm Lion Publishing",
"Story Comics",
"Straw Dog",
"Strawberry Jam Comics",
"Street & Smith",
"Street And Smith",
"Street and Steel",
"Striker 3D",
"Studio 407",
"Studio Aries",
"Studio Foglio",
"Studio Insidio",
"Studio Ironcat",
"Super Publishing",
"Superior Publishers Limited",
"Sussex Publishing Co",
"Svenska Serier",
"Swappers Quarterly And Almanac",
"T.R.I.B.E. Studio Comics",
"TSR",
"Ta Nea",
"Tapestry",
"Tekno Comix",
"Tell Tale Publications",
"Tempo Books",
"Terminal Press",
"Terra Major",
"Terzopoulos",
"Teshkeel Comics",
"Tetragrammatron",
"Th3rd World",
"The 3-D Zone",
"The Comic Times, Inc.",
"The Heritage Collection",
"The Scream Factory",
"The Toy Man",
"Thorby Enterprises",
"Thoughts",
"Thrill-House Comics",
"Thunder Baas",
"Time Bomb Comics",
"Timeless Journey Comics",
"Timely",
"Timely Illustrated Features",
"Tipografia M. Tomasina",
"Titan Books",
"Titan Magazines",
"Toby",
"Tom Doherty Associates",
"Tom Stacey",
"Tome Press",
"Tonkam",
"Top Cow",
"Top Shelf",
"Topps",
"Torpedo Comics",
"Toutain Editor",
"Tower",
"Tower Books",
"Transfuzion Publishing",
"Transmission X Comics",
"Trepidation Comics",
"Trident Comics",
"Trigon",
"Triumph",
"Triumphant",
"Triumphant Comics",
"Trojan",
"True Believers Press",
"True Patriot Studios",
"Tumble Creek Press",
"Tundra",
"Tundra Uk",
"Tusquets Editores",
"Twomorrow",
"Tyler James Comics",
"UDON",
"US Department of Health, Education and Welfare",
"Uitgeverij C.I.C.",
"Ultimate Comic Group",
"Ultimate Creations",
"Ultimate Sports Force",
"Ungdomsmissionens V",
"Unified Field Operations",
"United Features",
"Universe",
"Utslag",
"Utterly Strange Publications",
"Valiant",
"Valve",
"Vamperotica Entertainment",
"Vanguard Productions",
"Vents d'Ouest",
"Vermillon",
"Verotik",
"Vertical Inc.",
"Victor Gollancz Ltd.",
"Victory",
"View Askew",
"Viper Comics",
"Virgin Comics",
"Virus Comix",
"Vittorio Pavesio Productions",
"Vivid Publishing",
"Viz",
"Viz Premiere",
"Viz Select",
"Vortex",
"Vrijbuiter, De",
"W.T. Grant",
"WCG",
"WSOY",
"Wallace Wood",
"Walt Disney Company Italia",
"Wani Books",
"Warner Books",
"Warner Brothers",
"Warp Graphics",
"Warren",
"Warrior Publications",
"Watson-Guptill Publications",
"Webs Adventure Corporation",
"Welsh Publishing Group",
"Western Publishing.",
"White Buffaloe Press",
"White Lightning Productions",
"Whitman",
"Whitman Publishing",
"Wicked Good Comics",
"Wild and Wooly Press",
"Wildcard Production",
"William H Wise",
"Williams F",
"Willms Verlag",
"Windjammer",
"Winthers",
"Wisconson Department Of justice",
"Wonder Comix",
"Work Horse Comics",
"XL Creations",
"Xanadu Publishing, Inc.",
"Yen Press",
"Yendie Book Publishing",
"Yoe Studio",
"Youthful",
"ZOOLOOK",
"Zenescope Entertainment",
"Ziff",
"Ziff Davis Media",
"Zip Comics",
"Zodiac",
"Zuzupetal Press",
"Zwerchfell",
"comicsone.com",
"eBay",
"ffantasy ffactory",
"iVerse Media",
"inZane Comics",
"mg/publishing",
"self published",
"talcMedia Press",
"¡Ka-Boom! Estudio S.A. de C.V.",
])
# =============================================================================
if __name__ == '__main__':
    '''
    A script method that can be run to compare the contents of the tables in this
    module with the publishers listed in the ComicVine database. This script
    should be run periodically, and used to update the tables in this module.
    '''
    # first, gather the names of all the publishers on the ComicVine database
    # by scraping them and adding them to a set of scraped publishers
    log.debug()   # NOTE(review): presumably emits a blank debug line -- confirm
    print "Gathering publishers from ComicVine..."
    scraped_publishers = set()
    done = False
    page = 0
    while not done:
        page += 1
        html = ""
        try:
            # fetch the next page of comicvine's alphabetical publisher listing
            request = WebRequest.Create(
                'http://www.comicvine.com/publishers/?page='
                + sstr(page) + '&sort=alphabetical')
            response = request.GetResponse()
            responseStream = response.GetResponseStream()
            reader = StreamReader(responseStream, Text.Encoding.UTF8)
            html = reader.ReadToEnd()
        except WebException, wex:
            print("unexpected web exception: " + str(wex))
        finally:
            # close only the .NET resources that were actually created; any of
            # the lines above may have thrown before assigning these names
            if 'reader' in vars(): reader.Close()
            if 'responseStream' in vars(): responseStream.Close()
            if 'response' in vars(): response.Close()
        # html-decode entities in the scraped page before parsing it
        writer = StringWriter() # coryhigh: make this into a utility method
        HttpUtility.HtmlDecode(html, writer)
        html = writer.ToString()
        # pull the publisher names out of the table cells' anchor tags
        matches = set( [m.strip() for m in
            re.findall("(?m)<td[^>]*>\s*<a[^>]*>([^<]*)</a>\s*</td>",html) ] )
        done = matches <= scraped_publishers # stop when we are repeating results
        if not done:
            scraped_publishers.update( matches )
            print "Page " + sstr(page) + ", " + sstr(len(matches))+" publishers..."

    # 2. now that we've got all the publishers on ComicVine, go through our
    # known publishers, and report a) if we have one that isn't in ComicVine
    # anymore, and b) if ComicVine now has one that we don't.
    all_clear = True
    print("")
    for imprint in __imprint_map:
        if imprint not in scraped_publishers:
            all_clear = False
            print("Not in ComicVine: " + imprint)
    for publisher in __other_publishers:
        if publisher not in scraped_publishers:
            all_clear = False
            print("Not in ComicVine: " + publisher)
    for publisher in sorted(scraped_publishers):
        if publisher not in __imprint_map and publisher not in __other_publishers:
            all_clear = False
            print("Not in module: " + publisher)
    if all_clear:
        print("Nothing to update!")
'''
This module contains the Comic Vine implementations of the functions
described in the db.py module. That module can delegate its function calls to
the functions in this module, but other than that, external modules should
NOT call these functions directly.
@author: Cory Banack
'''
import clr
import resources
import cvconnection
import log
import re
import utils
from utils import is_string, is_number, sstr
from dbmodels import IssueRef, SeriesRef, Issue
import cvimprints
clr.AddReference('System')
from System.IO import Directory, File, Path
clr.AddReference('System')
from System.Net import WebRequest
clr.AddReference('System.Drawing')
from System.Drawing import Image
# this cache is used to speed up __issue_parse_series_details. it is a
# memory leak (until the main app shuts down), but it is small and worth it.
__series_details_cache = {}
# =============================================================================
def _query_series_refs(search_terms_s, callback_function):
    '''
    This method is the Comic Vine implementation of the identically named
    method in the db.py module.
    '''
    # search with mildly cleaned-up terms first; if nothing is found, clean
    # the terms more aggressively and (only if they changed) search again.
    terms_s = __cleanup_search_terms(search_terms_s, False)
    results = __query_series_refs(terms_s, callback_function)
    if not results:
        alt_terms_s = __cleanup_search_terms(terms_s, True)
        if alt_terms_s != terms_s:
            results = __query_series_refs(alt_terms_s, callback_function)
    return results
# =============================================================================
def __query_series_refs(search_terms_s, callback_function):
    '''
    A private implementation of the public method with the same name.

    search_terms_s --> the search terms string to query ComicVine with
    callback_function --> called periodically with progress values; a truthy
        return value cancels the query (an empty set is then returned)

    Returns a set of SeriesRef objects for all matching series.
    '''
    # a one-element list rather than a plain bool -- NOTE(review): presumably
    # so nested code can reassign it (python 2 has no 'nonlocal'); confirm
    cancelled_b = [False]
    series_refs = set()

    # 1. do the initial query, record how many results in total we're getting
    dom = cvconnection._query_series_ids_dom(search_terms_s, 0)
    num_results_n = int(dom.number_of_total_results)
    if num_results_n > 0:

        # a helpful function that turns a 'volume' into a 'SeriesRef'
        def _makeref(volume):
            # NOTE(review): a missing publisher/image apparently parses as a
            # stub whose __dict__ has <= 1 entry -- confirm against cvconnection
            publisher = '' if len(volume.publisher.__dict__) <= 1 else \
                volume.publisher.name
            thumb = None if len(volume.image.__dict__) <= 1 else \
                volume.image.thumb_url.replace(r'thumb', "large")
            return SeriesRef( int(volume.id), sstr(volume.name),
                sstr(volume.start_year), sstr(publisher),
                sstr(volume.count_of_issues), thumb)

        # 2. convert the results of the initial query to SeriesRefs and then add
        # them to the returned list. notice that the dom could contain a single
        # volume OR a list of volumes in its 'volume' variable.
        if not isinstance(dom.results.volume, list):
            series_refs.add( _makeref(dom.results.volume) )
        else:
            for volume in dom.results.volume:
                series_refs.add( _makeref(volume) )

        # 3. if there were more than 20 results, we'll have to do some more
        # queries now to get the rest of them (comicvine pages its results)
        RESULTS_PAGE_SIZE = 20
        iteration = RESULTS_PAGE_SIZE
        if iteration < num_results_n:
            num_remaining_steps = num_results_n // RESULTS_PAGE_SIZE

            # 3a. do a callback for the first results (initial query)...
            cancelled_b[0] = callback_function(
                iteration, num_remaining_steps)

            while iteration < num_results_n and not cancelled_b[0]:
                # 4. query for the next batch of results, in a new dom
                dom = cvconnection._query_series_ids_dom(
                    search_terms_s, sstr(iteration))
                iteration += RESULTS_PAGE_SIZE

                # 4a. do a callback for the most recent batch of results
                cancelled_b[0] = callback_function(
                    iteration, num_remaining_steps)

                if int(dom.number_of_page_results) < 1:
                    log.debug("WARNING: got empty results page") # issue 33
                else:
                    # 5. convert the current batch of results into SeriesRefs,
                    # and then add them to the returned list. Again, the dom
                    # could contain a single volume, OR a list.
                    if not isinstance(dom.results.volume, list):
                        series_refs.add( _makeref(dom.results.volume) )
                    else:
                        for volume in dom.results.volume:
                            series_refs.add( _makeref(volume) )

    # 6. Done! We've gone through and gathered all results.
    return set() if cancelled_b[0] else series_refs
# ==========================================================================
def __cleanup_search_terms(search_terms_s, alt_b):
   '''
   Returns a cleaned up version of the given search terms.  The terms are
   cleaned by removing, replacing, and massaging certain keywords to make the
   Comic Vine search more likely to return the results that the user really
   wants.

   'search_terms_s' -> the search terms to clean up
   'alt_b' -> true to attempt to produce an alternate search string by also
       replacing numerical digits with their corresponding english words
       and vice versa (i.e. "8" <-> "eight")
   '''
   # All of the symbols below cause inconsistency in title searches
   search_terms_s = search_terms_s.lower()
   search_terms_s = search_terms_s.replace('.', '')
   search_terms_s = search_terms_s.replace('_', ' ')
   search_terms_s = search_terms_s.replace('-', ' ')
   # drop common joining words that just add noise to the search
   search_terms_s = re.sub(r'\b(vs\.?|versus|and|or|the|an|of|a|is)\b',
      '', search_terms_s)
   # normalize the many spellings of "giant size" and "king size"
   search_terms_s = re.sub(r'giantsize', r'giant size', search_terms_s)
   search_terms_s = re.sub(r'giant[- ]*sized', r'giant size', search_terms_s)
   search_terms_s = re.sub(r'kingsize', r'king size', search_terms_s)
   search_terms_s = re.sub(r'king[- ]*sized', r'king size', search_terms_s)
   search_terms_s = re.sub(r"directors", r"director's", search_terms_s)
   # normalize "volume"/"vol." down to plain "vol".  NOTE: the replacement
   # must be a plain string -- the old replacement r"\bvol\b" injected
   # literal backspace (0x08) characters into the terms, because \b only
   # means "word boundary" in a regex PATTERN, not in a replacement string.
   search_terms_s = re.sub(r"\bvolume\b", "vol", search_terms_s)
   search_terms_s = re.sub(r"\bvol\.\b", "vol", search_terms_s)
   # if the alternate search terms are requested, try to expand single number
   # words, and if that fails, try to contract them.
   orig_search_terms_s = search_terms_s
   if alt_b:
      search_terms_s = utils.convert_number_words(search_terms_s, True)
   if alt_b and search_terms_s == orig_search_terms_s:
      search_terms_s = utils.convert_number_words(search_terms_s, False)
   # strip out punctuation (and collapse whitespace) by keeping only the
   # "word" character runs
   word = re.compile(r'[\w]{1,}')
   search_terms_s = ' '.join(word.findall(search_terms_s))
   return search_terms_s
# =============================================================================
def _query_issue_refs(series_ref, callback_function=lambda x : False):
   '''
   This method is the Comic Vine implementation of the identically named
   method in the db.py module.
   '''
   # run the quick-but-incomplete query first, then the slow-but-exhaustive
   # one.  the safe query reuses the fast query's results to speed itself up,
   # and it also weeds out any obsolete entries from the fast query's cache.
   fast_refs = __query_issue_refs_fast(series_ref, callback_function)
   all_refs = __query_issue_refs_safe(series_ref, fast_refs, callback_function)
   log.debug(" ...found ", len(fast_refs), " issues using FAST issue query")
   log.debug(" ...found ", len(all_refs - fast_refs),
      " more using SAFE issue query")
   return all_refs
# =============================================================================
def __query_issue_refs_fast(series_ref, callback_function=lambda x : False):
   '''
   This method is a FAST Comic Vine implementation of the identically named
   method in the db.py module.

   It is not guaranteed to return EVERY issue in comic vine's database, or even
   ANY issues, or even (rarely) the RIGHT issues.  But it will usually return
   most of all of them, and do so fairly quickly.

   For examples of where this method can fail, see bugs: 135, 136, 149
   '''

   # this gets set to true if the user clicks "cancel" during the callback
   cancelled_b = [False]

   # map of issue_id strings to tuples.  contains one mapping for each issue
   # that our search (below) will return.  each mapped tuple contains
   # {issue_number_s, issue_id_s, volume_id_s} (basically, the compact details
   # for a single issue.)  the main point of the method is to populate this map
   issues_to_tuples = {}

   series_key_n = int(series_ref.series_key)
   series_name_s = sstr(series_ref.series_name_s)

   # we'll read cached tuples for this series first.  this is a much faster way
   # to populate issues_to_tuples than querying comicvine...

   # 1. get the name of the tuples cache file (creating its directory if
   #    this is the first thing we've ever cached)
   cache_file = __get_cache_file_path(series_ref, '.dat')
   if not Directory.Exists(Path.GetDirectoryName(cache_file)):
      Directory.CreateDirectory(Path.GetDirectoryName(cache_file))

   # 2. read in all tuples from the cache.  each cache line is a
   #    comma-separated "issue_num,issue_id,volume_id" record.
   loaded_from_cache_n = 0
   loaded_from_web_n = 0
   if File.Exists(cache_file):
      with open(cache_file, 'r') as f:
         issue_data = f.readline()
         while issue_data: # might contain newline or whitespace
            if issue_data.strip():
               tuple = issue_data.strip().split(",")
               if len(tuple) != 3:
                  # something's gone very wrong; clear the cache results
                  issues_to_tuples = {}
                  break
               else:
                  issue_key = tuple[1]
                  issues_to_tuples[issue_key] = (tuple[0],tuple[1],tuple[2])
            issue_data = f.readline()
      del issue_data
      loaded_from_cache_n = len(issues_to_tuples)

   # 3. now do a query to comicvine to see how many issues we should have; this
   #    lets us figure out if we need to grab more tuples from the web
   dom = cvconnection._query_issue_ids_dom_fast(series_name_s, 0)
   total_to_load_n = int(dom.number_of_total_results)

   # 4. make sure the cache is valid and current.  if it's not (i.e. it has
   #    more issues than comicvine, or is missing more than 10), discard it
   missing_issues = total_to_load_n - loaded_from_cache_n
   if missing_issues < 0 or missing_issues > 10:
      loaded_from_cache_n = 0
      issues_to_tuples = {}
      File.Delete(cache_file)

   # 5. now IF we didn't get all the issues from the cache, start
   #    querying comicvine to get them individually.  we page backwards,
   #    starting at the last page of results and decrementing 'skip_n'.
   PAGE = 20
   skip_n = total_to_load_n - PAGE
   skip_n = max(0, skip_n)

   def _parse_issue_dom(issue_dom):
      # records the given issue xml element into 'issues_to_tuples'.
      # returns 1 if the issue was newly added, 0 if it was already known.
      issue_key_s = issue_dom.id.strip()
      vol_key_s = issue_dom.volume.id.strip()
      # if the issue number isn't a string, then it's "unknown" (i.e. blank)
      issue_number_s = issue_dom.issue_number.strip() if \
         is_string(issue_dom.issue_number) else ""
      if not issue_key_s or not vol_key_s:
         raise Exception("bad dom results from comicvine")
      # strip ".00" from the end of issue numbers
      issue_number_s = re.sub( r'\.0*\s*$', '', issue_number_s)
      if not issues_to_tuples.has_key(issue_key_s):
         issues_to_tuples[issue_key_s]=(issue_number_s, issue_key_s, vol_key_s)
         return 1
      else:
         return 0

   def _done_loop():
      # true when we have enough issues, ran out of pages, or were cancelled
      return len(issues_to_tuples) >= total_to_load_n or \
         skip_n < 0 or cancelled_b[0];

   while not _done_loop():
      dom = cvconnection._query_issue_ids_dom_fast(series_name_s, skip_n)
      if ("issue" in dom.results.__dict__):
         # the 'issue' element may be a single element OR a list of them
         if type(dom.results.issue) == type([]):
            for issue in dom.results.issue:
               loaded_from_web_n += _parse_issue_dom(issue)
         else:
            loaded_from_web_n += _parse_issue_dom(dom.results.issue)
      # report our progress (a fraction); callback returns True to cancel
      cancelled_b[0] = callback_function(
         len(issues_to_tuples)/float(total_to_load_n))
      skip_n = 0 if skip_n > 0 and skip_n < PAGE else skip_n - PAGE

   if cancelled_b[0]:
      pass
   elif total_to_load_n < len(issues_to_tuples):
      # this is rare, if comic vine deletes an issue, and adds 2 new ones, the
      # cache is too big now, and we might end up with more loaded issues (from
      # web + cache) than there are in CV!  at least one issue we've loaded
      # is invalid, so better to have this whole method return nothing.
      log.debug("warning: cache had obsolete issues. clearing it.")
      File.Delete(cache_file) # cache is corrupt, reload it next time
      issues_to_tuples = {} # better to return nothing than an invalid list
      loaded_from_web_n = 0 # don't write our current tuples out

   # 6. update the cache file so next time this operation is faster.
   #    tuples are written sorted by volume id, then by issue number.
   if not cancelled_b[0] and loaded_from_web_n > 0:
      def _compare_tuples(t1, t2):
         return cmp(int(t1[2]), int(t2[2])) or cmp(t1[0], t2[0])
      with open(cache_file, 'w') as f:
         for tuple in sorted(issues_to_tuples.values(), _compare_tuples):
            if is_string(tuple[0]) and is_number(tuple[1])\
                  and is_number(tuple[2]):
               f.write(sstr(tuple[0]) + ',' + \
                  sstr(tuple[1]) + ',' + sstr(tuple[2]) + '\n')

   # 7. prune out the tuples that don't match the current series, and make
   #    a list of IssueRefs out of the remaining tuples
   issue_refs = set([ IssueRef(t[0], t[1]) \
      for t in issues_to_tuples.values() if int(t[2])==series_key_n ])

   # 8. a little nice debug output...
   #if not cancelled_b[0]:
   #   log.debug(" ---> FAST ISSUE QUERY found ", loaded_from_cache_n, " (of ",
   #   loaded_from_cache_n + loaded_from_web_n, ") results in the local cache")
   return set() if cancelled_b[0] else issue_refs
# =============================================================================
def __query_issue_refs_safe( \
      series_ref, already_found, callback_function=lambda x : False):
   '''
   This method is a SAFE but SLOW Comic Vine implementation of the identically
   named method in the db.py module.

   It is guaranteed to return EVERY issue in comic vine's database, but it runs
   very slowly.  If you already know some of the IssueRefs that it will find,
   however, you can speed its operation up considerably by providing them
   in the 'already_found' argument (a set of IssueRefs).

   This method will never return issues that are not in the comic vine database,
   even if such issues are provided in the 'already_found' set.
   '''

   # a comicvine series key can be interpreted as an integer
   series_id_n = int(series_ref.series_key)

   # gets set to true if the user cancels during a progress callback; kept in
   # a one-element list so the nested function below can mutate it
   cancelled_b = [False]
   issue_refs = set()

   # we'll read existing IssueRefs from a local cache if possible.  this is a
   # much faster way to populate issue_refs than querying comicvine...

   # 1. get the name of the cache file.
   cache_file = __get_cache_file_path(series_ref, '.cache')
   if not Directory.Exists(Path.GetDirectoryName(cache_file)):
      Directory.CreateDirectory(Path.GetDirectoryName(cache_file))

   # 2. read in all issues from the cache, including the 'already_found' list,
   #    which we treat as though it was part of the cache.  each cache line
   #    is a comma-separated "issue_id,issue_num" record.
   if already_found:
      issue_refs = issue_refs.union(already_found)
   if File.Exists(cache_file):
      with open(cache_file, 'r') as f:
         issue_data = f.readline()
         while issue_data: # might contain newline or whitespace
            if issue_data.strip():
               issue_key = issue_data.strip().split(",")[0]
               issue_num_s = issue_data.strip().split(",")[1]
               newref = IssueRef(issue_num_s.strip(), issue_key.strip())
               if not newref in issue_refs:
                  issue_refs.add( newref );
            issue_data = f.readline()
      del issue_data

   # 3. now do a query to comicvine to see if we should grab some more issues
   dom = cvconnection._query_issue_ids_dom_safe(sstr(series_id_n))
   if dom is None:
      raise Exception("error getting issues in " + sstr(series_ref))
   else:
      # 4. parse the query results to find the total number of issues that
      #    comic vine has for our series.
      total_to_load_n = 0
      if hasattr(dom.results, "__dict__") and \
            "issues" in dom.results.__dict__ and \
            hasattr(dom.results.issues, "__dict__") and \
            "issue" in dom.results.issues.__dict__:
         total_to_load_n = len(dom.results.issues.issue) \
            if isinstance(dom.results.issues.issue, list) else 1

      # 5. now if we didn't get all the issues from the cache, start
      #    querying comicvine to get them individually
      issue_keys = set([ref.issue_key for ref in issue_refs])
      expected_issue_keys = set()
      loaded_from_web_n = 0

      def _grab_issue(issue):
         # ensures the given issue xml element is represented in
         # 'issue_refs', doing an expensive per-issue comicvine query for any
         # issue that wasn't cached.  returns 1 if a web query was needed,
         # else 0.  also records the issue id in 'expected_issue_keys'.
         expected_issue_keys.add(issue.id)
         if issue.id not in issue_keys:
            issue_page = cvconnection._query_issue_number_dom(issue.id)
            issue_num_s = issue_page.results.issue_number
            if not is_string(issue_num_s): issue_num_s = ''
            issue_num_s = issue_num_s.replace('.00', '')
            issue_refs.add(IssueRef(issue_num_s, issue.id))
            cancelled_b[0] =\
               callback_function(float(len(issue_refs))/total_to_load_n)
            return 1
         else:
            return 0

      if total_to_load_n == 1:
         loaded_from_web_n += _grab_issue(dom.results.issues.issue)
      else:
         # 5a. pre sort the issues in the results by issue id, as a proxy
         #     for issue number (which we can't have yet.)  this should
         #     lead to the progress bar counting up mostly (see issue 82)
         dom.results.issues.issue.sort(None, lambda iss : int(iss.id))
         for issue in dom.results.issues.issue:
            loaded_from_web_n += _grab_issue(issue)
            if cancelled_b[0]:
               break

      # 5b. there could be obsolete values in the cache/already_found set.
      #     if so, remove them from our results.
      obsolete_issue_refs = set()
      for ref in issue_refs:
         if not ref.issue_key in expected_issue_keys:
            log.debug("warning: ignoring obsolete issue: ", ref.issue_key)
            obsolete_issue_refs.add(ref)
      issue_refs = issue_refs - obsolete_issue_refs

   # 6. update the cache file so next time this operation is faster.
   #    even if we are cancelled, this could speed things up next time.
   if loaded_from_web_n > 0:
      with open(cache_file, 'w') as f:
         for ref in issue_refs:
            if is_number(ref.issue_num_s) and is_number(ref.issue_key):
               f.write(sstr(ref.issue_key) + ',' + sstr(ref.issue_num_s) + '\n')

   # 7. a little nice debug output...
   #if not cancelled_b[0]:
   #   log.debug(" ---> SAFE ISSUE QUERY found ", loaded_from_cache_n, " (of ",
   #   loaded_from_cache_n + loaded_from_web_n, ") issues in the local cache")
   return set() if cancelled_b[0] else issue_refs
# =============================================================================
def __get_cache_file_path(series_ref, extension = ''):
   '''
   Returns the correct cache file path for the given series_ref, with the
   given extension.  There is no guarantee that this path points to a file
   or directory that actually exists.

   series_ref -> the SeriesRef that you want a cache file path for.
   extension -> the extension to use in the filepath ('.cache', '.dat', etc)
   '''
   key_n = int(series_ref.series_key)
   base_dir = resources.LOCAL_CACHE_DIRECTORY + 'comicvine/'

   # older versions of this app wrote cache files directly into the base
   # directory; keep honouring such a file if one already exists there
   old_style_file = base_dir + sstr(key_n) + extension
   if File.Exists(old_style_file):
      return old_style_file

   # otherwise, bucket the file into a subdirectory named after the first
   # two digits of the (zero-padded, absolute) series id
   bucket_s = (sstr(abs(key_n)).zfill(2))[0:2]
   return base_dir + bucket_s + '/' + sstr(key_n) + extension
# =============================================================================
def _query_image(ref):
   '''
   This method is the Comic Vine implementation of the identically named
   method in the db.py module.
   '''
   # 1. figure out which url to load, based on what kind of 'ref' we got
   url_s = None
   if isinstance(ref, SeriesRef):
      url_s = ref.thumb_url_s
   elif isinstance(ref, IssueRef):
      dom = cvconnection._query_issue_image_dom(sstr(ref.issue_key))
      url_s = __issue_parse_image_url(dom) if dom else None
   elif is_string(ref):
      url_s = ref

   # 2. try to download and decode the image at that url
   image = None
   if url_s:
      try:
         response = WebRequest.Create(url_s).GetResponse()
         image = Image.FromStream(response.GetResponseStream())
      except:
         log.debug_exc('ERROR loading cover image from comicvine:')
         log.debug('--> imageurl: ', url_s)
         image = None

   # a None return means an error occurred, or else comicvine simply
   # doesn't have any Image for the given ref object
   return image
# =============================================================================
def _query_issue(issue_ref):
   '''
   This method is the Comic Vine implementation of the identically named
   method in the db.py module.
   '''
   key_s = sstr(issue_ref.issue_key)
   issue = Issue()

   # populate the Issue by running the xml query results through each of
   # the parsing passes, in order
   dom = cvconnection._query_issue_details_dom(key_s)
   for parser in [__issue_parse_simple_stuff, __issue_parse_series_details,
         __issue_parse_story_credits, __issue_parse_summary,
         __issue_parse_roles]:
      parser(issue, dom)

   # a few extra details are only available by scraping the actual webpage
   page = cvconnection._query_issue_details_page(key_s)
   __issue_scrape_extra_details( issue, page )
   return issue
#===========================================================================
def __issue_parse_simple_stuff(issue, dom):
   ''' Parses in the 'easy' parts of the DOM '''
   results = dom.results

   # copy over the simple string values, skipping any that comicvine
   # returned as a non-string placeholder element
   if is_string(results.volume.name):
      issue.series_name_s = results.volume.name.strip()
   if is_string(results.name):
      issue.title_s = results.name.strip()
   if is_string(results.id):
      issue.issue_key = results.id
   if is_string(results.issue_number):
      # ".00" suffixes are just noise on comicvine issue numbers
      issue.issue_num_s = results.issue_number.replace('.00', '')
   if is_string(results.site_detail_url) and \
         results.site_detail_url.startswith("http"):
      issue.webpage_s = results.site_detail_url

   # the published year and month are only accepted if they parse as ints
   def _copy_int(attr_s, target_s):
      if attr_s in results.__dict__ and is_string(getattr(results, attr_s)):
         try:
            setattr(issue, target_s, sstr(int(getattr(results, attr_s))))
         except:
            pass # got an unrecognized year/month format...?
   _copy_int("publish_year", "year_s")
   _copy_int("publish_month", "month_s")

   # grab the image for this issue and store it as the first element
   # in the list of issue urls.
   image_url_s = __issue_parse_image_url(dom)
   if image_url_s:
      issue.image_urls.append(image_url_s)
#===========================================================================
def __issue_parse_image_url(dom):
   ''' Grab the image for this issue out of the given DOM. '''
   # comicvine serves each cover at several sizes.  walk the size variants
   # in this (original) priority order, and rewrite the first one that is
   # available into a url for our target size.  returns None if the dom
   # has no usable image url at all.
   IMG_SIZE = "large"
   SIZE_PRIORITY = ["icon", "medium", "thumb", "tiny", "super", "large"]

   if "image" not in dom.results.__dict__:
      return None
   image = dom.results.image
   for size_s in SIZE_PRIORITY:
      attr_s = size_s + "_url"
      if attr_s in image.__dict__ and is_string(getattr(image, attr_s)):
         return getattr(image, attr_s).replace(size_s, IMG_SIZE)
   return None
#===========================================================================
def __issue_parse_series_details(issue, dom):
   ''' Parses the current comic's series details out of the DOM '''
   series_id = dom.results.volume.id

   # if the start year and publisher_s have been cached (because we already
   # accessed them once this session) use the cached values.  else
   # grab those values from comicvine, and cache em so we don't have to
   # hit comic vine for them again (at least not in this session)
   cache = __series_details_cache
   if series_id in cache:
      start_year_s = cache[series_id][0]
      publisher_s = cache[series_id][1]
   else:
      # contact comicvine to extract details for this comic book
      series_dom = cvconnection._query_series_details_dom(series_id)
      if series_dom is None:
         raise Exception("can't get details about series " + series_id)

      # start year (a missing or non-string element means "unknown")
      if "start_year" in series_dom.results.__dict__ and \
            is_string(series_dom.results.start_year):
         start_year_s = series_dom.results.start_year
      else:
         start_year_s = ''

      # publisher (a missing or non-string element means "unknown")
      if "publisher" in series_dom.results.__dict__ and \
            "name" in series_dom.results.publisher.__dict__ and \
            is_string(series_dom.results.publisher.name):
         publisher_s = series_dom.results.publisher.name
      else:
         publisher_s = ''
      cache[series_id] = (start_year_s, publisher_s)

   # check whether the current publisher really is the true publisher, or
   # whether it's actually an imprint of another (parent) publisher.
   issue.publisher_s = cvimprints.find_parent_publisher(publisher_s)
   if issue.publisher_s != publisher_s:
      issue.imprint_s = publisher_s
   issue.start_year_s = start_year_s
#===========================================================================
def __issue_parse_story_credits(issue, dom):
   '''
   Parse the current comic's story arc/character/team/location
   credits from the DOM.
   '''

   def _credit_names(attr_s, tag_s):
      # extracts the list of 'name' values under dom.results.<attr_s>.<tag_s>
      # (i.e. dom.results.team_credits.team).  that element may be missing,
      # a single element, or a list of elements.  returns a (possibly empty)
      # list of name strings.  this helper replaces four copies of the same
      # stanza that used to be inlined below.
      names = []
      if attr_s in dom.results.__dict__:
         parent = getattr(dom.results, attr_s)
         if tag_s in parent.__dict__:
            child = getattr(parent, tag_s)
            if type(child) == type([]):
               names = [item.name for item in child]
            elif is_string(child.name):
               names = [child.name]
      return names

   # story arcs are joined to form the issue's "alternate series name"
   story_arcs = _credit_names("story_arc_credits", "story_arc")
   if len(story_arcs) > 0:
      issue.alt_series_name_s = ', '.join(story_arcs)

   # corylow: SEPARATION OF CONCERNS: Issue 47 should be solved more generically
   # characters, teams and locations become comma-separated attribute strings,
   # so any commas/semicolons INSIDE a name are stripped first (see Issue 47)

   # get any character details that might exist
   characters = _credit_names("character_credits", "character")
   if len(characters) > 0:
      characters = [re.sub(r',|;', '', x) for x in characters] # see Issue 47
      issue.characters_s = ', '.join(characters)

   # get any team details that might exist
   teams = _credit_names("team_credits", "team")
   if len(teams) > 0:
      teams = [re.sub(r',|;', '', x) for x in teams] # see Issue 47
      issue.teams_s = ', '.join(teams)

   # get any location details that might exist
   locations = _credit_names("location_credits", "location")
   if len(locations) > 0:
      locations = [re.sub(r',|;', '', x) for x in locations] # see Issue 47
      issue.locations_s = ', '.join(locations)
#===========================================================================
def __issue_parse_summary(issue, dom):
   ''' Parse the current comic's summary details from the DOM. '''

   # grab the issue description, and do a bunch of modifications and
   # replaces to massage it into a nicer "summary" text
   OVERVIEW = re.compile('Overview')
   PARAGRAPH = re.compile(r'<[bB][rR] ?/?>|<[Pp] ?>')
   NBSP = re.compile('&nbsp;?')      # html non-breaking-space entity
   MULTISPACES = re.compile(' {2,}')
   STRIP_TAGS = re.compile('<.*?>')
   if is_string(dom.results.description):
      summary_s = OVERVIEW.sub('', dom.results.description)
      summary_s = PARAGRAPH.sub('\n', summary_s)
      summary_s = STRIP_TAGS.sub('', summary_s)
      summary_s = MULTISPACES.sub(' ', summary_s)
      summary_s = NBSP.sub(' ', summary_s)
      summary_s = PARAGRAPH.sub('\n', summary_s)
      # decode the html entities that commonly appear in comicvine
      # descriptions.  NOTE: these replaces (and the NBSP pattern above) had
      # degenerated into no-ops -- the entity names had been collapsed into
      # their decoded characters (e.g. replace('&','&')); restored here so
      # the summary text is actually de-entitized again.
      summary_s = summary_s.replace('&amp;', '&')
      summary_s = summary_s.replace('&quot;', '"')
      summary_s = summary_s.replace('&lt;', '<')
      summary_s = summary_s.replace('&gt;', '>')
      issue.summary_s = summary_s.strip()
#===========================================================================
def __issue_parse_roles(issue, dom):
   ''' Parse the current comic's roles from the DOM. '''

   # this is a dictionary of comicvine role descriptors, mapped to the
   # 'issue' attribute names of the member variables that we want to
   # assign the associated values to.  so any comicvine person with the
   # 'cover' role will, for example, be assigned to the
   # issue.cover_artist_s attribute.
   ROLE_DICT = {'writer':['writer_s'], 'penciler':['penciller_s'], \
      'artist':['penciller_s','inker_s'], 'inker':['inker_s'],\
      'cover':['cover_artist_s'], 'editor':['editor_s'],\
      'colorer':['colorist_s'], 'letterer':['letterer_s']}

   # a simple test to make sure that all the values in ROLE_DICT match up
   # with members (symbols) in 'issue'.  this is to protect against renaming!
   test_symbols = [y for x in ROLE_DICT.values() for y in x]
   for symbol in test_symbols:
      if not symbol in issue.__dict__:
         raise Exception("missing symbol")

   # For creators, there are several different situations:
   # 1) if there is one or more than one creator
   # 2) if a given creator has one or more than one role
   rolemap = dict([(r, []) for l in ROLE_DICT.values() for r in l])
   if "person_credits" in dom.results.__dict__ and \
         "person" in dom.results.person_credits.__dict__:
      people = []
      if type(dom.results.person_credits.person) == type([]):
         people = dom.results.person_credits.person # a list of 'persons'
      elif dom.results.person_credits.person is not None:
         people = [dom.results.person_credits.person] # a 'person'
      for person in people:
         roles = []
         if "roles" in person.__dict__:
            if "role" in person.roles.__dict__ and \
                  type(person.roles.role) == type([]):
               roles = person.roles.role # a list of strings
            elif is_string(person.roles):
               # NOTE(review): this branch treats the whole 'roles' element
               # as a single role string; a single role wrapped in a child
               # element (person.roles.role, non-list) would NOT be handled
               # here -- confirm against the actual comicvine response.
               roles = [person.roles] # a string
         for role in roles:
            if role in ROLE_DICT:
               # corylow: SEPARATION OF CONCERNS: Issue 47
               # should be solved more generically:
               name = re.sub(r',|;', '', person.name) # see issue 47
               for cr_role in ROLE_DICT[role]:
                  rolemap[cr_role].append(name)

   # join each collected name list into a comma-separated attribute string
   for role in rolemap:
      setattr(issue, role, ', '.join(rolemap[role]) )
#===========================================================================
def __issue_scrape_extra_details(issue, page):
   ''' Parse additional details from the issues ComicVine webpage. '''
   # 'page' is the raw html text of the issue's comicvine page (may be None
   # or empty, in which case nothing happens)
   if page:

      # first pass: find all the alternate cover image urls by scanning for
      # the "content-pod alt-cover" divs and pulling out their img src urls
      regex = re.compile( \
         r'(?mis)\<\s*div[^\>]*content-pod alt-cover[^\>]+\>.*?div(.*?)div')
      for div_s in re.findall( regex, page ):
         inner_search_results = re.search(\
            r'(?i)\<\s*img\s+.*src\s*=\s*"([^"]*)', div_s)
         if inner_search_results:
            image_url_s = inner_search_results.group(1)
            if image_url_s:
               # swap the "_super" size variant for the "_large" one
               image_url_s = re.sub(r"_super", r"_large", image_url_s)
               issue.image_urls.append(image_url_s)

      # second pass: find the community rating (stars) for this comic by
      # scanning for the "user rating ... votes" span in the html
      regex = re.compile( \
         r'(?mis)\<span.*?>\s*user rating[\s-]+\d+\s+'
         +r'votes[\s,]+([\d\.]+)[\s\w\.]*\</span>')
      results = re.search( regex, page )
      if results:
         try:
            rating = float(results.group(1))
            if rating > 0:
               issue.rating_n = rating
         except:
            log.debug_exc("Error parsing rating for " + sstr(issue) + ": ")
| Python |
#####################################################################################
#
# Copyright (c) Harry Pierson. All rights reserved.
#
# This source code is subject to terms and conditions of the Microsoft Public License.
# A copy of the license can be found at http://opensource.org/licenses/ms-pl.html
# By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Microsoft Public License.
#
# You must not remove this notice, or any other, from this software.
#
#####################################################################################
import clr
clr.AddReference('System.Xml')
from System.String import IsNullOrEmpty
from System.Xml import XmlReader, XmlNodeType
from System.IO import StringReader
class XmlNode(object):
   '''
   A lightweight snapshot of the current node of a .NET System.Xml.XmlReader.
   Captures the node's local name, namespace, prefix, value, and node type;
   for element nodes it also captures the element's attributes (each one
   itself wrapped in an XmlNode).
   '''

   def __init__(self, xr):
      # 'xr' is an XmlReader positioned on the node to snapshot
      self.name = xr.LocalName
      self.namespace = xr.NamespaceURI
      self.prefix = xr.Prefix
      self.value = xr.Value
      self.nodeType = xr.NodeType
      if xr.NodeType == XmlNodeType.Element:
         self.attributes = []
         while xr.MoveToNextAttribute():
            # skip xmlns namespace-declaration pseudo-attributes
            if xr.NamespaceURI == 'http://www.w3.org/2000/xmlns/':
               continue
            self.attributes.append(XmlNode(xr))
         # return the reader's cursor to the owning element, since the
         # attribute iteration above moved it away
         xr.MoveToElement()

   @property
   def xname(self):
      # the expanded name, in the "{namespace}localname" convention
      if IsNullOrEmpty(self.namespace):
         return self.name
      return "{%(namespace)s}%(name)s" % self.__dict__
def parse(xml):
   '''
   Generator that reads the xml document at the given location ('xml' is
   anything XmlReader.Create accepts -- a url/filename string, stream, or
   TextReader) and yields an XmlNode for each content node encountered.
   For empty elements (i.e. <foo/>), the SAME node object is mutated into
   an end-element marker (attributes removed) and yielded a second time.
   '''
   xr = XmlReader.Create(xml)
   while xr.Read():
      # skip over whitespace, comments, etc. to the next content node
      xr.MoveToContent()
      node = XmlNode(xr)
      yield node
      if (xr.IsEmptyElement):
         # synthesize the matching end-element for <foo/> style elements.
         # NOTE: this mutates the node that was just yielded above.
         node.nodeType = XmlNodeType.EndElement
         del node.attributes
         yield node
def parseString(xml):
   ''' Like parse(), but reads xml markup from a string instead of a url. '''
   reader = StringReader(xml)
   return parse(reader)
if __name__ == "__main__":
   # smoke test: build a node generator over a live feed.
   # NOTE(review): parse() is lazy, so nothing is actually fetched or parsed
   # until 'nodes' is iterated -- confirm this no-op run is intentional.
   nodes = parse('http://feeds.feedburner.com/Devhawk')
'''
This module contains useful canned methods for accessing the Comic Vine
database (API) over the Internet. The documentation for this API and the
related queries can be found at: http://api.comicvine.com/documentation/
@author: Cory Banack
'''
import clr
import log
import xml2py
from utils import sstr
from dbmodels import DatabaseConnectionError
clr.AddReference('System')
from System import Text
from System.Net import WebException, WebRequest
from System.IO import IOException, StreamReader
# This is the api key needed to access the comicvine website.
# This key belongs to Cory Banack. If you fork this code to make your
# own scraper, please obtain and use your own (free) API key from comicvine
_API_KEY = '4192f8503ea33364a23035827f40d415d5dc5d18'
# =============================================================================
def _query_series_ids_dom(searchterm_s, skip_n=0):
   '''
   Queries Comic Vine for a page of (up to 20) comic book series matching the
   given search string, returning the resulting dom.  'skip_n' is the number
   of results to skip over (i.e. 3 means skip results 0, 1, and 2, so the
   first returned result is number 3.)  This is useful, because this query
   will not necessarily return all available results at once.

   This method doesn't return null, but it may throw Exceptions.
   '''
   if searchterm_s is None or searchterm_s == '' or skip_n < 0:
      raise ValueError('bad parameters')
   # {0} is the search string, {1} is the number of results to skip over
   query_url_s = \
      'http://api.comicvine.com/search/?api_key=' + _API_KEY + '&format=xml&' +\
      'limit=20&resources=volume&field_list=name,start_year,publisher,id,'+\
      'image,count_of_issues&query={0}&offset={1}'
   return __get_dom( query_url_s.format(searchterm_s, skip_n) )
# =============================================================================
def _query_series_details_dom(seriesid_s):
   '''
   Performs a query that will obtain a dom containing the start year and
   publisher for the given series ID.

   This method doesn't return null, but it may throw Exceptions.
   '''
   # {0} is the series id, an integer.
   _QUERY_URL_SERIES_DETAILS =\
      'http://api.comicvine.com/volume/{0}/?api_key=' + _API_KEY + \
      '&format=xml&field_list=start_year,publisher'
   if seriesid_s is None or seriesid_s == '':
      raise ValueError('bad parameters')
   # (a stray no-op 'pass' statement that used to sit here was removed)
   return __get_dom( _QUERY_URL_SERIES_DETAILS.format(sstr(seriesid_s) ) )
# =============================================================================
def _query_issue_ids_dom_fast(seriesname_s, skip_n=0):
    '''
    Queries the ComicVine API for a dom describing a portion of the issue IDs
    that match the given series name.  'skip_n' is the number of leading
    results to omit (i.e. skip_n=3 means results 0, 1 and 2 are skipped, so
    the first returned result is number 3.)
    This is the 'fast' variant: every returned record already carries its
    issue number, which saves the caller from having to query each issue id
    individually.  The tradeoff is that many returned issues may belong to
    other series (callers must filter them out), and occasionally the result
    list is incomplete.  See _query_issue_ids_dom_safe for the slower, better
    behaved alternative.
    This method doesn't return null, but it may throw Exceptions.
    '''
    if seriesname_s is None or seriesname_s == '' or skip_n < 0:
        raise ValueError('bad parameters')
    # {0} is the series name (a search string), {1} is the skip count
    url_template =\
        'http://api.comicvine.com/search/?api_key=' + _API_KEY + \
        '&format=xml&resources=issue&query={0}&offset={1}' + \
        '&field_list=volume,issue_number,id'
    return __get_dom(url_template.format(sstr(seriesname_s), sstr(skip_n)))
# =============================================================================
def _query_issue_ids_dom_safe(seriesid_s):
    '''
    Queries the ComicVine API for a dom listing ALL of the issue IDs that
    belong to the series with the given series ID.
    This is the 'safe' variant: the returned dom contains every issue id for
    the requested series and nothing else.  It is very slow, though, because
    the caller must still query each issue id to find its issue number.  See
    _query_issue_ids_dom_fast for a faster alternative.
    This method doesn't return null, but it may throw Exceptions.
    '''
    if seriesid_s is None or seriesid_s == '':
        raise ValueError('bad parameters')
    # {0} is the series ID, an integer
    url_template =\
        'http://api.comicvine.com/volume/{0}/?api_key=' + _API_KEY + \
        '&format=xml&field_list=issues'
    return __get_dom(url_template.format(sstr(seriesid_s)))
# =============================================================================
def _query_issue_details_dom(issueid_s):
    '''
    Queries the ComicVine API for the full details record of the given issue
    ID, returned as a parsed dom.
    Never returns null, but may throw exceptions if there are problems.
    '''
    if issueid_s is None or issueid_s == '':
        raise ValueError('bad parameters')
    # {0} is the issue ID
    url_template =\
        'http://api.comicvine.com/issue/{0}/?api_key=' + _API_KEY + '&format=xml'
    return __get_dom(url_template.format(sstr(issueid_s)))
# =============================================================================
def _query_issue_details_page(issueid_s):
    '''
    Performs a query that will obtain the ComicVine website details for the
    given issue. The details are returned in the form of an html string (a
    page) that can be scraped for info.
    Never returns null, but may throw exceptions if there are problems.
    '''
    # {0} is the issue ID
    _QUERY_URL_ISSUE_HTML = 'http://www.comicvine.com/issue/37-{0}/'
    if issueid_s is None or issueid_s == '':
        # bug fix: this used to 'return None', which contradicted the "never
        # returns null" contract above and was inconsistent with every other
        # query function in this module
        raise ValueError('bad parameters')
    url = _QUERY_URL_ISSUE_HTML.format(sstr(issueid_s))
    retval = __get_page(url)
    if retval:
        return retval
    else:
        raise DatabaseConnectionError('comicvine', url,
            Exception('comicvine website returned an empty document'))
# =============================================================================
def _query_issue_number_dom(issueid_s):
    '''
    Queries the ComicVine API for a dom holding the issue number (plus
    publish month and year) of the issue with the given issue ID.
    This method doesn't return null, but it may throw Exceptions.
    '''
    if issueid_s is None or issueid_s == '':
        raise ValueError('bad parameters')
    # {0} is the issue ID
    url_template =\
        'http://api.comicvine.com/issue/{0}/?api_key=' + _API_KEY + \
        '&format=xml&field_list=issue_number,publish_month,publish_year'
    return __get_dom(url_template.format(sstr(issueid_s)))
# =============================================================================
def _query_issue_image_dom(issueid_s):
    '''
    Queries the ComicVine API for a dom holding the cover image url of the
    issue with the given issue ID.
    This method doesn't return null, but it may throw Exceptions.
    '''
    if issueid_s is None or issueid_s == '':
        raise ValueError('bad parameters')
    # {0} is the issue ID
    url_template =\
        'http://api.comicvine.com/issue/{0}/?api_key=' + _API_KEY + \
        '&format=xml&field_list=image'
    return __get_dom(url_template.format(sstr(issueid_s)))
# =============================================================================
def __get_dom(url, lasttry=False):
'''
Obtains a parsed DOM tree from the XML at the given URL.
Never returns null, but may throw an exception if it has any problems
downloading or parsing the XML.
'''
retval = None
xml = __get_page( url )
if xml is None or not xml.strip():
msg = 'comicvine query returned an empty document: ' + url
if lasttry:
raise Exception(msg)
else:
log.debug('ERROR: ', msg)
retval = None
else:
try:
xml = __strip_invalid_xml_chars(xml)
dom = xml2py.parseString(xml)
if int(dom.status_code) == 1:
retval = dom # success
else:
if lasttry:
raise _ComicVineError(dom.status_code, dom.error, url)
else:
log.debug()
retval = None
except Exception, ex:
if lasttry:
raise ex
else:
log.debug_exc('ERROR: cannot parse results from comicvine: ' + url)
retval = None
# this next is an attempt to deal with issues #28 and #39. sometimes
# read_url does not seem to get a fully formed version of the xml file,
# or it even gets and empty file. in such cases, a re-query normally
# solves the problem.
if not retval and not lasttry:
log.debug('RETRYING the query...')
retval = __get_dom(url, True)
return retval
# =============================================================================
def __get_page(url):
    '''
    Reads the webpage at the given URL into a new string, which is returned.
    The returned value may be None if a problem is encountered, OR an exception
    may be thrown. If the exception is a DatabaseConnectionError, that
    represents an actual network problem connecting to the Comic Vine database.
    '''
    try:
        # comicvine can't handle raw spaces in the url, so escape them first
        request = WebRequest.Create(url.replace(' ', '%20'))
        response = request.GetResponse()
        responseStream = response.GetResponseStream()
        reader = StreamReader(responseStream, Text.Encoding.UTF8)
        page = reader.ReadToEnd()
    except (WebException, IOException) as wex:
        # this type of exception almost certainly means that the user's internet
        # is broken or the comicvine website is down. so wrap it in a nice,
        # recognizable exception before rethrowing it, so that error handlers can
        # recognize it and handle it differently than other, more unexpected
        # exceptions.
        raise DatabaseConnectionError("Comic Vine", url, wex)
    finally:
        # close whichever of the .NET resources actually got created; checking
        # vars() lets this cleanup work no matter where the try block failed
        if 'reader' in vars(): reader.Close()
        if 'responseStream' in vars(): responseStream.Close()
        if 'response' in vars(): response.Close()
    return page
# =============================================================================
def __strip_invalid_xml_chars(xml):
    '''
    Returns a copy of the given xml string with every character that is not
    legal in an XML document removed (unfortunately, Comic Vine DOES emit
    such characters, see issue 51).  Falsy input is passed back unchanged.
    Thanks: http://cse-mjmcl.cse.bris.ac.uk/blog/2007/02/14/1171465494443.html
    '''
    def _is_legal(cp):
        # the codepoint ranges that the XML specification permits
        return cp in (0x9, 0xA, 0xD) or \
            0x20 <= cp <= 0xD7FF or \
            0xE000 <= cp <= 0xFFFD or \
            0x10000 <= cp <= 0x10FFFF
    if not xml:
        return xml
    return ''.join(c for c in xml if _is_legal(ord(c)))
# =============================================================================
class _ComicVineError(Exception):
    '''
    A special exception that gets thrown anytime there is a semantic error
    with a query to the comicvine database; that is, an error wherein comic
    vine returns an error code.
    You can get the error code and name as a tuple via the get_error() method.
    '''
    def __init__(self, error_code, error_name, url):
        '''
        error_code => the integer comic vine error code
        error_name => the string name of the comic vine error code
        url => the url that caused the problem
        '''
        self._error = int(error_code), error_name
        msg = 'code {0}: "{1}" for: {2}'.format(error_code, error_name, url)
        # bug fix: super()'s first argument must be THIS class, not its base;
        # passing Exception started the mro lookup one level too high and
        # skipped Exception.__init__ itself
        super(_ComicVineError, self).__init__( msg )
    # ==========================================================================
    def get_error(self):
        '''
        Returns a tuple containing the integer comic vine error code that was used
        to construct this object, followed by the associated string error name.
        '''
        return self._error
| Python |
#####################################################################################
#
# Copyright (c) Harry Pierson. All rights reserved.
#
# This source code is subject to terms and conditions of the Microsoft Public License.
# A copy of the license can be found at http://opensource.org/licenses/ms-pl.html
# By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Microsoft Public License.
#
# You must not remove this notice, or any other, from this software.
#
#####################################################################################
import ipypulldom
from System.Xml import XmlNodeType
class _type_factory(object):
    '''
    A callable factory that creates (and caches) python types corresponding
    to xml nodes.  Types are keyed by base class, element name and xml
    namespace, so the same kind of node always maps to the same python type.
    '''
    class _type_node(object):
        # adapter that presents an ordinary python object's TYPE under the
        # same name/namespace interface as an ipypulldom XmlNode
        def __init__(self, node):
            ty = type(node)
            self.name = ty.__name__
            self.namespace = ty.xmlns
    def __init__(self):
        # cache: base class -> { element name -> [one type per namespace] }
        self.types = {}
    def find_type(self, node, parent):
        # returns the cached type for (parent, node.name, node.namespace),
        # creating and caching a brand new type if none matches yet
        def create_type(node, parent):
            return type(node.name, (parent,), {'xmlns':node.namespace})
        if parent not in self.types:
            self.types[parent] = {}
        tp = self.types[parent]
        if node.name not in tp:
            tp[node.name] = [create_type(node, parent)]
        tpn = tp[node.name]
        for t in tpn:
            if t.xmlns == node.namespace:
                return t
        #if there's no matching namespace type, create one and add it to the list
        new_type = create_type(node, parent)
        tpn.append(new_type)
        return new_type
    def __call__(self, node, parent=object):
        if isinstance(node, ipypulldom.XmlNode):
            return self.find_type(node, parent)
        return self.find_type(self._type_node(node), parent)

# the singleton factory instance through which all type lookups happen
xtype = _type_factory()
def xml2py(nodelist):
    '''
    Consumes events from the given pulldom node stream and converts the next
    element (and, recursively, its whole subtree) into a dynamically-typed
    python object.  Returns None when the enclosing element's end tag is
    reached.
    '''
    def children(nodelist):
        # yields this element's converted child objects, stopping once the
        # matching end-element event is consumed (xml2py returns None there)
        while True:
            child = xml2py(nodelist)
            if child is None:
                break
            yield child
    def set_attribute(parent, child):
        # attaches 'child' to 'parent' under the child's type name; repeated
        # names silently promote the attribute to a list of values
        name = type(child).__name__
        if not hasattr(parent, name):
            setattr(parent, name, child)
        else:
            val = getattr(parent, name)
            if isinstance(val, list):
                val.append(child)
            else:
                setattr(parent, name, [val, child])
    node = nodelist.next()
    if node.nodeType == XmlNodeType.EndElement:
        return None
    elif node.nodeType == XmlNodeType.Text or node.nodeType == XmlNodeType.CDATA:
        return node.value
    elif node.nodeType == XmlNodeType.Element:
        #create a new object type named for the element name
        cur = xtype(node)()
        cur._nodetype = XmlNodeType.Element
        #collect all the attributes and children in lists
        attributes = [xtype(attr, str)(attr.value) for attr in node.attributes]
        # NOTE: this deliberately rebinds the name 'children' from the
        # generator above to the materialized list; the generator call is
        # fully evaluated before the rebinding happens
        children = [child for child in children(nodelist)]
        if len(children) == 1 and isinstance(children[0], str):
            #fold up elements with a single text node
            cur = xtype(cur, str)(children[0])
            cur._nodetype = XmlNodeType.Element
        else:
            #otherwise, add child elements as properties on the current node
            for child in children:
                set_attribute(cur, child)
        for attr in attributes:
            attr._nodetype = XmlNodeType.Attribute
            set_attribute(cur, attr)
        return cur
    # any other node type falls through and returns None — presumably the
    # stream only yields Element/Text/CDATA/EndElement here; TODO confirm
    # against ipypulldom
def parse(xml):
    '''Reads XML from the given source (url or filename) and converts it
    into a tree of python objects.'''
    node_stream = ipypulldom.parse(xml)
    return xml2py(node_stream)
def parseString(xml):
    '''Converts the given XML string into a tree of python objects.'''
    node_stream = ipypulldom.parseString(xml)
    return xml2py(node_stream)
if __name__ == '__main__':
    # simple smoke test: parse an rss feed and print the title of each item
    rss = parse('http://feeds.feedburner.com/Devhawk')
    for item in rss.channel.item:
        print item.title
| Python |
'''
This module contains the ComicBook class, which represents a comic book
from ComicRack that we are scraping data into.
@author: Cory Banack
'''
#import clr
import re
import log
from time import strftime
from utils import sstr
import db
import utils
#==============================================================================
class ComicBook(object):
    '''
    A thin adapter around a backing ComicRack comic book object; this is the
    book that scraped metadata gets copied into (see save_issue).
    '''
    #===========================================================================
    def __init__(self, cr_book, scraper):
        '''
        cr_book -> the backing ComicRack book object (must not be None/empty)
        scraper -> the running scraper engine; only its comicrack reference
        and (later) its config are read
        '''
        if not cr_book:
            raise Exception("invalid backing comic book")
        self.__cr_book = cr_book
        self.__comicrack = scraper.comicrack
        # we keep our own copy of series name and issue number, because sometimes
        # we have to "repair" them. these values are immutable after this point.
        self.__series_s = self.__cr_book.ShadowSeries.strip()
        self.__issue_num_s = self.__cr_book.ShadowNumber.strip()
        self.__repair_bad_filename_parsing()

    # adapter properties that provide read-only access to this ComicBook's
    # backing ComicRack comic book object
    series_s = property( lambda self : self.__series_s )
    issue_num_s = property( lambda self : self.__issue_num_s )
    volume_n = property( lambda self : self.__cr_book.ShadowVolume )
    format_s = property( lambda self : self.__cr_book.ShadowFormat )
    year_n = property( lambda self : self.__cr_book.ShadowYear )
    tags_s = property( lambda self : self.__cr_book.Tags ) # comma separated
    notes_s = property( lambda self : self.__cr_book.Notes )
    uuid_s = property( lambda self : utils.sstr(self.__cr_book.Id) )
    filename_s = property( lambda self : self.__cr_book.FileName )
    filename_ext_s = property(lambda self : self.__cr_book.FileNameWithExtension)

    #==========================================================================
    def unique_series_s(self):
        '''
        The unique series name for this ComicBook. This is a special
        string such that any books that "appear" to be from the same series will
        have the same unique series name, and any that appear to be from
        different series will have different unique series names.
        This value is not the same as the series_s property. It takes that
        property (the series name) into account, but it ALSO considers other
        values that may differentiate series with the same name, like volume
        and format. It is also guaranteed to produce a unique value even if
        all other data in this ComicBook is empty.
        The unique series name is meant to be used internally (i.e. the key for
        a map, or for grouping ComicBooks), not for displaying to users.
        '''
        sname = '' if not self.series_s else self.series_s
        if sname and self.format_s:
            sname += self.format_s
        # normalize: strip all non-word characters and lowercase
        sname = re.sub('\W+', '', sname).lower()
        svolume = ''
        if sname:
            if self.volume_n and self.volume_n > 0:
                svolume = "[v" + sstr(self.volume_n) + "]"
        else:
            # if we can't find a name at all (very weird), fall back to the
            # ComicRack ID, which should be unique and thus ensure that this
            # comic doesn't get lumped in to the same series choice as any
            # other unnamed comics!
            sname = self.uuid_s
        return sname + svolume

    #==========================================================================
    def get_cover_image(self):
        '''
        Retrieves a COPY (a .NET Image object) of the cover page for this
        ComicBook. Returns null if one could not be obtained for any reason.
        '''
        book_name = self.filename_ext_s
        # a book with no backing file is "fileless" and has no pages to show
        fileless = False if book_name else True
        cover_image = None
        if fileless:
            cover_image = None
        else:
            cover_index = 0
            if self.__cr_book.FrontCoverPageIndex > 0:
                cover_index = self.__cr_book.FrontCoverPageIndex
            cover_image = \
                self.__comicrack.App.GetComicPage( self.__cr_book, cover_index )
            cover_image = utils.strip_back_cover(cover_image)
        return cover_image

    #==============================================================================
    def save_issue(self, issue, scraper):
        '''
        Copies all data in the given issue into this ComicBook object, respecting
        all of the overwrite/ignore rules defined in the given Configuration
        object.
        As a side-effect, some detailed debug log information about the new values
        is also produced.
        '''
        log.debug("setting values for this comic book ('*' = changed):")
        config = scraper.config
        cb = ComicBook
        book = self.__cr_book # coryhigh: fix this
        # series ---------------------
        value = cb.__massage_new_string("Series", issue.series_name_s, \
            book.Series, config.update_series_b, config.ow_existing_b, \
            True ) # note: we ALWAYS ignore blanks for 'series'!
        if ( value is not None ) : book.Series = value
        # issue number ---------------
        value = cb.__massage_new_string("Issue Number", issue.issue_num_s, \
            book.Number, config.update_number_b, config.ow_existing_b, \
            True ) # note: we ALWAYS ignore blanks for 'issue number'!
        if ( value is not None ) : book.Number = value
        # title ----------------------
        value = cb.__massage_new_string("Title", issue.title_s, book.Title, \
            config.update_title_b, config.ow_existing_b, config.ignore_blanks_b )
        if ( value is not None ) : book.Title = value
        # alternate series -----------
        value = cb.__massage_new_string("Alt/Arc", issue.alt_series_name_s, \
            book.AlternateSeries, config.update_alt_series_b, \
            config.ow_existing_b, config.ignore_blanks_b )
        if ( value is not None ) : book.AlternateSeries = value
        # summary --------------------
        value = cb.__massage_new_string("Summary", issue.summary_s, \
            book.Summary, config.update_summary_b, config.ow_existing_b, \
            config.ignore_blanks_b )
        if ( value is not None ) : book.Summary = value
        # year -----------------------
        value = cb.__massage_new_number("Year", issue.year_s, \
            book.Year, config.update_year_b, config.ow_existing_b, \
            True, -1, lambda x : x > 0 ) # note: we ALWAYS ignore blanks for 'year'
        if ( value is not None ) : book.Year = value
        # month ----------------------
        def remap(month):
            # months 1 to 12 are straightforward, but the remaining possible
            # values (listed below) must be converted into the range 1-12:
            # 13 - Spring 18 - None 23 - Sep/Oct 28 - Apr/May
            # 14 - Summer 19 - Jan/Feb 24 - Nov/Dec 29 - Jun/Jul
            # 15 - Fall 20 - Mar/Apr 25 - Holiday 30 - Aug/Sep
            # 16 - Winter 21 - May/Jun 26 - Dec/Jan 31 - Oct/Nov
            # 17 - Annual 22 - Jul/Aug 27 - Feb/Mar
            remap={ 1:1, 26:1, 2:2, 19:2, 3:3, 13:3, 27:3, 4:4, 20:4, 5:5, 28:5, \
                6:6, 14:6, 21:6, 7:7, 29:7, 8:8, 22:8, 9:9, 15:9, 30:9, 10:10,\
                23:10, 11:11, 31:11, 12:12, 16:12, 24:12, 25:12 }
            retval = -1;
            if month in remap:
                retval = remap[month]
            return retval
        value = cb.__massage_new_number("Month", issue.month_s, book.Month, \
            config.update_month_b, config.ow_existing_b, True, -1, \
            lambda x : x>=1 and x <=12, remap ) # ALWAYS ignore blanks for 'month'
        if ( value is not None ) : book.Month = value
        # volume --------------------
        # note: the series' start year is what gets written as the 'volume'
        value = cb.__massage_new_number("Volume", issue.start_year_s, \
            book.Volume, config.update_volume_b, config.ow_existing_b, \
            config.ignore_blanks_b, -1, lambda x : x > 0 )
        if ( value is not None ) : book.Volume = value
        # if we found an imprint for this issue, the user may prefer that the
        # imprint be listed as the publisher (instead). if so, make that change
        # before writing the 'imprint' and 'publisher' fields out to the book:
        if not config.convert_imprints_b and issue.imprint_s:
            issue.publisher_s = issue.imprint_s
            issue.imprint_s = ''
        # imprint -------------------
        value = cb.__massage_new_string("Imprint", issue.imprint_s, \
            book.Imprint, config.update_imprint_b, \
            config.ow_existing_b, config.ignore_blanks_b )
        if ( value is not None ) : book.Imprint = value
        # publisher -----------------
        value = cb.__massage_new_string("Publisher", issue.publisher_s, \
            book.Publisher, config.update_publisher_b, \
            config.ow_existing_b, config.ignore_blanks_b )
        if ( value is not None ) : book.Publisher = value
        # characters ----------------
        value = cb.__massage_new_string("Characters", issue.characters_s, \
            book.Characters, config.update_characters_b, \
            config.ow_existing_b, config.ignore_blanks_b )
        if ( value is not None ) : book.Characters = value
        # teams ----------------
        value = cb.__massage_new_string("Teams", issue.teams_s, \
            book.Teams, config.update_teams_b, \
            config.ow_existing_b, config.ignore_blanks_b )
        if ( value is not None ) : book.Teams = value
        # locations ----------------
        value = cb.__massage_new_string("Locations", issue.locations_s, \
            book.Locations, config.update_locations_b, \
            config.ow_existing_b, config.ignore_blanks_b )
        if ( value is not None ) : book.Locations = value
        # webpage ----------------
        value = cb.__massage_new_string("Webpage", issue.webpage_s, \
            book.Web, config.update_webpage_b, \
            config.ow_existing_b, config.ignore_blanks_b )
        if ( value is not None ) : book.Web = value
        # rating -----------------------
        value = cb.__massage_new_number("Rating", issue.rating_n, \
            float(book.CommunityRating), config.update_rating_b,
            config.ow_existing_b, config.ignore_blanks_b, 0.0,
            lambda x: x >= 0 and x <= 5, lambda x : max(0.0,min(5.0,x)) )
        if ( value is not None ) : book.CommunityRating = value
        # writer --------------------
        value = cb.__massage_new_string("Writers", issue.writer_s, \
            book.Writer, config.update_writer_b, \
            config.ow_existing_b, config.ignore_blanks_b )
        if ( value is not None ) : book.Writer = value
        # penciller -----------------
        value = cb.__massage_new_string("Pencillers", issue.penciller_s, \
            book.Penciller, config.update_penciller_b, \
            config.ow_existing_b, config.ignore_blanks_b )
        if ( value is not None ) : book.Penciller = value
        # inker ---------------------
        value = cb.__massage_new_string("Inkers", issue.inker_s, \
            book.Inker, config.update_inker_b, \
            config.ow_existing_b, config.ignore_blanks_b )
        if ( value is not None ) : book.Inker = value
        # colorist -----------------
        value = cb.__massage_new_string("Colorists", issue.colorist_s, \
            book.Colorist, config.update_colorist_b, \
            config.ow_existing_b, config.ignore_blanks_b )
        if ( value is not None ) : book.Colorist = value
        # letterer -----------------
        value = cb.__massage_new_string("Letterers", issue.letterer_s, \
            book.Letterer, config.update_letterer_b, \
            config.ow_existing_b, config.ignore_blanks_b )
        if ( value is not None ) : book.Letterer = value
        # coverartist --------------
        value = cb.__massage_new_string("CoverArtists", issue.cover_artist_s, \
            book.CoverArtist, config.update_cover_artist_b, \
            config.ow_existing_b, config.ignore_blanks_b )
        if ( value is not None ) : book.CoverArtist = value
        # editor -------------------
        value = cb.__massage_new_string("Editors", issue.editor_s, \
            book.Editor, config.update_editor_b, \
            config.ow_existing_b, config.ignore_blanks_b )
        if ( value is not None ) : book.Editor = value
        # tag ----------------------
        # ===== corylow: SEPARATION OF CONCERNS: this assumes CV! ==================
        # add in our own special tag that can be parsed back in when the user
        # rescrapes this book. we assume any other existing tags were either 1)
        # added by the user (so instead of replacing them, we append to them) or
        # 2) added by this script (so we replace the part we added with our new
        # tag, instead of appending.)
        new_tag = 'CVDB' + sstr(issue.issue_key)
        if book.Tags:
            book.Tags = book.Tags.strip()
            regexp = re.compile(r"(?i)CVDB(\d+|SKIP)")
            matches = regexp.search(book.Tags)
            if matches:
                new_tag = re.sub(regexp, new_tag, book.Tags)
            else:
                if book.Tags[-1] == ",":
                    book.Tags = book.Tags[:-1]
                new_tag = book.Tags +", " + new_tag
        value = cb.__massage_new_string("Tags", new_tag, \
            book.Tags, config.rescrape_tags_b, config.ow_existing_b, \
            config.ignore_blanks_b )
        if ( value is not None ) : book.Tags = value
        # notes --------------------
        # ===== corylow: SEPARATION OF CONCERNS: this assumes CV! ==================
        # add in our own special sentence that can be parsed back in when the user
        # rescrapes this book. we assume any other existing characters were
        # either 1) added by the user (so instead of replacing them, we append
        # to them) or 2) added by this script (so we replace the part we added
        # with our new sentence, instead of appending.)
        new_notes = 'Scraped metadata from ComicVine [%s] on %s.' % \
            ('CVDB' + sstr(issue.issue_key), strftime(r'%Y.%m.%d %X'))
        if book.Notes:
            book.Notes = book.Notes.strip()
            regexp = re.compile(
                r"(?i)Scraped.*?CVDB(\d+|SKIP).*?[\d\.]{8,} [\d:]{6,}\.")
            matches = regexp.search(book.Notes)
            if matches:
                # found the 'standard' embedded CVDB tag; update it
                new_notes = re.sub(regexp, new_notes, book.Notes)
            else:
                regexp = re.compile(r"(?i)CVDB(\d+|SKIP)")
                matches = regexp.search(book.Notes)
                if matches:
                    # found a custom embedded CVDB tag; update it
                    new_notes = 'CVDB' + sstr(issue.issue_key)
                    new_notes = re.sub(regexp, new_notes, book.Notes)
                else:
                    # found no embedded CVDB tag; add it
                    new_notes = book.Notes + "\n\n" + new_notes
        value = cb.__massage_new_string("Notes", new_notes, \
            book.Notes, config.rescrape_notes_b, config.ow_existing_b, \
            config.ignore_blanks_b )
        if ( value is not None ) : book.Notes = value
        del value
        # commit all proposed values to the backing ComicRack book
        book.WriteProposedValues(False)
        self.__maybe_download_thumbnail(issue, scraper)

    #===========================================================================
    def __maybe_download_thumbnail(self, issue, scraper):
        '''
        Iff this is a 'fileless' ComicBook, this method downloads and stores a
        cover image into it (assuming the given scraper's config settings permit.)
        Otherwise, this method does nothing.
        'issue' -> the Issue for the book that we are updating
        'scraper' -> the scraper engine that's currently running
        '''
        config = scraper.config
        label = "Thumbnail"
        already_has_thumb = self and self.__cr_book.CustomThumbnailKey
        book_is_fileless = self and not self.__cr_book.FilePath
        # don't download the thumbnail if the book has a backing file (cause that's
        # where the thumbnail will come from!) or if the user has specified in
        # config that we shouldn't.
        if not book_is_fileless or not config.download_thumbs_b or \
            (already_has_thumb and config.preserve_thumbs_b):
            log.debug("--> ", label.ljust(15), ": --- skipped ---")
        else:
            # get the url for this issue's cover art. check to see if the user
            # picked an alternate cover back when they closed the IssueForm, and if
            # not, just grab the url for the default cover art.
            url = None
            alt_cover_key = sstr(issue.issue_key)+"-altcover"
            if alt_cover_key in config.session_data_map:
                url = config.session_data_map[alt_cover_key]
            if not url and len(issue.image_urls):
                url = issue.image_urls[0]
            if not url:
                # there is no image url available for this issue
                log.debug("--> ", label.ljust(15),": --- not available! ---")
            else:
                image = db.query_image(url)
                if not image:
                    log.debug("ERROR: couldn't download thumbnail: ", url)
                else:
                    cr = scraper.comicrack.App
                    success = cr.SetCustomBookThumbnail(self.__cr_book, image)
                    if success:
                        # it worked!
                        log.debug("--> *", label.ljust(15),": ", url)
                    else:
                        log.debug("ERROR: comicrack can't set thumbnail!")

    #===========================================================================
    @staticmethod
    def __massage_new_string(
        label, new_value, old_value, update, overwrite, ignoreblanks):
        '''
        Returns a string value that should be copied into our backing ComicBook
        object, IFF that string value is not None. Uses a number of rules to
        decide what value to return.
        label - a human readable description of the given string being changed.
        new_value - the proposed new string value to copy over.
        old_value - the original value that the new value would copy over.
        update - if false, this method always returns None
        overwrite - whether it's acceptable to overwrite the old value with the
        new value when the old value is non-blank.
        ignoreblanks - if true, we'll never overwrite an old non-blank value
        with a new, blank value.
        '''
        # first, a little housekeeping so that we stay really robust
        if new_value is None:
            new_value = ''
        if old_value is None:
            old_value = ''
        if not isinstance(new_value, basestring) or \
            not isinstance(old_value,basestring):
            raise TypeError("wrong types for this method (" + label +")")
        old_value = old_value.strip();
        new_value = new_value.strip();
        # now decide about whether or not to actually do the update
        # only update if all of the following are true:
        # 1) the update option is turned on for this particular field
        # 2) we can overwrite the existing value, or there is no existing value
        # 3) we're not overwriting with a blank value unless we're allowed to
        retval = None;
        if update and (overwrite or not old_value) and \
            not (ignoreblanks and not new_value):
            retval = new_value
            marker = ' '
            if old_value != new_value:
                marker = '*'
            # log a (possibly truncated) one-line preview of the new value
            chars = retval.replace('\n', ' ')
            if len(chars) > 70:
                chars = chars[:70] + " ..."
            log.debug("--> ", marker, label.ljust(15), ": ", chars)
        else:
            log.debug("--> ", label.ljust(15), ": --- skipped ---")
        return retval

    #===========================================================================
    @staticmethod
    def __massage_new_number(label, new_value, old_value, update, overwrite, \
        ignoreblanks, blank_value, is_valid=None, remap_invalid=None):
        '''
        Returns a number (int or float) value that should be copied into our
        backing ComicBook object, IFF that value is not None. Uses a number of
        rules to decide what value to return.
        label - a human readable description of the given number being changed.
        new_value - the proposed new number value to copy over.
        old_value - the original value that the new value would copy over.
        update - if false, this method always returns None
        overwrite - whether it's acceptable to overwrite the old value with the
        new value when the old value is non-blank.
        ignoreblanks - if true, we'll never overwrite an old non-blank value
        with a new, blank value.
        blank_value - the number value that should be considered 'blank' (0? -1?)
        is_valid - an optional single argument function that decides if the given
        int return value is valid. If not, it is changed to 'blank_value'
        before it is returned, OR if possible it is remapped with...
        remap_invalid - an optional single argument function that converts the
        given invalid return value (according to 'is_valid') into a
        new, valid return value.
        '''
        # first, a little housekeeping so that we stay really robust
        if new_value is None:
            new_value = blank_value;
        if old_value is None:
            old_value = blank_value;
        if type(blank_value) != int and type(blank_value) != float:
            raise TypeError("wrong type for blank value");
        if type(old_value) != int and type(old_value) != float:
            raise TypeError("wrong type for old value");
        if type(old_value) != type(blank_value):
            raise TypeError("blank type is invalid type");
        if type(old_value) != type(new_value):
            # coerce the new value to the old value's numeric type
            try:
                if isinstance(old_value, int):
                    new_value = int(new_value)
                else:
                    new_value = float(new_value)
            except:
                # NOTE(review): bare except also swallows KeyboardInterrupt;
                # consider narrowing to (TypeError, ValueError)
                log.debug("--> WARNING: can't convert '", new_value,
                    "' into a ", type(old_value) )
                new_value = blank_value
        # check for (and possibly repair) the validity of the new_value
        if is_valid:
            if not is_valid(new_value):
                if remap_invalid:
                    new_value = remap_invalid(new_value)
                if not is_valid(new_value):
                    new_value = blank_value
        # now decide about whether or not to actually do the update
        # only update if all of the following are true:
        # 1) the update option is turned on for this particular field
        # 2) we can overwrite the existing value, or there is no existing value
        # 3) we're not overwriting with a blank value unless we're allowed to
        retval = None;
        if update and (overwrite or old_value == blank_value) and \
            not (ignoreblanks and new_value == blank_value):
            retval = new_value
            marker = ' '
            if old_value != new_value:
                marker = '*'
            if retval == blank_value:
                log.debug("--> ", marker, label.ljust(15), ": ")
            else:
                log.debug("--> ", marker, label.ljust(15), ": ", retval)
        else:
            log.debug("--> ", label.ljust(15), ": --- skipped ---")
        # last minute type checking, just to be sure the returned value type is good
        if retval != None:
            retval = float(retval) if type(old_value) == float else int(retval)
        return retval

    # ==========================================================================
    def __repair_bad_filename_parsing(self):
        '''
        Occasionally the ComicRack parser doesn't do a good job parsing in the
        series name and issue number (both critical bits of data for our
        purposes!). Ideally, this would be fixed in ComicRack, but since that
        doesn't seem like it's gonna happen, I'll patch up known problems in this
        method instead.
        '''
        # fix parsing for filenames like: "2000AD 1234 (1-1-11).cbz"
        match = re.match(r"2000\s*a[\s\.]*d[\s\.](\d+).*", self.filename_s, re.I)
        if match:
            self.__series_s = "2000AD"
            self.__issue_num_s = match.group(1)
        # comicrack doesn't recognize the "TPB" (trade paperback) format marker
        # on the end of the series name, so it doesn't strip it off properly.
        if self.series_s.lower().endswith(" tpb"):
            self.__series_s = self.series_s[:-4]
| Python |
"""
This module installs a GLOBAL logging system into an application. Any
information that is written out using the debug(), debug_exc(), or
handle_error methods will get printed to stdout.
All information that is written to stderr or stdout (by this class, or any other
mechanism) is also saved in memory, so that it can be written out to a file in
its entirety at any time by the dump() method.
USAGE
To make the methods in this class usable, call install(). Use a
try-finally to GUARANTEE that uninstall() will be called when your program
completes (with or without errors). This is VITAL in order to ensure that
all system resources are freed and returned to their original state!
THREAD SAFETY
This module is threadsafe while running, but is not during install() and
uninstall(), so care should be taken not to call any other method in this
module while either of those two methods are running.
@author: Cory Banack
"""
#corylow: comment and cleanup this file (method cases?)
import sys, clr
import utils
from dbmodels import DatabaseConnectionError
clr.AddReference('System')
from System.Threading import Mutex
clr.AddReference('System.Windows.Forms')
from System.Windows.Forms import DialogResult, MessageBox, \
MessageBoxButtons, MessageBoxIcon, SaveFileDialog
from System.IO import StreamWriter
from System.Text import UTF8Encoding
# a global variable, an instance of the Logger class that will only be
# available when this module has been "installed" ( with install() )
_logger = None
# a global variable, an instance of the main application window that we will
# attach any messageboxes that we spit out to.
_comicrack_window = None
#==============================================================================
def install(comicrack_window):
   """
   Installs this module.  This must be called before any other method in
   this module is called.
   
   comicrack_window => the main ComicRack window.  It becomes the owner of
      any message boxes this module displays (see handle_error()), and its
      gui implementation of stdout is what debug output is kept in sync with.
   """
   global _logger, _comicrack_window
   # installing twice would clobber the co-opted stdout/stderr state
   if _logger is not None or _comicrack_window is not None:
      raise Exception("don't install '" + __name__+ "' module twice")
   _comicrack_window = comicrack_window
   _logger = _Logger(comicrack_window)
#==============================================================================
def uninstall():
   """
   Uninstalls this module, releasing any resources that install() co-opted.
   
   It is so important to make sure this method is called at the end of your
   script, you should probably use a try-finally section to ensure it.
   """
   global _logger, _comicrack_window
   if _logger is not None:
      _logger.free()
      _logger = None
   _comicrack_window = None
#==============================================================================
def debug(*messages):
   """
   Writes a single-line message to the debug log.
   
   Any number of arguments (including none) may be given; each is converted
   to a string, the results are concatenated together, and a newline is
   appended before the whole thing is written to the debug log.  Arguments
   are usually strings or numbers, but anything with a working __str__
   method (or even 'None') is acceptable.
   """
   if _logger is not None:
      _logger.debug(*messages)
#==============================================================================
def debug_exc(message=''):
   """
   Writes the current error stack trace (i.e. from the current thread) to the
   debug log.  Only call this while that trace is current, i.e. from within
   the 'except' section of a try-except block.
   
   Unlike handle_error(), this method will NOT display any dialog or other
   information to the user.  Its ONLY function is to write the currently
   caught exception out to the debug log.
   """
   if _logger is not None:
      _logger.debug_exc(message)
#==============================================================================
def dump(filename):
   """
   Writes the entire contents of the debug log (everything recorded since
   install() was called) out to the given file.  The log is not cleared.
   """
   if _logger is not None:
      _logger.dump(filename)
#==============================================================================
def handle_error(error):
   '''
   Handles the given error object (a python or .net exception) by formatting it
   nicely and then printing it to the debug log.   Then an "unexpected error"
   message is displayed for the user in a modal dialog (owned by the main
   comicrack app window that was passed into the log.install() method. )
   This method should be the application's normal way to handle unexpected
   errors and exceptions.
   '''
   global _logger, _comicrack_window
   # silently do nothing if this module hasn't been installed
   if not _logger or not _comicrack_window:
      return
   # if none, do current python exception.  else sstr() the given exception
   if isinstance(error, Exception):
      debug("------------------- PYTHON ERROR ------------------------")
      debug_exc() # a python exception
   else:
      debug("-------------------- .NET ERROR -------------------------")
      debug(utils.sstr(error).replace('\r','')) # a .NET exception
   if type(error) == DatabaseConnectionError:
      # if this is a DatabaseConnectionError, then it is a semi-expected
      # error that usually occurs when the database website goes down.
      # Thus, it gets a special error message.
      MessageBox.Show(_comicrack_window,
         'The ' + error.db_name_s() + ' website could not be reached. It is\n'+
         'possible that the website is not responding, or that\n' +
         'you are not connected to the internet.\n\n' +
         'Please try again later.', "Cannot Access Comic Database",
         MessageBoxButtons.OK, MessageBoxIcon.Warning)
   else:
      # all other errors are considered "unexpected", and handled generically:
      # offer to save the full debug log out to a file of the user's choosing
      result = MessageBox.Show(_comicrack_window,
         'An unexpected error occurred. Would you like to save\n' +
         'a log file with more details about this problem?',
         "Unexpected Error", MessageBoxButtons.YesNo, MessageBoxIcon.Error)
      if result == DialogResult.Yes:
         dialog = SaveFileDialog()
         dialog.Title = 'Save Log File'
         dialog.Filter = 'All Files (*.*)|*.*'
         try:
            if dialog.ShowDialog() == DialogResult.OK:
               dump(dialog.FileName)
               MessageBox.Show(_comicrack_window,\
                  'A log file for this error has been saved. Please consider\n'+
                  "reporting this incident (and submitting the log) to the\n" +
                  "issue tracker on the main Comic Vine Scraper website.",
                  'Saved Log File',\
                  MessageBoxButtons.OK, MessageBoxIcon.Information)
         except:
            # even saving the log can fail; record it and tell the user
            debug_exc()
            MessageBox.Show(_comicrack_window, "Error saving debug log.")
#==============================================================================
class _Logger(object):
   """ A hidden class that implements the public api of this module. """
   
   #==========================================================================
   def __init__(self, comicrack_window):
      """
      Initializes this class.  Only one may be initialized at a time!
      comicrack_window => All writing to stdout will occur on the comicrack
      windows event pump thread (i.e. invoked via a delegate).
      This is needed because comicrack has a gui implementation of stdout
      that we want to be properly in sync with.
      """
      # if stdout/stderr are already co-opted, another _Logger must be live.
      # fixed: this used to 'raise' a bare string, which is not a valid
      # exception type and would itself blow up with a TypeError.
      if ( sys.stdout != sys.__stdout__ or sys.stderr != sys.__stderr__):
         raise Exception("do not instantiate two instances of this class!!")
      
      # the comicrack window whose gui stdout we will write debug messages to
      self._comicrack_window = comicrack_window
      
      # the log of all debugged output that this class creates
      self._logLines = []
      
      # a mutex that protects all access to the logLines (above)
      self._mutex = Mutex()
      
      sys.stdout = self
      sys.stderr = self
      
   #==========================================================================
   def free(self):
      """ Frees up all resources owned or co-opted by this class """
      # protect access to the logLines by using the mutex.
      # fixed: WaitOne(0) merely polled the mutex with a zero timeout and
      # ignored the result, so the "critical section" could run unprotected
      # (and ReleaseMutex can throw when the mutex isn't owned).  A blocking
      # WaitOne() provides the mutual exclusion the comments intend.
      self._mutex.WaitOne()
      try:
         self._logLines = None
         self._comicrack_window = None
         sys.stdout = sys.__stdout__
         sys.stderr = sys.__stderr__
      finally:
         self._mutex.ReleaseMutex()
      # not perfectly threadsafe, but close enough since this method is only
      # really called when the app is completely finished running anyhow
      self._mutex.Dispose()
      
   #==========================================================================
   def debug(self, *messages):
      """ Implements the module-level debug() method """
      # list comprehension instead of map(), so this behaves identically
      # under python 2 and python 3 (py3 map() has no append)
      strings = [utils.sstr(x) for x in messages]
      strings.append('\n')
      self._debugRaw( ''.join(strings) )
      
   #==========================================================================
   def _debugRaw(self, message=''):
      """ Records the given message, and writes it out to the -real- stdout. """
      # protect access to the logLines with a mutex (for multiple threads);
      # see free() for why this is a blocking WaitOne()
      self._mutex.WaitOne()
      try:
         if self._logLines is None:
            raise Exception("you must install the _Logger before using it")
         try:
            output_line = utils.sstr(message)
         except:
            # shouldn't happen!
            output_line = "***** LOGGING ERROR *****"
         self._logLines.append( output_line )
         sys.__stdout__.write(output_line)
      finally:
         self._mutex.ReleaseMutex()
         
   #==========================================================================
   def debug_exc(self, message):
      """ Implements the module-level debug_exc() method. """
      # (fixed: this docstring used to appear AFTER the first statement,
      # where it was nothing but a no-op string expression.)
      
      # print the caller's message first, if a non-blank one was given
      if not (message is None) and len(message.strip()) > 0:
         self.debug(message)
      # then the exception's name and description...
      try:
         self.debug(''.join(['Caught ', sys.exc_info()[0].__name__, ': ',
            utils.sstr(sys.exc_info()[1])]))
      except:
         self.debug(": Exception name couldn't be formatted :")
      # ...followed by its stack trace
      try:
         self.debug("Traceback (most recent call last):")
         for line in self._getCurrentStackTrace():
            self.debug(self._formatTraceLine(line))
      except:
         self.debug(": Traceback couldn't be formatted :")
         
   #==========================================================================
   def _getCurrentStackTrace(self):
      """
      Retrieves the current stacktrace, as a list of triples:
      (filename, lineno, codename)
      """
      traceback = sys.exc_info()[2]
      stackTrace = []
      while traceback is not None:
         frame = traceback.tb_frame
         lineno = traceback.tb_lineno
         code = frame.f_code
         filename = code.co_filename
         name = code.co_name
         stackTrace.append((filename, lineno, name))
         traceback = traceback.tb_next
      return stackTrace
      
   #==========================================================================
   def _formatTraceLine(self, lineInfo):
      """ Formats the triples from _getCurrentStackTrace() into a nice line. """
      fileName, lineNo, name = lineInfo
      line = ' File "%s", line %s, in %s' % (fileName, lineNo, name)
      return line
      
   #==========================================================================
   def dump(self, filename):
      """ Implements the module-level dump() method. """
      # protect access to the logLines with a mutex (for multiple threads);
      # see free() for why this is a blocking WaitOne()
      self._mutex.WaitOne()
      try:
         if self._logLines is None:
            raise Exception("you must install the _Logger before using it")
         # fixed: 'writer' must be predefined, otherwise the finally clause
         # below dies with a NameError whenever the StreamWriter constructor
         # throws (e.g. for an unwritable filename)
         writer = None
         try:
            writer = StreamWriter(filename, False, UTF8Encoding())
            for line in self._logLines:
               writer.Write(line)
         finally:
            if writer: writer.Dispose()
      finally:
         self._mutex.ReleaseMutex()
         
   #==========================================================================
   def write (self, obj):
      """
      This method exists in order to make this class into a 'file-like'
      object, which can be used to directly co-opt stdout and stderr.
      """
      self._debugRaw(obj)
| Python |
'''
@author: Cory Banack
'''
# corylow: comment and cleanup this file
import clr
import resources
from utils import persist_map, load_map
clr.AddReference('System')
from System.IO import File
class Configuration(object):
   '''
   A bean-like holder for all of the user-configurable settings of a scrape
   operation.  The settings can be persisted into the application's settings
   file with save_defaults(), and reloaded later with load_defaults().
   '''
   
   # the keys under which each setting is persisted in the settings file
   __UPDATE_SERIES = 'updateSeries'
   __UPDATE_NUMBER = 'updateNumber'
   __UPDATE_MONTH = 'updateMonth'
   __UPDATE_TITLE = 'updateTitle'
   __UPDATE_ALT_SERIES = 'updateAltSeries'
   __UPDATE_WRITER = 'updateWriter'
   __UPDATE_PENCILLER = 'updatePenciller'
   __UPDATE_INKER = 'updateInker'
   __UPDATE_COVER_ARTIST = 'updateCoverArtist'
   __UPDATE_COLORIST = 'updateColorist'
   __UPDATE_LETTERER = 'updateLetterer'
   __UPDATE_EDITOR = 'updateEditor'
   __UPDATE_SUMMARY = 'updateSummary'
   __UPDATE_YEAR = 'updateYear'
   __UPDATE_IMPRINT = 'updateImprint'
   __UPDATE_PUBLISHER = 'updatePublisher'
   __UPDATE_VOLUME = 'updateVolume'
   __UPDATE_CHARACTERS = 'updateCharacters'
   __UPDATE_TEAMS = 'updateTeams'
   __UPDATE_LOCATIONS = 'updateLocations'
   __UPDATE_WEBPAGE = 'updateWebpage'
   __UPDATE_RATING = 'updateRating'
   __OVERWRITE_EXISTING = 'overwriteExisting'
   __IGNORE_BLANKS = 'ignoreBlanks'
   __CONVERT_IMPRINTS = 'convertImprints'
   __SPECIFY_SERIES = 'specifySeriesName'
   __SHOW_COVERS = 'showCovers'
   __DOWNLOAD_THUMBS = 'downloadThumbs'
   __PRESERVE_THUMBS = 'preserveThumbs'
   __SCRAPE_IN_GROUPS = 'scrapeInGroups'
   __FAST_RESCRAPE = 'fastRescrape'
   __RESCRAPE_NOTES = 'updateNotes'
   __RESCRAPE_TAGS = 'updateTags'
   __SUMMARY_DIALOG = 'summaryDialog'
   
   # maps each persistence key (above) onto the name of the instance
   # attribute that holds the corresponding setting.  load_defaults(),
   # save_defaults() and __eq__() are all driven by this table, which keeps
   # them automatically symmetric (the original code repeated this key-to-
   # attribute mapping by hand in three separate places).
   __SETTING_ATTRS = {
      __UPDATE_SERIES : 'update_series_b',
      __UPDATE_NUMBER : 'update_number_b',
      __UPDATE_MONTH : 'update_month_b',
      __UPDATE_TITLE : 'update_title_b',
      __UPDATE_ALT_SERIES : 'update_alt_series_b',
      __UPDATE_WRITER : 'update_writer_b',
      __UPDATE_PENCILLER : 'update_penciller_b',
      __UPDATE_INKER : 'update_inker_b',
      __UPDATE_COVER_ARTIST : 'update_cover_artist_b',
      __UPDATE_COLORIST : 'update_colorist_b',
      __UPDATE_LETTERER : 'update_letterer_b',
      __UPDATE_EDITOR : 'update_editor_b',
      __UPDATE_SUMMARY : 'update_summary_b',
      __UPDATE_YEAR : 'update_year_b',
      __UPDATE_IMPRINT : 'update_imprint_b',
      __UPDATE_PUBLISHER : 'update_publisher_b',
      __UPDATE_VOLUME : 'update_volume_b',
      __UPDATE_CHARACTERS : 'update_characters_b',
      __UPDATE_TEAMS : 'update_teams_b',
      __UPDATE_LOCATIONS : 'update_locations_b',
      __UPDATE_WEBPAGE : 'update_webpage_b',
      __UPDATE_RATING : 'update_rating_b',
      __OVERWRITE_EXISTING : 'ow_existing_b',
      __IGNORE_BLANKS : 'ignore_blanks_b',
      __CONVERT_IMPRINTS : 'convert_imprints_b',
      __SPECIFY_SERIES : 'specify_series_b',
      __SHOW_COVERS : 'show_covers_b',
      __DOWNLOAD_THUMBS : 'download_thumbs_b',
      __PRESERVE_THUMBS : 'preserve_thumbs_b',
      __SCRAPE_IN_GROUPS : 'scrape_in_groups_b',
      __FAST_RESCRAPE : 'fast_rescrape_b',
      __RESCRAPE_NOTES : 'rescrape_notes_b',
      __RESCRAPE_TAGS : 'rescrape_tags_b',
      __SUMMARY_DIALOG : 'summary_dialog_b',
   }
   
   def __init__(self):
      ''' Initializes a new Configuration with default values. '''
      # (fixed: this method used to end with 'return self', which is illegal
      # for __init__ and made every 'Configuration()' call raise TypeError.)
      
      # general scraping behaviour settings
      self.ow_existing_b = True
      self.ignore_blanks_b = False
      self.convert_imprints_b = True
      self.specify_series_b = False
      self.show_covers_b = True
      self.download_thumbs_b = True
      self.preserve_thumbs_b = False
      self.scrape_in_groups_b = True
      self.fast_rescrape_b = True
      self.rescrape_notes_b = True
      self.rescrape_tags_b = True
      self.summary_dialog_b = True
      
      # which comic book fields should be updated during a scrape
      self.update_series_b = True
      self.update_number_b = True
      self.update_month_b = True
      self.update_title_b = True
      self.update_alt_series_b = True
      self.update_writer_b = True
      self.update_penciller_b = True
      self.update_inker_b = True
      self.update_cover_artist_b = True
      self.update_colorist_b = True
      self.update_letterer_b = True
      self.update_editor_b = True
      self.update_summary_b = True
      self.update_imprint_b = True
      self.update_year_b = True
      self.update_publisher_b = True
      self.update_volume_b = True
      self.update_characters_b = True
      self.update_teams_b = True
      self.update_locations_b = True
      self.update_webpage_b = True
      self.update_rating_b = True
      
      # this is a general purpose map that is easily available to every part
      # of the application, and lasts only as long as the current scraping
      # session.  it is an place for one part of the app to save ad-hoc,
      # session-based data for other parts of the program in a highly
      # flexible, unstructured fashion.
      self.session_data_map = {}
      
   def load_defaults(self):
      '''
      Loads any saved default settings out of the application's settings
      file, overwriting the current value of each setting found there.
      Settings that are not in the file are left unchanged.
      '''
      # load the persisted dict out of the serialized file (if any)...
      loaded = {}
      if File.Exists(resources.SETTINGS_FILE):
         loaded = load_map(resources.SETTINGS_FILE)
      # ...and adopt any settings that it happens to contain
      for key, attr in Configuration.__SETTING_ATTRS.items():
         if key in loaded:
            setattr(self, attr, loaded[key])
            
   def save_defaults(self):
      '''
      Saves the current settings out to the application's settings file,
      where they can be retrieved later with load_defaults().
      '''
      defaults = {}
      for key, attr in Configuration.__SETTING_ATTRS.items():
         defaults[key] = getattr(self, attr)
      persist_map(defaults, resources.SETTINGS_FILE)
      
   def __ne__(self, other):
      ''' Standard inequality; the logical negation of __eq__. '''
      return not self.__eq__(other)
      
   def __eq__(self, other):
      ''' True iff every setting in 'other' matches this Configuration.
          (session_data_map is deliberately NOT part of the comparison.) '''
      return all( getattr(self, attr) == getattr(other, attr)
         for attr in Configuration.__SETTING_ATTRS.values() )
      
   def __str__(self):
      ''' Returns a multi-line checklist of all settings, for debug logs. '''
      def x(x):
         return 'X' if x else ' '
      return \
      "--------------------------------------------------------------------\n"+\
      "[{0}] Series".format(x(self.update_series_b)).ljust(20) +\
      "[{0}] Number".format(x(self.update_number_b)).ljust(20) +\
      "[{0}] Month".format(x(self.update_month_b)).ljust(20) +\
      "\n" + \
      "[{0}] Title".format(x(self.update_title_b)).ljust(20) +\
      "[{0}] Alt Series".format(x(self.update_alt_series_b)).ljust(20) +\
      "[{0}] Writer".format(x(self.update_writer_b)).ljust(20) +\
      "\n" + \
      "[{0}] Penciller".format(x(self.update_penciller_b)).ljust(20) +\
      "[{0}] Inker".format(x(self.update_inker_b)).ljust(20) +\
      "[{0}] Cover Art".format(x(self.update_cover_artist_b)).ljust(20) +\
      "\n" + \
      "[{0}] Colorist".format(x(self.update_colorist_b)).ljust(20) +\
      "[{0}] Letterer".format(x(self.update_letterer_b)).ljust(20) +\
      "[{0}] Editor".format(x(self.update_editor_b)).ljust(20) +\
      "\n" + \
      "[{0}] Summary".format(x(self.update_summary_b)).ljust(20) +\
      "[{0}] Imprint".format(x(self.update_imprint_b)).ljust(20) +\
      "[{0}] Year".format(x(self.update_year_b)).ljust(20) +\
      "\n" + \
      "[{0}] Publisher".format(x(self.update_publisher_b)).ljust(20) +\
      "[{0}] Volume".format(x(self.update_volume_b)).ljust(20) +\
      "[{0}] Characters".format(x(self.update_characters_b)).ljust(20) +\
      "\n" + \
      "[{0}] Teams".format(x(self.update_teams_b)).ljust(20) +\
      "[{0}] Locations".format(x(self.update_locations_b)).ljust(20) +\
      "[{0}] Webpage".format(x(self.update_webpage_b)).ljust(20) +\
      "\n" + \
      "[{0}] Rating".format(x(self.update_rating_b)).ljust(20) +\
      "\n" +\
      "-------------------------------------------------------------------\n"+\
      "[{0}] Overwrite Existing".format(x(self.ow_existing_b)).ljust(30)+\
      "[{0}] Ignore Blanks".format(x(self.ignore_blanks_b)).ljust(30) +\
      "\n" + \
      "[{0}] Convert Imprints".format(x(self.convert_imprints_b)).ljust(30) +\
      "[{0}] Specify Series".format(x(self.specify_series_b)).ljust(30)+\
      "\n" + \
      "[{0}] Download Thumbs".format(x(self.download_thumbs_b)).ljust(30) +\
      "[{0}] Preserve Thumbs".format(x(self.preserve_thumbs_b)).ljust(30)+\
      "\n" + \
      "[{0}] Scrape in Groups".format(x(self.scrape_in_groups_b)).ljust(30)+\
      "[{0}] Rescraping: Notes".format(x(self.rescrape_notes_b)).ljust(30)+\
      "\n" + \
      "[{0}] Fast Rescrape".format(x(self.fast_rescrape_b)).ljust(30)+\
      "[{0}] Rescraping: Tags".format(x(self.rescrape_tags_b)).ljust(30) +\
      "\n" + \
      "[{0}] Show Covers".format(x(self.show_covers_b)).ljust(30)+\
      "[{0}] Summary Dialog".format(x(self.summary_dialog_b)).ljust(30)+\
      "\n" + \
      "-------------------------------------------------------------------"
'''
This module contains a variety of generally useful utility methods.
@author: Cory Banack
'''
import clr
import log
from time import strftime
import re
import sys
clr.AddReference('System')
from System.IO import File
clr.AddReference('System.Drawing')
from System.Drawing import Graphics, Bitmap
clr.AddReference('IronPython')
from IronPython.Compiler import CallTarget0
#==============================================================================
def is_string(object):
   ''' returns a boolean indicating whether the given object is a string '''
   # any non-None instance of basestring (i.e. str or unicode) counts
   return object is not None and isinstance(object, basestring)
#==============================================================================
def is_number(s):
   ''' returns a boolean indicating whether the given object is a number, or
       a string that can be converted to a number. '''
   # only catch the conversion failures that float() actually raises; the
   # original bare 'except:' also silently hid real programming errors
   try:
      float(s)
      return True
   except (TypeError, ValueError):
      return False
#==============================================================================
def sstr(object):
   ''' safely converts the given object into a string (sstr = safestr) '''
   if object is None:
      return '<None>'
   # strings are passed through unchanged: str() breaks on some strings
   # that have unicode characters, due to a python bug.  (all strings in
   # python are unicode.)
   return object if is_string(object) else str(object)
# ==========================================================================
def invoke(control, delegate, synchronous = True):
   '''
   Invokes the given delegate (method) on the given control/form's
   GUI thread.  If the control doesn't have access to a working window
   handle, usually because the Form it is in was disposed) then do nothing.
   The 'synchronous' parameter determines whether or not this method blocks
   while the given delegate is run.  True means block, false means don't.
   '''
   if not control or not delegate:
      raise TypeError("null parameter not allowed!")
   # NOTE(review): the 'True or' below short-circuits the entire handle
   # check, so this branch is ALWAYS taken.  It looks like a debugging
   # leftover, but removing it could change behavior (the except clause
   # below already copes with vanished handles) -- confirm intent before
   # changing.
   if True or control.IsHandleCreated and \
      not control.IsDisposed and not control.Disposing:
      try:
         if synchronous:
            # allow thrown exceptions to bubble up
            if control.InvokeRequired:
               control.Invoke(CallTarget0(delegate))
            else:
               delegate()
         else:
            control.BeginInvoke(CallTarget0(delegate))
      except:
         if sys.exc_info()[0].__name__ == "SystemError" and \
            sstr(sys.exc_info()[1]).find("handle") != -1:
            # deliberately ignore this exception, its unavoidably possible
            # since we might invoke on a control via a thread that doesn't
            # own that control's handle--the handle can dissappear at any time
            #
            # see issue 147 for an example of this happening:
            #
            # "SystemError: Invoke or BeginInvoke cannot be called on a control
            # until the window handle has been created."
            pass
         else:
            # any other exceptions should be raised and reported as usual
            raise
#==============================================================================
def convert_roman_numerals(num_s):
   '''
   Converts the given string into an positive or negative integer value,
   throwing an exception if it can't.  The given string can be a integer value
   in regular arabic form (1, 2, 3,...) or roman form (i, ii, iii, iv,...).
   The returned value will be an integer.
   Note that roman numerals outside the range [-20, 20] and 0 are not supported.
   '''
   roman_mapping = {'i':1, 'ii':2,'iii':3,'iv':4,'v':5,'vi':6,'vii':7,'viii':8,
                    'ix':9,'x':10,'xi':11,'xii':12,'xiii':13,'xiv':14,'xv':15,
                    'xvi':16,'xvii':17,'xviii':18,'xix':19,'xx':20}
   # normalize: strip ALL spaces and lower-case before parsing
   num_s = num_s.replace(' ', '').strip().lower()
   negative = num_s.startswith('-')
   if negative:
      num_s = num_s[1:]
   try:
      # arabic form first...
      retval = int(num_s)
   except ValueError:
      # ...else roman form.  a KeyError here means the string was not a
      # number at all, and is allowed to propagate (as documented above).
      # the original bare 'except:' also masked real programming errors.
      retval = roman_mapping[num_s]
   return -retval if negative else retval
#==============================================================================
def convert_number_words(phrase_s, expand_b):
   """
   Converts all of the number words (as defined by regular expression 'words')
   in the given phrase, either expanding or contracting them as specified.
   When expanding, words like '1' and '2nd' will be transformed into 'one'
   and 'second' in the returned string.  When contracting, the transformation
   goes in reverse.
   This method only works for numbers up to 20, and it only works properly
   on lower case strings.
   """
   number_map = {'0': 'zero', '1': 'one', '2': 'two', '3': 'three',\
      '4': 'four', '5': 'five', '6': 'six','7': 'seven', '8': 'eight',\
      '9': 'nine', '10': 'ten', '11': 'eleven', '12': 'twelve',\
      '13': 'thirteen', '14': 'fourteen', '15': 'fifteen',\
      '16': 'sixteen', '17': 'seventeen', '18': 'eighteen', '19': 'nineteen',\
      '20': 'twenty', '0th': 'zeroth', '1rst': 'first', '2nd': 'second',\
      '3rd': 'third', '4th': 'fourth', '5th': 'fifth', '6th': 'sixth',\
      '7th': 'seventh', '8th': 'eighth', '9th': 'ninth', '10th': 'tenth',\
      '11th': 'eleventh', '12th': 'twelveth', '13th': 'thirteenth',\
      '14th': 'fourteenth', '15th': 'fifteenth', '16th': 'sixteenth',\
      '17th': 'seventeenth', '18th': 'eighteenth', '19th': 'nineteenth',\
      '20th': 'twentieth'}
   b = r'\b'
   if expand_b:
      # numeral -> word.  (.items() replaces the python-2-only .iteritems();
      # it behaves identically on python 2 and also works on python 3)
      for (x,y) in number_map.items():
         phrase_s = re.sub(b+x+b, y, phrase_s)
      # '1st' isn't in the map (only '1rst' is) so it needs special handling
      phrase_s = re.sub(r'\b1st\b', 'first', phrase_s)
   else:
      # word -> numeral
      for (x,y) in number_map.items():
         phrase_s = re.sub(b+y+b, x, phrase_s)
      # special cases for spellings that the map doesn't cover
      phrase_s = re.sub(r'\btwelfth\b', '12th', phrase_s)
      phrase_s = re.sub(r'\beightteenth\b', '18th', phrase_s)
   return phrase_s
#==============================================================================
def persist_map(map, file):
   """
   Writes the given map of strings-to-values into a file, by converting all of
   its values into strings.   Any key value pair that contains the ':'
   character will not be written out.  All other contents that were in the
   given file will be destroyed.
   """
   # corylow: switch this to use .NET and utf8 (during load, too)
   try:
      with open(file, 'w') as f:
         # a little header line recording when the file was written
         f.write(":: This file was generated on "\
            + strftime(r'%Y.%m.%d %X') + "\n\n")
         # write entries in sorted key order, one "key : value" per line.
         # (sorted() works on python 2 and 3; the original 'keys().sort()'
         # only works on python 2, where keys() returns a list)
         for key in sorted(map.keys()):
            value = sstr(map[key]).strip()
            key = sstr(key).strip()
            if ':' in key or ':' in value:
               # a ':' in either half would corrupt the file format
               log.debug("WARNING: can't write map entry containing ':'; ",
                  key, " -> ", value)
            else:
               f.write(key + ' : ' + value + "\n")
   except:
      # deliberately best-effort: a failed save is logged but never fatal
      log.debug_exc("problem saving mapfile: " + sstr(file))
#==============================================================================
def load_map(file):
   """
   Reads a map out of the given file, which was created with the persist_map
   function.  All keys in the returned map will be strings, but the values will
   be converted to integers, booleans and floats where possible.
   If this given file doesn't exist, this method returns an empty map.
   """
   retval = {}
   try:
      if File.Exists(file):
         with open(file, 'r') as f:
            lines = f.readlines()
         # each valid entry is one "key : value" line; skip everything else
         # (including the timestamp header that persist_map writes)
         for pair in [line.strip().split(':') for line in lines]:
            if len(pair) != 2:
               continue
            key = pair[0].strip()
            value = pair[1].strip()
            # coerce the string back into a bool, float or int where possible
            if value.lower() == "false":
               value = False
            elif value.lower() == "true":
               value = True
            else:
               try:
                  value = float(value) if '.' in value else int(value)
               except:
                  pass
            retval[key] = value
   except:
      log.debug_exc("problem loading mapfile " + sstr(file))
      retval = {}
   return retval
#==============================================================================
def strip_back_cover(image):
   """
   Checks the given image to see if it has the pixel ratio of 2 comic book pages
   side by side.  If it does NOT, then this method does nothing and the given
   image is returned without modification.
   But if it does have that magic pixel ratio, then we assume that the 2 pages
   are the front and back covers of a comic book (on the right and left halves
   of the image, respectively) and we create and return a NEW image that
   contains only the front cover (right half) of the original.
   If a new image is returned, this method will call Dispose on the original.
   """
   if not image:
      return image
   ratio = float(image.Width) / float(image.Height)
   if 1.2 < ratio < 1.5:
      # create a new image with the back cover (left half) deleted
      front = Bitmap(image.Width/2, image.Height)
      g = Graphics.FromImage(front)
      g.DrawImage(image, -image.Width/2, 0)
      g.Dispose()
      image.Dispose()
      image = front
   return image
| Python |
'''
This module contains utility methods for working with ComicRack
ComicBook objects (i.e. 'book' objects).
@author: Cory Banack
'''
import re
from dbmodels import IssueRef
# =============================================================================
def extract_issue_ref(book):
   '''
   This method looks in the tags and notes fields of the given book for
   evidence that the given ComicBook has been scraped before. If possible,
   it will construct an IssueRef based on that evidence, and return it.
   If not, it will return None.
   If the user has manually added a "skip" flag to one of those fields, this
   method will return the string "skip", which should be interpreted as
   "never scrape this book".
   'book' -> an object exposing 'tags_s', 'notes_s' and 'issue_num_s'
             string attributes.
   '''
   # coryhigh: move this into comicbook. or maybe the db layer?
   # ===== corylow: SEPARATION OF CONCERNS: this assumes CV! ==================
   # search the tags first, then fall back to the two notes-field formats
   tag_found = re.search(r'(?i)CVDB(\d+|SKIP)', book.tags_s)
   if not tag_found:
      tag_found = re.search(r'(?i)CVDB(\d+|SKIP)', book.notes_s)
   if not tag_found:
      tag_found = re.search(r'(?i)ComicVine.?\[(\d+)', book.notes_s)
   retval = None
   if tag_found:
      retval = tag_found.group(1).lower()
      try:
         if retval != 'skip':
            retval = IssueRef(book.issue_num_s, int(retval))
      except Exception:
         # a malformed id (or a failed IssueRef construction) means the
         # evidence was unusable; treat the book as never-scraped.
         # note: deliberately NOT a bare except, so SystemExit and
         # KeyboardInterrupt still propagate.
         retval = None
   return retval
'''
This module is home to the Scheduler class.
@author: Cory Banack
'''
import clr
import log
clr.AddReference('System')
from System.Threading import Monitor, Thread, ThreadStart
# =============================================================================
class Scheduler(object):
   '''
   A class that maintains its own thread, which can be used to invoke "tasks"
   (methods) in serial, last-in-ignore-everything-else order. That means
   that, in addition to the currently running task, if any, there can be at
   most one other task (the most recently submitted one) queued up to run
   next. Queueing a large number of tasks in a short period of time will lead
   to the first, last, and an indeterminate number of the rest actually
   getting run.
   Do not forget to call the 'shutdown' method on any instance of this class
   once it will no longer be used, so that its background thread can be safely
   disposed of.
   '''
   # ==========================================================================
   def __init__(self):
      ''' Creates a new Scheduler and starts its background thread. '''
      # the next scheduled task. if this is "self", it means that the next
      # scheduled task is to shut down the __scheduler's background thread.
      self.task = None;
      # the background thread that tasks get run on
      self.loop_thread = self.__start_thread_loop()
   # ==========================================================================
   def submit(self, task):
      '''
      Submits the given task (a method handle) to this Scheduler, to be run on
      the background thread. if the Scheduler is idle, the given task will be
      run almost immediately. If the Scheduler is busy, the given task will be
      run as soon as the Scheduler finishes its current task, UNLESS a new task
      is added before the given task has a chance to start. In that case, the
      new task will take the given task's place in line, and the given task will
      never be executed.
      If this Scheduler has been shutdown, the given task will never be run.
      '''
      if task:
         # 'self' is used as the shared lock object for Monitor
         Monitor.Enter(self)
         try:
            # a pending value of "self" is a shutdown request (see 'shutdown');
            # never overwrite it, so a shutdown can't be cancelled by a task
            if self.task != self:
               # notice this replaces any task that is waiting to be run
               self.task = task
               # wake the background thread if it is blocked in Monitor.Wait
               Monitor.Pulse(self)
         finally:
            Monitor.Exit(self)
   # ==========================================================================
   def shutdown(self, block):
      '''
      Shuts down this Scheduler, after it has finished any task that it may
      currently be running. After this method is called, no further submitted
      tasks will be run by this Scheduler, ever. You MUST call this method in
      order to clean up this Scheduler properly.
      The 'block' boolean parameter indicates whether this method should block
      until the Scheduler thread has finished running any last task
      and shutting down (true), or should return immediately (false).
      '''
      self.submit(self) # submitting "self" is a magic trick that shuts down
      if block:
         # wait for the background thread to notice the request and exit
         self.loop_thread.Join()
   # ==========================================================================
   def __start_thread_loop(self):
      '''
      Starts (and returns) the background thread, which will wait-loop forever,
      running tasks that are submitted via the 'submit' method, until it is
      flagged by the 'shutdown' method to terminate.
      '''
      def threadloop():
         # loops until the sentinel value "self" (a shutdown request) is seen
         task = None
         while task != self:
            try:
               Monitor.Enter(self)
               try:
                  # atomically take the pending task (if any); block waiting
                  # for a Pulse from 'submit' when there is nothing to do
                  task = self.task
                  if task != self:
                     self.task = None
                     if task is None:
                        Monitor.Wait(self)
               finally:
                  Monitor.Exit(self)
               # run the task OUTSIDE the lock, so 'submit' is never blocked
               # by a long-running task
               if task != self and task is not None:
                  task()
            except Exception as ex:
               # slightly odd error handling, cause this thread should NEVER
               # die as the result of an exception!
               try: log.handle_error(ex)
               except: pass
               task = None
      thread = Thread(ThreadStart(threadloop))
      # a background thread won't keep the process alive on its own
      thread.IsBackground = True
      thread.Start()
      return thread
'''
@author: Cory Banack
'''
#corylow: comment and cleanup this file
import clr
clr.AddReference('System.Windows.Forms')
from System.Windows.Forms import DataGridView
# a specialized DataGridView that replaces its "enter" keystroke with a
# click event on the button given in its constructor
class ButtonDataGridView(DataGridView):
   '''
   A DataGridView subclass that redirects its "enter" keystroke into a
   click on the button that was supplied to its constructor.
   '''
   def __init__(self, button):
      # the button to "click" whenever the user presses enter
      self._button = button
   def ProcessCmdKey(self, msg, key):
      ''' Overridden to convert enter keypresses into button clicks. '''
      if key.ToString() != 'Return':
         return super(ButtonDataGridView, self).ProcessCmdKey(msg, key)
      self._button.PerformClick()
      return True
'''
This module contains the DBPictureBox class, which can be used to display
remotely loaded images using an asyncrhonous off-loading thread manner.
@author: Cory Banack
'''
import clr
import resources
import utils
import db
from scheduler import Scheduler
clr.AddReference('System.Drawing')
from System.Drawing import Graphics, Bitmap
from System.Drawing import Rectangle, GraphicsUnit
from System.Drawing.Imaging import ColorMatrix, ImageAttributes
clr.AddReference('System.Windows.Forms')
from System.Windows.Forms import PictureBox, PictureBoxSizeMode
#==============================================================================
class DBPictureBox(PictureBox):
   '''
   This class extends a regular .NET PictureBox so that its image can be set
   asynchronously by giving it a SeriesRef or IssueRef object (the
   set_image_ref method). This ref object is then used to query the
   comic book database to find the appropriate image (if any) to set.
   Because accessing a database can be slow and potentially prone to errors,
   this class contains a number of features to automatically handle
   problems that may occur:
   1) All image retrieval happens asynchronously on an external thread. The
   final result is only loaded into this picturebox when the image retrieval is
   finished. If many image retrievals are requested nearly simultaneously,
   most of them will be ignored, and only the most recent one is guaranteed to
   actually be performed (and update the displayed image.)
   2) All image retrieval is short-term cached, so if you switch the image ref
   back to a previous one, it will automatically use the cached image instead of
   reloading.
   '''
   #===========================================================================
   def __init__(self):
      ''' Defines member variables for new instances of this class. '''
      # the ref of whatever image should currently be displayed, or None
      self.__current_image_ref = None
      # a simple "last-in-and-ignore-everything-else" scheduler, used to run
      # the (potentially slow) database image queries off the gui thread
      self.__scheduler = Scheduler()
      # the cache of ref->image objects where we store all loaded images;
      # every image in here is owned by this object and Disposed in free()
      self.__image_cache = {}
      # the image that gets displayed if we have nothing else to display
      self.__unknown_image = resources.createComicVineLogo()
      # the image that gets displayed while we are loading another image
      # (a semi-transparent copy of the "unknown" image)
      self.__loading_image = self.__copy_transparent(self.__unknown_image)
      self._initialize()
   #===========================================================================
   def _initialize(self):
      ''' Initial configuration for new instances of this class '''
      PictureBox.__init__(self)
      # refresh the displayed image whenever we become visible; while
      # invisible, set_image_ref defers the (possibly expensive) update
      def visibility_changed(sender, args):
         if self.Visible: self.__update_image()
      self.VisibleChanged += visibility_changed
      self.SizeMode = PictureBoxSizeMode.StretchImage
      self.set_image_ref(None)
   #===========================================================================
   def free(self):
      ''' Explicitly frees all resources held by this object. '''
      # shut down the scheduler first, so no download task can touch the
      # cache or images after we start disposing them
      self.__scheduler.shutdown(True) # blocks; safer even if gui locks a little
      self.__unknown_image.Dispose()
      self.__loading_image.Dispose()
      for image in self.__image_cache.values():
         image.Dispose()
      self.__image_cache = {}
      PictureBox.Dispose(self, True)
   #===========================================================================
   def set_image_ref(self, ref):
      '''
      Sets the image displayed in this picturebox to whatever image
      corresponds to the given ref (a SeriesRef or IssueRef) in the database.
      If the given value is None, or unavailable, a placeholder image gets used.
      '''
      self.__current_image_ref = ref
      # while invisible, skip the update; visibility_changed will do it later
      if self.Visible:
         self.__update_image()
   #===========================================================================
   def __copy_transparent(self, image):
      ''' Creates a semi-transparent copy of the given image '''
      b = Bitmap( image.Width, image.Height )
      g = Graphics.FromImage(b)
      # Matrix33 scales the alpha channel, i.e. draw at 30% opacity
      cm = ColorMatrix()
      cm.Matrix33 = 0.3
      ia = ImageAttributes()
      ia.SetColorMatrix(cm)
      g.DrawImage(image, Rectangle(0,0, image.Width, image.Height), 0,0,\
         image.Width, image.Height, GraphicsUnit.Pixel, ia)
      return b
   #===========================================================================
   def __update_image(self):
      '''
      Update the currently displayed image on this picturebox to match
      whatever image __current_image_ref pulls out of the database or cache.
      If the image is coming from the database, loading is done on a separate
      worker thread, so as not to lock up the UI.
      '''
      # simple image setter that uses the "unknown" image if 'image' is None
      def switchimage( image ):
         if image: self.Image = image
         else: self.Image = self.__unknown_image
      ref = self.__current_image_ref
      # 1. if the ref is empty, switch to display the placeholder image
      #    (note: 0 is treated as a valid ref, not as "empty")
      if not ref and ref != 0:
         switchimage(None)
      # 2. if the ref is cached, switch to display the cached image
      elif ref in self.__image_cache:
         switchimage( self.__image_cache[ref] )
      # 3. if the ref is unknown, the hard part begins. create a download
      #    "task" that downloads, caches, and then switches display to the
      #    needed image. then invoke this task asynchronously on the
      #    scheduler's off-thread.
      else:
         self.Image = self.__loading_image
         def download_task():
            # 3a. load the image (runs on the scheduler thread)
            new_image = db.query_image(ref) # disposed later
            # 3b. now that we've loaded a new image, the following method is
            #     passed back to the gui thread and run to update our gui
            def update_image():
               # if somehow the image we just loaded is already in the cache,
               # take it out and dispose it. this should be rare.
               if ref in self.__image_cache:
                  old_image = self.__image_cache[ref]
                  del self.__image_cache[ref]
                  if old_image:
                     old_image.Dispose()
               # cache the new image, so we never have to load it again
               if new_image:
                  self.__image_cache[ref] = new_image
               # if the __current_image_ref hasn't changed, switch this
               # PictureBox to display that image. otherwise, we're already
               # loading a newer one, so don't do a pointless visual update.
               if ref == self.__current_image_ref:
                  switchimage(new_image)
            utils.invoke(self, update_image, False)
         self.__scheduler.submit(download_task)
#corylow: comment and cleanup this file
'''
@author: Cory Banack
'''
import clr
from cvform import CVForm
clr.AddReference('System.Windows.Forms')
from System.Windows.Forms import ProgressBar
clr.AddReference('IronPython')
class ProgressBarForm(CVForm):
   '''
   A small popup window containing a single progress bar, used to show the
   progress of a running scrape operation. Closing this window manually is
   treated as a request to cancel the scrape; exiting it as a context manager
   closes it WITHOUT cancelling.
   '''
   def __init__(self, owner, scraper, maximum):
      '''
      'owner' -> the Form that owns this popup
      'scraper' -> the currently running ScrapeEngine
      'maximum' -> the total number of steps for the progress bar
      '''
      CVForm.__init__(self, owner, "pbformLocation")
      bar = ProgressBar()
      bar.Name = 'pb'
      bar.Width = 400
      bar.Minimum = 0
      bar.Maximum = maximum
      bar.Value = 0
      bar.Step = 1
      self.Controls.Add(bar)
      self.Width = 400
      self.Height = 45
      # the progress bar control, exposed for callers to step/update
      self.prog = bar
      self.scraper = scraper
      # when True, closing this form cancels the scrape operation
      self.cancel_on_close = True
   def show_form(self):
      ''' Displays this form (non-blocking). '''
      self.Show(self.Owner)
   def OnClosed(self, args):
      ''' Overridden to treat a manual close as a scrape cancellation. '''
      # if this close occurred as the result of the user manually closing the
      # window, that's a 'cancel' request; flip the scraper's cancelled switch.
      if self.cancel_on_close:
         self.scraper.cancel()
      CVForm.OnClosed(self, args)
   def __enter__(self):
      return self
   def __exit__(self, type, value, traceback):
      # a context-managed exit is a programmatic close, not a user cancel
      self.cancel_on_close = False
      self.Close()
| Python |
'''
This module is home to the SearchForm class.
@author: Cory Banack
'''
import clr
from cvform import CVForm
clr.AddReference('System.Windows.Forms')
from System.Windows.Forms import AutoScaleMode, Button, \
DialogResult, Label, TextBox
clr.AddReference('System.Drawing')
from System.Drawing import Point, Size
#==============================================================================
class SearchForm(CVForm):
   '''
   This class is a popup, modal dialog with a text field that asks the user to
   specify search terms for a search query on the Comic Vine database.
   '''
   #===========================================================================
   def __init__(self, scraper, initial_search_s):
      '''
      Initializes this form.
      'scraper' -> the currently running ScrapeEngine
      'initial_search_s' -> the initial value in this form's text field.
      '''
      # the gui components of this form, all built in __build_gui:
      # the text label, the search ('ok') button, the skip button,
      # the cancel button, and the search-terms textbox
      self.__label = None
      self.__search_button = None
      self.__skip_button = None
      self.__cancel_button = None
      self.__textbox = None
      CVForm.__init__(self, scraper.comicrack.MainWindow, "searchformLocation")
      scraper.cancel_listeners.append(self.Close)
      self.__build_gui(initial_search_s)
   #===========================================================================
   def __build_gui(self, initial_search_s):
      ''' Constructs and initializes the gui for this form. '''
      # build each gui component; the three buttons share a common helper
      self.__label = self.__build_label()
      self.__search_button = self.__build_button('&Search', DialogResult.OK, 150)
      self.__skip_button = self.__build_button('&Skip', DialogResult.Ignore, 235)
      self.__cancel_button = self.__build_button(
         '&Cancel', DialogResult.Cancel, 320)
      self.__textbox = self.__build_textbox(
         initial_search_s, self.__search_button, self.__cancel_button)
      # configure this form, and add all gui components to it
      self.AutoScaleMode = AutoScaleMode.Font
      self.ClientSize = Size(405, 100)
      self.Text = 'Search for a Comic Book'
      for component in [self.__label, self.__textbox, self.__search_button,
            self.__skip_button, self.__cancel_button]:
         self.Controls.Add(component)
      # define the keyboard focus tab traversal ordering
      self.__textbox.TabIndex = 0
      self.__search_button.TabIndex = 1
      self.__skip_button.TabIndex = 2
      self.__cancel_button.TabIndex = 3
   #===========================================================================
   def __build_label(self):
      ''' builds and returns the text label for this form '''
      label = Label()
      label.Location = Point(10, 10)
      label.Size = Size(385, 20)
      label.Text = "Search the comic book database for:"
      return label
   #===========================================================================
   def __build_button(self, text_s, dialog_result, x):
      '''
      Builds and returns one of the buttons for this form.
      'text_s' -> the display text for the button
      'dialog_result' -> the DialogResult the button produces when clicked
      'x' -> the x-coordinate at which the button is placed on this form
      '''
      button = Button()
      button.DialogResult = dialog_result
      button.Location = Point(x, 70)
      button.Size = Size(75, 23)
      button.Text = text_s
      button.UseVisualStyleBackColor = True
      return button
   #===========================================================================
   def __build_textbox(self, initial_text_s, searchbutton, cancelbutton):
      '''
      Builds and returns the textbox for this form.
      initial_text_s -> the starting text for the textbox
      searchbutton -> the 'search' button from the containing Form
      cancelbutton -> the 'cancel' button from the containing Form
      '''
      # a TextBox subclass that wires keyboard behaviour to the form's buttons
      class SearchTextBox(TextBox):
         # convert ENTER and ESCAPE keypresses into clicks on the search
         # and cancel buttons, respectively
         def OnKeyPress(self, args):
            keychar = args.KeyChar
            if keychar == chr(13):
               searchbutton.PerformClick()
               args.Handled = True
            elif keychar == chr(27):
               cancelbutton.PerformClick()
               args.Handled = True
            else:
               TextBox.OnKeyPress(self, args)
         # disable the Search button whenever the textbox is empty
         def OnTextChanged(self, args):
            searchbutton.Enabled = bool(self.Text.strip())
      textbox = SearchTextBox()
      textbox.Location = Point(10, 35)
      textbox.Size = Size(385, 1)
      if initial_text_s:
         textbox.Text = initial_text_s
         textbox.SelectAll()
      return textbox
   #===========================================================================
   def show_form(self):
      '''
      Displays this form, blocking until the user closes it. When it is
      closed, one of three values will be returned. If SearchFormResult.CANCEL
      is returned, it means the user has elected to cancel the scrape operation.
      If a SearchFormResult.SKIP is returned, it means the user has elected to
      skip the current book. Finally, if anything else is returned it will be
      a string that the user has entered--the string that the user wants to
      search on.
      '''
      answer = self.ShowDialog( self.Owner ) # blocks
      if answer == DialogResult.Ignore:
         return SearchFormResult.SKIP
      if answer != DialogResult.OK:
         return SearchFormResult.CANCEL
      # an OK with nothing actually typed in is treated as a cancel
      search_terms_s = self.__textbox.Text.strip()
      return search_terms_s if search_terms_s else SearchFormResult.CANCEL
#===========================================================================
class SearchFormResult(object):
   ''' Special values that SearchForm.show_form() can return instead of a
       search string. '''
   CANCEL = "cancel" # the user cancelled the whole scrape operation
   SKIP = "skip"     # the user elected to skip the current book
'''
This module is home to the WelcomeForm class.
@author: Cory Banack
'''
import clr
import resources
from cvform import CVForm
from configform import ConfigForm
clr.AddReference('System.Windows.Forms')
from System.Windows.Forms import AutoScaleMode, Button, DialogResult, Label
clr.AddReference('System.Drawing')
from System.Drawing import Point, Size
# =============================================================================
class WelcomeForm(CVForm):
   '''
   This is the first modal popup dialog that you see when you run the scraper.
   It welcomes you to the program, and offers you the ability to change
   script settings before continuing.
   '''
   #===========================================================================
   def __init__(self, scraper, books):
      '''
      Initializes this form.
      'scraper' -> this the ScrapeEngine that we are running as part of.
      'books' -> a list of all the comic books being scraped.
      '''
      CVForm.__init__(self, scraper.comicrack.MainWindow, "welcomeformLocation")
      self.__build_gui(books)
   # ==========================================================================
   def __build_gui(self, books):
      '''
      Constructs and initializes the gui for this form.
      'books' -> a list of all the comic books being scraped.
      '''
      # 1. --- build each gui component
      label = self.__build_label(books)
      ok = self.__build_okbutton()
      settings = self.__build_settingsbutton()
      cancel = self.__build_cancelbutton()
      # 2. --- configure this form, and add all the gui components to it
      self.AcceptButton = ok
      self.CancelButton = cancel
      self.AutoScaleMode = AutoScaleMode.Font
      self.Text = 'Comic Vine Scraper - v' + resources.SCRIPT_VERSION
      self.ClientSize = Size(396, 100)
      for component in (label, ok, cancel, settings):
         self.Controls.Add(component)
      # 3. --- define the keyboard focus tab traversal ordering
      ok.TabIndex = 0
      cancel.TabIndex = 1
      label.TabIndex = 2
      settings.TabIndex = 3
   # ==========================================================================
   def __build_label(self, books):
      '''
      Builds and returns the Label for this form.
      'books' -> a list of all the comic books being scraped.
      '''
      label = Label()
      label.AutoSize = True
      label.Location = Point(9, 10)
      label.Size = Size(299, 13)
      # pluralize "book" when more than one comic is being scraped
      suffix_s = "s" if len(books) > 1 else ""
      label.Text = ("You are about to download and store details "+\
         'for {0} comic book{1}.\n\n'+\
         "Click 'Start Scraping...' to begin.").format(len(books), suffix_s)
      return label
   # ==========================================================================
   def __build_settingsbutton(self):
      ''' Builds and returns the settings button for this form. '''
      button = self.__build_button('Settings...', 10, 80)
      button.Click += self.__show_configform
      return button
   # ==========================================================================
   def __build_cancelbutton(self):
      ''' Builds and returns the cancel button for this form. '''
      button = self.__build_button('Cancel', 309, 75)
      button.DialogResult = DialogResult.Cancel
      return button
   # ==========================================================================
   def __build_okbutton(self):
      ''' Builds and returns the ok button for this form. '''
      button = self.__build_button('Start Scraping...', 208, 95)
      button.DialogResult = DialogResult.OK
      return button
   # ==========================================================================
   def __build_button(self, text_s, x, width):
      '''
      Builds and returns a basic button for this form.
      'text_s' -> the display text for the button
      'x' -> the x-coordinate at which the button is placed on this form
      'width' -> the width of the button, in pixels
      '''
      button = Button()
      button.Location = Point(x, 68)
      button.Size = Size(width, 23)
      button.Text = text_s
      button.UseVisualStyleBackColor = True
      return button
   # ==========================================================================
   def show_form(self):
      '''
      Displays this form, blocking until the user closes it. Returns a boolean
      indicating whether the user cancelled the dialog and scrape operation
      (False) or whether the user clicked ok to continue (True).
      '''
      return self.ShowDialog() == DialogResult.OK # blocks
   # ==========================================================================
   def __show_configform(self, sender, args):
      '''
      Displays the configform, blocking until the user closes it. Changes made
      to the settings in that form will be saved in the user's profile, where
      they can be loaded when needed.
      '''
      with ConfigForm(self) as config_form:
         config_form.show_form() # blocks
| Python |
'''
This module is home to the IssuesForm and IssuesFormResult classes.
@author: Cory Banack
'''
import clr
clr.AddReference('Microsoft.VisualBasic')
from System.ComponentModel import ListSortDirection
clr.AddReference('System.Drawing')
from System.Drawing import Point, Size
clr.AddReference('System.Windows.Forms')
from System.Windows.Forms import AutoScaleMode, Button, \
DataGridViewAutoSizeColumnMode, DataGridViewContentAlignment, \
DataGridViewSelectionMode, DialogResult, Label
from buttondgv import ButtonDataGridView
from cvform import CVForm
from issuecoverpanel import IssueCoverPanel
from utils import sstr
#==============================================================================
class IssueForm(CVForm):
'''
This class is a popup, modal dialog that displays all of the Comic Book
issues in a series. The issues are shown in a table, which the user can
navigate through, browsing the cover art for each issue. Once the user has
selected the issue that matches the comic that she is scraping, she clicks
the ok button to close this dialog and continue scraping her comic using the
identified IssueRef.
'''
#===========================================================================
def __init__(self, scraper, issue_ref_hint, issue_refs, series_name_s):
'''
Initializes this form. If a good issue key hint is given, that issue will
be preselected in the table if possible.
'scraper' -> the currently running ScrapeEngine
'issue_ref_hint' -> may be the issue id for the given book (or may not!)
'issue_refs' -> a set or list containing the IssueRefs to display
'series_name_s' -> the name of the series that the given issues belong to
'''
# the the shared global configuration
self.__config = scraper.config
# a list of IssueRef objects that back this form; one ref per table row,
# where each IssueRef represents an issue that the user can pick
self.__issue_refs = list(issue_refs)
# the ok button for this dialog
self.__ok_button = None
# the label for this dialog
self.__label = None
# the table that displays issues (one per row) for the user to pick from
self.__table = None
# whether or no we were able to pre-select the "hinted" issue in the table
self.__found_issue_in_table = False
# IssueCoverPAnel that shows the cover for the currently selected IssueRef
self.__coverpanel = None
## the index (into self.__issue_refs) of the currently selected IssueRef
self.__chosen_index = None
if len(issue_refs) <= 0:
raise Exception("do not invoke the IssueForm with no IssueRefs!")
CVForm.__init__(self, scraper.comicrack.MainWindow, "issueformLocation")
self.__build_gui(issue_ref_hint, series_name_s)
scraper.cancel_listeners.append(self.Close)
# ==========================================================================
def __build_gui(self, issue_ref_hint, series_name_s):
''' Constructs and initializes the gui for this form. '''
# 1. --- build each gui component
self.__ok_button = self.__build_okbutton()
skip_button = self.__build_skipbutton()
back_button = self.__build_backbutton()
self.__table = self.__build_table(
self.__issue_refs, issue_ref_hint, series_name_s, self.__ok_button)
self.__label = self.__build_label() # must build AFTER table is built!
self.__coverpanel = self.__build_coverpanel()
# 2. --- configure this form, and add all the gui components to it
self.AutoScaleMode = AutoScaleMode.Font
self.ClientSize = Size(730, 395)
self.Text = 'Choose a Comic Book Issue'
self.FormClosed += self.__form_closed_fired
self.Controls.Add (self.__label)
self.Controls.Add(self.__table)
self.Controls.Add(self.__ok_button)
self.Controls.Add(skip_button)
self.Controls.Add(back_button)
self.Controls.Add(self.__coverpanel) # must be added LAST
# 3. --- define the keyboard focus tab traversal ordering
self.__ok_button.TabIndex = 1
skip_button.TabIndex = 2
back_button.TabIndex = 3
self.__coverpanel.TabIndex = 4
self.__table.TabIndex = 5
#4. --- make sure the UI goes into a good initial state
self.__change_table_selection_fired(None, None)
# ==========================================================================
def __build_table(self, issue_refs, issue_ref_hint,
series_name_s, enter_button):
'''
Builds and returns the table for this form. If a good issue key hint is
given, that issue will be preselected in the table if possible.
'issue_refs' -> a list with one IssueRef object for each row in the table
'issue_ref_hint' -> may be the issue key for the given book (or may not!)
'series_name_s' -> the name of the series to which all issues belong
'enter_button' -> the button to "press" if the user hits enter
'''
# 1. --- configure the table itself
table = ButtonDataGridView(enter_button)
table.AllowUserToOrderColumns = True
table.SelectionMode = DataGridViewSelectionMode.FullRowSelect
table.MultiSelect = False
table.ReadOnly = True
table.RowHeadersVisible = False
table.AllowUserToAddRows = False
table.AllowUserToResizeRows = False
table.AllowUserToResizeColumns = False
table.DefaultCellStyle.NullValue = "--"
table.AutoResizeColumns
if self.__config.show_covers_b:
table.Size = Size(500, 290)
table.Location = Point(218, 60)
else:
table.Size = Size(708, 290)
table.Location = Point(10, 60)
# 2. --- build columns
table.ColumnCount = 4
table.Columns[0].Name = "Series"
table.Columns[0].DefaultCellStyle.Alignment =\
DataGridViewContentAlignment.MiddleLeft
table.Columns[0].AutoSizeMode = \
DataGridViewAutoSizeColumnMode.Fill
table.Columns[1].Name = "Issue"
table.Columns[1].DefaultCellStyle.Alignment =\
DataGridViewContentAlignment.MiddleCenter
table.Columns[1].AutoSizeMode = \
DataGridViewAutoSizeColumnMode.AllCells
table.Columns[2].Name = "ID"
table.Columns[2].Visible = False
table.Columns[2].DefaultCellStyle.Alignment =\
DataGridViewContentAlignment.MiddleCenter
table.Columns[2].AutoSizeMode = \
DataGridViewAutoSizeColumnMode.AllCells
table.Columns[3].Name = "Model ID"
table.Columns[3].Visible = False
table.Columns[3].DefaultCellStyle.Alignment =\
DataGridViewContentAlignment.MiddleCenter
table.Columns[3].AutoSizeMode =\
DataGridViewAutoSizeColumnMode.AllCells
# 3. --- copy model data into the table, each issue is a row
for i in range(len(issue_refs)):
name = series_name_s
key = issue_refs[i].issue_key
# corylow: move this 'replace' into the IssueModel object
issue_num_s = issue_refs[i].issue_num_s.replace('.00', '')
table.Rows.Add()
table.Rows[i].Cells[0].Value = name
if issue_num_s:
table.Rows[i].Cells[1].Value = float(issue_num_s)
table.Rows[i].Cells[2].Value = key
table.Rows[i].Cells[3].Value = i
# 4. --- sort on the "Issue" column, and then preselect a row based on
# the give issue ID hint, or at least the first row if nothing else
table.Sort(table.Columns[1], ListSortDirection.Ascending)
if issue_ref_hint:
for i in range(len(issue_refs)):
if table.Rows[i].Cells[2].Value == issue_ref_hint.issue_key:
table.CurrentCell = table.Rows[i].Cells[0]
self.__found_issue_in_table = True
break
if not self.__found_issue_in_table:
table.CurrentCell = table.Rows[0].Cells[0]
table.SelectionChanged += self.__change_table_selection_fired
return table
# ==========================================================================
def __build_okbutton(self):
''' builds and returns the ok button for this form '''
button = Button()
button.DialogResult = DialogResult.OK
button.Location = Point(223, 362)
button.Size = Size(80, 24)
button.Text = '&Ok'
return button
# ==========================================================================
def __build_skipbutton(self):
''' builds and returns the skip button for this form '''
button = Button()
button.DialogResult = DialogResult.Ignore
button.Location = Point(308, 362)
button.Size = Size(80, 24)
button.Text = '&Skip'
return button
# ==========================================================================
def __build_backbutton(self):
''' builds and returns the back button for this form '''
button = Button()
button.DialogResult = DialogResult.Retry
button.Location = Point(628, 362)
button.Size = Size(90, 24)
button.Text = 'Go &Back'
return button
# ==========================================================================
def __build_label(self):
''' builds and returns the main text label for this form '''
label = Label()
label.AutoSize = True
label.Text =\
"Please choose the correct issue from the following list."\
if self.__found_issue_in_table else\
"Could not automatically identify the issue for this comic.\n"\
"Please choose the correct issue from the following list."
if self.__config.show_covers_b:
label.Location = Point(218, 20)
else:
label.Location = Point(10, 20)
return label
# ==========================================================================
def __build_coverpanel(self):
''' builds and returns the IssueCoverPanel for this form '''
panel = IssueCoverPanel(self.__config)
panel.Location = Point(10, 30)
# panel size is determined by the panel itself
return panel
# ==========================================================================
def show_form(self):
'''
Displays this form, blocking until the user closes it. When it is closed,
it will return an IssueFormResult describing how it was closed, and any
IssueRef that may have been chosen when it was closed.
'''
dialogAnswer = self.ShowDialog(self.Owner) # blocks
if dialogAnswer == DialogResult.OK:
result = IssueFormResult( IssueFormResult.OK,
self.__issue_refs[self.__chosen_index] )
alt_image_ref = self.__coverpanel.get_alt_cover_image_url()
if alt_image_ref:
# the user chose a non-default cover image for this issue.
# we'll store that choice in the global "session data map",
# in case any other part of the program wants to use it.
alt_cover_key = sstr(result) + "-altcover"
self.__config.session_data_map[alt_cover_key] = alt_image_ref
elif dialogAnswer == DialogResult.Cancel:
result = IssueFormResult( IssueFormResult.CANCEL )
elif dialogAnswer == DialogResult.Ignore:
result = IssueFormResult( IssueFormResult.SKIP )
elif dialogAnswer == DialogResult.Retry:
result = IssueFormResult( IssueFormResult.BACK )
else:
raise Exception()
return result
# ==========================================================================
   def __form_closed_fired(self, sender, args):
      ''' this method is called whenever this IssueForm is closed. '''
      # release gui resources and then deregister this very handler, so the
      # form (and everything it references) can be garbage collected
      self.__table.Dispose()
      self.__coverpanel.free()
      self.Closed -= self.__form_closed_fired
# ==========================================================================
def __change_table_selection_fired(self, sender, args):
''' this method is called whenever the table's selected row changes '''
# update __chosen_index (eventually used as this dialog's return value)
# and then also use it to update the displayed cover image
selected_rows = self.__table.SelectedRows
if selected_rows.Count == 1:
self.__chosen_index = selected_rows[0].Cells[3].Value
self.__coverpanel.set_issue(
self.__issue_refs[self.__chosen_index] )
else:
self.__chosen_index = None
self.__coverpanel.set_issue( None )
# don't let the user click 'ok' if no row is selected!
self.__ok_button.Enabled = selected_rows.Count == 1
#==============================================================================
class IssueFormResult(object):
   '''
   Results that can be returned from the IssueForm.show_form() method.  The
   'name' of this object describes the manner in which the user closed the
   dialog:
   
   1) IssueFormResult.CANCEL means the user cancelled this scrape operation.
   2) IssueFormResult.SKIP means the user elected to skip the current book.
   3) IssueFormResult.BACK means the user chose to return to the SeriesForm
   4) IssueFormResult.OK means the user chose an IssueRef from those displayed
      
   Note that if the IssueFormResult has a name of 'OK', it should also have a 
   non-None 'ref', which is of course the actual IssueRef that the user chose.    
   '''
   
   OK = "ok"
   CANCEL = "cancel"
   SKIP = "skip"
   BACK = "back"
   
   #===========================================================================         
   def __init__(self, name, ref=None):
      ''' 
      Creates a new IssueFormResult.
      name -> the name of the result, i.e. based on what button the user pressed
      ref -> the reference that the user chose, if they chose one at all.
      
      Raises ValueError if 'name' is not one of the four constants above.
      '''
      # validate with a single membership test instead of a chain of !='s;
      # ValueError (a subclass of Exception) gives callers a useful message
      if name not in (self.OK, self.CANCEL, self.SKIP, self.BACK):
         raise ValueError("invalid IssueFormResult name: " + str(name))
      # a chosen ref is only meaningful for an OK result; discard it otherwise
      self.__ref = ref if name == self.OK else None
      self.__name = name
      
   #===========================================================================         
   def get_name(self):
      ''' Gets the 'name' portion of this result (see possibilities above) '''
      return self.__name
   
   #===========================================================================         
   def get_ref(self):
      ''' 
      Gets the IssueRef portion of this result, i.e. the one the user picked.
      This is only defined when the 'name' of this result is "OK".
      '''
      return self.__ref
   
   #===========================================================================         
   def get_debug_string(self):
      ''' Gets a simple little debug string summarizing this result. '''
      
      if self.get_name() == self.SKIP:
         return "SKIP scraping this book"
      elif self.get_name() == self.CANCEL:
         return "CANCEL this scrape operation"
      elif self.get_name() == self.BACK:
         return "GO BACK to the series dialog"
      elif self.get_name() == self.OK:
         return "SCRAPE using: '" + sstr(self.get_ref()) + "'"
      else:
         # unreachable: the constructor rejects any other name
         raise Exception("unrecognized IssueFormResult name")
| Python |
'''
@author: Cory Banack
'''
# corylow: comment and cleanup this file
import clr
import log
from cvform import CVForm
from configuration import Configuration
clr.AddReference('System.Windows.Forms')
from System.Windows.Forms import AutoScaleMode, Button, CheckBox, \
CheckedListBox, DialogResult, FlatStyle, Label, SelectionMode, \
TabControl, TabPage
clr.AddReference('System.Drawing')
from System.Drawing import Point, Size
# =============================================================================
class ConfigForm(CVForm):
   '''
   This class is a popup, modal dialog that displays all of the configurable
   options available to the user. The user can change any of the options,
   and then click OK or Cancel to quit the dialog and continue the normal
   execution of the program. Clicking Cancel will discard any configuration
   changes that were made; clicking OK will save them permanently.
   '''
   # these are the strings that the user sees for each checkbox; they can
   # also be used to reference each checkbox inside the checkboxlist
   __SERIES_CB = 'Series'
   __NUMBER_CB = 'Number'
   __MONTH_CB = 'Month'
   __TITLE_CB = 'Title'
   __ALT_SERIES_CB = 'Alternate Series'
   __WRITER_CB = 'Writer'
   __PENCILLER_CB = 'Penciller'
   __INKER_CB = 'Inker'
   __COVER_ARTIST_CB = 'Cover Artist'
   __COLORIST_CB = 'Colorist'
   __LETTERER_CB = 'Letterer'
   __EDITOR_CB = 'Editor'
   __SUMMARY_CB = 'Summary'
   __YEAR_CB = 'Year'
   __IMPRINT_CB = 'Imprint'
   __PUBLISHER_CB = 'Publisher'
   __VOLUME_CB = 'Volume'
   __CHARACTERS_CB = 'Characters'
   __TEAMS_CB = 'Teams'
   __LOCATIONS_CB = 'Locations'
   __WEBPAGE_CB = 'Webpage'
   __RATING_CB = 'Rating'
   # ==========================================================================
   def __init__(self, owner):
      '''
      Initializes this form.
      owner -> this form's owner window/dialog
      '''
      # note: all widget references below start as None; they are created
      # and wired up by __build_gui(), which is called at the end of __init__
      # the ok button for this dialog
      self.__ok_button = None
      # the cancel button for this dialog
      self.__cancel_button = None
      # the restore defaults button for this dialog
      self.__restore_button = None
      # "options" checkboxes
      self.__ow_existing_cb = None
      self.__ignore_blanks_cb = None
      self.__specify_series_cb = None
      self.__convert_imprints_cb = None
      self.__show_covers_cb = None
      self.__summary_dialog_cb = None
      self.__download_thumbs_cb = None
      self.__preserve_thumbs_cb = None
      self.__scrape_in_groups_cb = None
      self.__fast_rescrape_cb = None
      self.__rescrape_tags_cb = None
      self.__rescrape_notes_cb = None
      # "data" checkbox list
      self.__update_checklist = None
      CVForm.__init__(self, owner, "configformLocation")
      self.__build_gui()
   # ==========================================================================
   def __build_gui(self):
      ''' Constructs and initializes the gui for this form. '''
      # 1. --- build each gui component
      self.__ok_button = self.__build_okbutton()
      self.__cancel_button = self.__build_cancel_button()
      self.__restore_button = self.__build_restore_button()
      tabcontrol = self.__build_tabcontrol()
      # 2. -- configure this form, and add all the gui components to it
      self.AutoScaleMode = AutoScaleMode.Font
      self.ClientSize = Size(396, 335)
      self.Text = 'Comic Vine Scraper Settings'
      self.Controls.Add(self.__ok_button)
      self.Controls.Add(self.__cancel_button)
      self.Controls.Add(self.__restore_button)
      self.Controls.Add(tabcontrol)
      # 3. -- define the keyboard focus tab traversal ordering
      self.__ok_button.TabIndex = 0
      self.__cancel_button.TabIndex = 1
      self.__restore_button.TabIndex = 2
      tabcontrol.TabIndex = 3
      # sync the enabled/disabled state of the dependent checkboxes
      self.__update_gui_fired()
   # ==========================================================================
   def __build_okbutton(self):
      ''' builds and returns the ok button for this form '''
      button = Button()
      button.DialogResult = DialogResult.OK
      button.Location = Point(228, 300)
      button.Size = Size(75, 23)
      button.Text = '&Ok'
      return button
   # ==========================================================================
   def __build_restore_button(self):
      ''' builds and returns the restore button for this form '''
      button = Button()
      button.Click += self.__restore_defaults_fired
      button.Location = Point(10, 300)
      button.Size = Size(150, 23)
      button.Text = '&Restore Defaults'
      return button
   # ==========================================================================
   def __build_cancel_button(self):
      ''' builds and returns the cancel button for this form '''
      button = Button()
      button.DialogResult = DialogResult.Cancel
      button.Location = Point(309, 300)
      button.Size = Size(75, 23)
      button.Text = '&Cancel'
      return button
   # ==========================================================================
   def __build_tabcontrol(self):
      ''' builds and returns the TabControl for this dialog '''
      tabcontrol = TabControl()
      tabcontrol.Location = Point(10, 15)
      tabcontrol.Size = Size(375, 265)
      tabcontrol.Controls.Add( self.__build_detailstab() )
      tabcontrol.Controls.Add( self.__build_behaviourtab() )
      tabcontrol.Controls.Add( self.__build_datatab() )
      return tabcontrol
   # ==========================================================================
   def __build_detailstab(self):
      ''' builds and returns the "Details" Tab for the TabControl '''
      tabpage = TabPage()
      tabpage.Text = "Details"
      # 1. --- a description label for this tabpage
      label = Label()
      label.AutoSize = True
      label.Location = Point(9, 20)
      label.Size = Size(299, 13)
      label.Text = 'Please choose which details you want to update:'
      # 2. --- the 'select all' button
      checkall_button = Button()
      checkall_button.Click += self.__checkall_fired
      checkall_button.Location = Point(275, 97)
      checkall_button.Size = Size(80, 23)
      checkall_button.Text = 'Select &All'
      # 3. --- the 'deselect all' button
      uncheckall_button = Button()
      uncheckall_button.Click += self.__uncheckall_fired
      uncheckall_button.Location = Point(275, 128)
      uncheckall_button.Size = Size(80, 23)
      uncheckall_button.Text = 'Select &None'
      # 4. --- build the update checklist (contains all the 'data' checkboxes)
      self.__update_checklist = CheckedListBox()
      self.__update_checklist.CheckOnClick = True
      self.__update_checklist.ColumnWidth = 120
      self.__update_checklist.ThreeDCheckBoxes = True
      self.__update_checklist.Location = Point(15, 45)
      self.__update_checklist.MultiColumn = True
      self.__update_checklist.SelectionMode = SelectionMode.One
      self.__update_checklist.Size = Size(250, 180)
      self.__update_checklist.ItemCheck += self.__update_gui_fired
      # the order that items are Add'ed here is the order the user sees them
      self.__update_checklist.Items.Add(ConfigForm.__SERIES_CB)
      self.__update_checklist.Items.Add(ConfigForm.__VOLUME_CB)
      self.__update_checklist.Items.Add(ConfigForm.__NUMBER_CB)
      self.__update_checklist.Items.Add(ConfigForm.__TITLE_CB)
      self.__update_checklist.Items.Add(ConfigForm.__MONTH_CB)
      self.__update_checklist.Items.Add(ConfigForm.__YEAR_CB)
      self.__update_checklist.Items.Add(ConfigForm.__ALT_SERIES_CB)
      self.__update_checklist.Items.Add(ConfigForm.__PUBLISHER_CB)
      self.__update_checklist.Items.Add(ConfigForm.__IMPRINT_CB)
      self.__update_checklist.Items.Add(ConfigForm.__WRITER_CB)
      self.__update_checklist.Items.Add(ConfigForm.__PENCILLER_CB)
      self.__update_checklist.Items.Add(ConfigForm.__INKER_CB)
      self.__update_checklist.Items.Add(ConfigForm.__COLORIST_CB)
      self.__update_checklist.Items.Add(ConfigForm.__LETTERER_CB)
      self.__update_checklist.Items.Add(ConfigForm.__COVER_ARTIST_CB)
      self.__update_checklist.Items.Add(ConfigForm.__EDITOR_CB)
      self.__update_checklist.Items.Add(ConfigForm.__SUMMARY_CB)
      self.__update_checklist.Items.Add(ConfigForm.__CHARACTERS_CB)
      self.__update_checklist.Items.Add(ConfigForm.__TEAMS_CB)
      self.__update_checklist.Items.Add(ConfigForm.__LOCATIONS_CB)
      self.__update_checklist.Items.Add(ConfigForm.__WEBPAGE_CB)
      self.__update_checklist.Items.Add(ConfigForm.__RATING_CB)
      # 5. --- add 'em all to this tabpage
      tabpage.Controls.Add(label)
      tabpage.Controls.Add(checkall_button)
      tabpage.Controls.Add(uncheckall_button)
      tabpage.Controls.Add(self.__update_checklist)
      return tabpage
   # ==========================================================================
   def __build_behaviourtab(self):
      ''' builds and returns the "Behaviour" Tab for the TabControl '''
      tabpage = TabPage()
      tabpage.Text = "Behaviour"
      # 1. -- build the 'use fast rescrape' checkbox
      self.__fast_rescrape_cb = CheckBox()
      self.__fast_rescrape_cb.AutoSize = True
      self.__fast_rescrape_cb.FlatStyle = FlatStyle.System
      self.__fast_rescrape_cb.Location = Point(52, 15)
      self.__fast_rescrape_cb.Size = Size(218, 17)
      self.__fast_rescrape_cb.Text = \
         "Use previous choice when 'rescraping' comics"
      self.__fast_rescrape_cb.CheckedChanged += self.__update_gui_fired
      # 2. -- build the 'add rescrape hints to tags' checkbox
      self.__rescrape_tags_cb = CheckBox()
      self.__rescrape_tags_cb.AutoSize = True
      self.__rescrape_tags_cb.FlatStyle = FlatStyle.System
      self.__rescrape_tags_cb.Location = Point(82, 40)
      self.__rescrape_tags_cb.Size = Size(218, 17)
      self.__rescrape_tags_cb.Text = "Save that choice in 'Tags'"
      self.__rescrape_tags_cb.CheckedChanged += self.__update_gui_fired
      # 3. -- build the 'add rescrape hints to notes' checkbox
      self.__rescrape_notes_cb = CheckBox()
      self.__rescrape_notes_cb.AutoSize = True
      self.__rescrape_notes_cb.FlatStyle = FlatStyle.System
      self.__rescrape_notes_cb.Location = Point(82, 65)
      self.__rescrape_notes_cb.Size = Size(218, 17)
      self.__rescrape_notes_cb.Text = "Save that choice in 'Notes'"
      self.__rescrape_notes_cb.CheckedChanged += self.__update_gui_fired
      # 4. --- build the 'scrape in groups' checkbox
      self.__scrape_in_groups_cb = CheckBox()
      self.__scrape_in_groups_cb.AutoSize = True
      self.__scrape_in_groups_cb.FlatStyle = FlatStyle.System
      self.__scrape_in_groups_cb.Location = Point(52, 95)
      self.__scrape_in_groups_cb.Size = Size(250, 17)
      self.__scrape_in_groups_cb.Text = \
         "When several comics appear to be from the same\n" \
         "series, only ask about the first one."
      self.__scrape_in_groups_cb.CheckedChanged += self.__update_gui_fired
      # 5. --- build the 'specify series name' checkbox
      self.__specify_series_cb = CheckBox()
      self.__specify_series_cb.AutoSize = True
      self.__specify_series_cb.FlatStyle = FlatStyle.System
      self.__specify_series_cb.Location = Point(52, 140)
      self.__specify_series_cb.Size = Size(250, 17)
      self.__specify_series_cb.Text = \
         'Confirm each series name before searching for it'
      self.__specify_series_cb.CheckedChanged += self.__update_gui_fired
      # 6. --- build the 'display cover art' checkbox
      self.__show_covers_cb = CheckBox()
      self.__show_covers_cb.AutoSize = True
      self.__show_covers_cb.FlatStyle = FlatStyle.System
      self.__show_covers_cb.Location = Point(52, 173)
      self.__show_covers_cb.Size = Size(250, 17)
      self.__show_covers_cb.Text = \
         'When possible, display comic book cover images'
      self.__show_covers_cb.CheckedChanged += self.__update_gui_fired
      # 7. --- build the 'show summary dialog' checkbox
      self.__summary_dialog_cb = CheckBox()
      self.__summary_dialog_cb.AutoSize = True
      self.__summary_dialog_cb.FlatStyle = FlatStyle.System
      self.__summary_dialog_cb.Location = Point(52, 205)
      self.__summary_dialog_cb.Size = Size(250, 17)
      self.__summary_dialog_cb.Text = \
         'Show summary message when finished scraping'
      self.__summary_dialog_cb.CheckedChanged += self.__update_gui_fired
      # 8. --- add 'em all to the tabpage
      tabpage.Controls.Add(self.__scrape_in_groups_cb)
      tabpage.Controls.Add(self.__fast_rescrape_cb)
      tabpage.Controls.Add(self.__rescrape_tags_cb)
      tabpage.Controls.Add(self.__rescrape_notes_cb)
      tabpage.Controls.Add(self.__specify_series_cb)
      tabpage.Controls.Add(self.__summary_dialog_cb)
      tabpage.Controls.Add(self.__show_covers_cb)
      return tabpage
   # ==========================================================================
   def __build_datatab(self):
      ''' builds and returns the "Data" Tab for the TabControl '''
      tabpage = TabPage()
      tabpage.Text = "Data"
      # 1. --- build the 'convert imprints checkbox'
      self.__convert_imprints_cb = CheckBox()
      self.__convert_imprints_cb.AutoSize = True
      self.__convert_imprints_cb.FlatStyle = FlatStyle.System
      self.__convert_imprints_cb.Location = Point(52, 35)
      self.__convert_imprints_cb.Size = Size(250, 17)
      self.__convert_imprints_cb.Text = \
         'Convert scraped imprints to parent publisher'
      self.__convert_imprints_cb.CheckedChanged += self.__update_gui_fired
      # 2. -- build the 'overwrite existing' checkbox
      self.__ow_existing_cb = CheckBox()
      self.__ow_existing_cb.AutoSize = True
      self.__ow_existing_cb.FlatStyle = FlatStyle.System
      self.__ow_existing_cb.Location = Point(52, 85)
      self.__ow_existing_cb.Size = Size(218, 17)
      self.__ow_existing_cb.Text = \
         'Allow scraper to overwrite existing values in comics'
      self.__ow_existing_cb.CheckedChanged += self.__update_gui_fired
      # 3. --- build the 'ignore blanks' checkbox (a sub-option of #2; it is
      # enabled/disabled to match #2's checked state, see __update_gui_fired)
      self.__ignore_blanks_cb = CheckBox()
      self.__ignore_blanks_cb.AutoSize = True
      self.__ignore_blanks_cb.FlatStyle = FlatStyle.System
      self.__ignore_blanks_cb.Location = Point(82, 110)
      self.__ignore_blanks_cb.Size = Size(250, 17)
      self.__ignore_blanks_cb.Text =\
         "...except when the new values would be empty"
      self.__ignore_blanks_cb.CheckedChanged += self.__update_gui_fired
      # 4. --- build the 'download thumbnails' checkbox
      self.__download_thumbs_cb = CheckBox()
      self.__download_thumbs_cb.AutoSize = True
      self.__download_thumbs_cb.FlatStyle = FlatStyle.System
      self.__download_thumbs_cb.Location = Point(52, 160)
      self.__download_thumbs_cb.Size = Size(250, 17)
      self.__download_thumbs_cb.Text = \
         'Update thumbnails for fileless comics'
      self.__download_thumbs_cb.CheckedChanged += self.__update_gui_fired
      # 5. --- build the 'preserve thumbnails' checkbox (a sub-option of #4)
      self.__preserve_thumbs_cb = CheckBox()
      self.__preserve_thumbs_cb.AutoSize = True
      self.__preserve_thumbs_cb.FlatStyle = FlatStyle.System
      self.__preserve_thumbs_cb.Location = Point(82, 185)
      self.__preserve_thumbs_cb.Size = Size(250, 17)
      self.__preserve_thumbs_cb.Text = \
         '...&except when they already have thumbnails'
      self.__preserve_thumbs_cb.CheckedChanged += self.__update_gui_fired
      # 6. --- add 'em all to the tabpage
      tabpage.Controls.Add(self.__ow_existing_cb)
      tabpage.Controls.Add(self.__ignore_blanks_cb)
      tabpage.Controls.Add(self.__convert_imprints_cb)
      tabpage.Controls.Add(self.__download_thumbs_cb)
      tabpage.Controls.Add(self.__preserve_thumbs_cb)
      return tabpage
   # ==========================================================================
   def show_form(self):
      '''
      Displays this form, blocking until the user closes it. When it is closed,
      this method will return a Configuration object containing the settings
      that this dialog was displaying when it was closed (these settings were
      also just saved on the filesystem, so they are also the settings that
      this dialog will display the next time it is opened.)
      If the user clicks 'Cancel' then this method will simply return None.
      '''
      log.debug("opened the settings dialog.")
      # populate the gui from the user's saved defaults before showing it
      defaults = Configuration()
      defaults.load_defaults()
      self.__set_configuration(defaults)
      dialogAnswer = self.ShowDialog() # blocks
      if dialogAnswer == DialogResult.OK:
         config = self.__get_configuration()
         config.save_defaults()
         log.debug("closed the settings dialog.")
      else:
         config = None
         log.debug("cancelled the settings dialog.")
      return config
   # ==========================================================================
   def __get_configuration(self):
      '''
      Returns a Configuration object the describes the current state of all the
      gui components on this ConfigForm.
      '''
      # maps a checkbox's display string back to its checked state
      def is_checked( checkbox ):
         return self.__update_checklist.GetItemChecked( \
            self.__update_checklist.Items.IndexOf(checkbox) )
      config = Configuration()
      # 1. --- first get the parts from the checklist box (data tab)
      config.update_series_b = is_checked(ConfigForm.__SERIES_CB)
      config.update_number_b = is_checked(ConfigForm.__NUMBER_CB)
      config.update_month_b = is_checked(ConfigForm.__MONTH_CB)
      config.update_title_b = is_checked(ConfigForm.__TITLE_CB)
      config.update_alt_series_b = is_checked(ConfigForm.__ALT_SERIES_CB)
      config.update_writer_b = is_checked(ConfigForm.__WRITER_CB)
      config.update_penciller_b = is_checked(ConfigForm.__PENCILLER_CB)
      config.update_inker_b = is_checked(ConfigForm.__INKER_CB)
      config.update_cover_artist_b = is_checked(ConfigForm.__COVER_ARTIST_CB)
      config.update_colorist_b = is_checked(ConfigForm.__COLORIST_CB)
      config.update_letterer_b = is_checked(ConfigForm.__LETTERER_CB)
      config.update_editor_b = is_checked(ConfigForm.__EDITOR_CB)
      config.update_summary_b = is_checked(ConfigForm.__SUMMARY_CB)
      config.update_year_b = is_checked(ConfigForm.__YEAR_CB)
      config.update_imprint_b = is_checked(ConfigForm.__IMPRINT_CB)
      config.update_publisher_b = is_checked(ConfigForm.__PUBLISHER_CB)
      config.update_volume_b = is_checked(ConfigForm.__VOLUME_CB)
      config.update_characters_b = is_checked(ConfigForm.__CHARACTERS_CB)
      config.update_teams_b = is_checked(ConfigForm.__TEAMS_CB)
      config.update_locations_b = is_checked(ConfigForm.__LOCATIONS_CB)
      config.update_webpage_b = is_checked(ConfigForm.__WEBPAGE_CB)
      config.update_rating_b = is_checked(ConfigForm.__RATING_CB)
      # 2. --- then get the parts from the other checkboxes (options tab)
      config.ow_existing_b = self.__ow_existing_cb.Checked
      config.convert_imprints_b = self.__convert_imprints_cb.Checked
      config.specify_series_b = self.__specify_series_cb.Checked
      config.ignore_blanks_b = self.__ignore_blanks_cb.Checked
      config.show_covers_b = self.__show_covers_cb.Checked
      config.download_thumbs_b = self.__download_thumbs_cb.Checked
      config.preserve_thumbs_b = self.__preserve_thumbs_cb.Checked
      config.fast_rescrape_b = self.__fast_rescrape_cb.Checked
      config.scrape_in_groups_b = self.__scrape_in_groups_cb.Checked
      config.rescrape_notes_b = self.__rescrape_notes_cb.Checked
      config.rescrape_tags_b = self.__rescrape_tags_cb.Checked
      config.summary_dialog_b = self.__summary_dialog_cb.Checked
      return config
   # ==========================================================================
   def __set_configuration(self, config):
      '''
      Sets the state of all the gui components on this ConfigForm to match the
      contents of the given Configuration object.
      '''
      # maps a checkbox's display string to its entry in the checklist
      def set_checked( checkbox, checked ):
         self.__update_checklist.SetItemChecked( \
            self.__update_checklist.Items.IndexOf(checkbox), checked )
      # 1. --- first set the parts in the checklist box (data tab)
      set_checked(ConfigForm.__SERIES_CB, config.update_series_b)
      set_checked(ConfigForm.__NUMBER_CB, config.update_number_b)
      set_checked(ConfigForm.__MONTH_CB, config.update_month_b)
      set_checked(ConfigForm.__TITLE_CB, config.update_title_b)
      set_checked(ConfigForm.__ALT_SERIES_CB, config.update_alt_series_b)
      set_checked(ConfigForm.__WRITER_CB, config.update_writer_b)
      set_checked(ConfigForm.__PENCILLER_CB, config.update_penciller_b)
      set_checked(ConfigForm.__INKER_CB, config.update_inker_b)
      set_checked(ConfigForm.__COVER_ARTIST_CB,config.update_cover_artist_b)
      set_checked(ConfigForm.__COLORIST_CB, config.update_colorist_b)
      set_checked(ConfigForm.__LETTERER_CB, config.update_letterer_b)
      set_checked(ConfigForm.__EDITOR_CB, config.update_editor_b)
      set_checked(ConfigForm.__SUMMARY_CB, config.update_summary_b)
      set_checked(ConfigForm.__YEAR_CB, config.update_year_b)
      set_checked(ConfigForm.__IMPRINT_CB, config.update_imprint_b)
      set_checked(ConfigForm.__PUBLISHER_CB, config.update_publisher_b)
      set_checked(ConfigForm.__VOLUME_CB, config.update_volume_b)
      set_checked(ConfigForm.__CHARACTERS_CB, config.update_characters_b)
      set_checked(ConfigForm.__TEAMS_CB, config.update_teams_b)
      set_checked(ConfigForm.__LOCATIONS_CB, config.update_locations_b)
      set_checked(ConfigForm.__WEBPAGE_CB, config.update_webpage_b)
      set_checked(ConfigForm.__RATING_CB, config.update_rating_b)
      # 2. --- then set the parts in the other checkboxes (options tab)
      self.__ow_existing_cb.Checked = config.ow_existing_b
      self.__convert_imprints_cb.Checked = config.convert_imprints_b
      self.__specify_series_cb.Checked = config.specify_series_b
      self.__ignore_blanks_cb.Checked = config.ignore_blanks_b
      self.__show_covers_cb.Checked = config.show_covers_b
      self.__download_thumbs_cb.Checked = config.download_thumbs_b
      self.__preserve_thumbs_cb.Checked = config.preserve_thumbs_b
      self.__fast_rescrape_cb.Checked = config.fast_rescrape_b
      self.__scrape_in_groups_cb.Checked = config.scrape_in_groups_b
      self.__rescrape_notes_cb.Checked = config.rescrape_notes_b
      self.__rescrape_tags_cb.Checked = config.rescrape_tags_b
      self.__summary_dialog_cb.Checked = config.summary_dialog_b
      self.__update_gui_fired()
   # ==========================================================================
   def __restore_defaults_fired(self, sender, args):
      ''' called when the user clicks the "restore defaults" button '''
      # a brand new Configuration object contains the factory defaults
      self.__set_configuration(Configuration())
      log.debug("all settings were restored to their default values")
      self.__update_gui_fired()
   # ==========================================================================
   def __update_gui_fired(self, sender = None, args = None):
      ''' called anytime the gui for this form should be updated '''
      # the sub-option checkboxes are only meaningful (and thus enabled)
      # when their parent option is checked
      self.__ignore_blanks_cb.Enabled = self.__ow_existing_cb.Checked
      self.__preserve_thumbs_cb.Enabled = self.__download_thumbs_cb.Checked
   # ==========================================================================
   def __checkall_fired(self, sender, args):
      ''' called when the user clicks the "select all" button '''
      for i in range(self.__update_checklist.Items.Count):
         self.__update_checklist.SetItemChecked(i, True)
   # ==========================================================================
   def __uncheckall_fired(self, sender, args):
      ''' called when the user clicks the "select none" button '''
      for i in range(self.__update_checklist.Items.Count):
         self.__update_checklist.SetItemChecked(i, False)
| Python |
'''
@author: Cory Banack
'''
#corylow: comment and cleanup this file
from cvform import CVForm
import clr
import log
import resources
import utils
from utils import sstr
clr.AddReference('IronPython')
clr.AddReference('System')
from System import GC
clr.AddReference('System.Drawing')
from System.Drawing import Point, Rectangle, Size
from System.Threading import Monitor, Thread, ThreadStart, \
ThreadExceptionEventHandler
clr.AddReference('System.Windows.Forms')
from System.Windows.Forms import AnchorStyles, Application, AutoScaleMode, \
Button, FormBorderStyle, Label, Panel, PictureBox, ProgressBar, \
PictureBoxSizeMode
# =============================================================================
class ComicForm(CVForm):
'''
This class is a standalone dialog that displays the current status of
a ScrapeEngine. The dialog displays an image of the ComicBook that's
currently being scraped, along with a few other details, and a 'cancel'
button that the user can use at any time to cancel the entire scraping
operation.
This class runs its own Application message loop, so that it can be
responsive at all times, even when the main application's message loop is
busy with network io. Since most of the rest of the scraper application is
running on a different thread than this form, it is important to remember
to properly 'invoke' any code that makes calls between this form and any
other forms (or vice versa.)
'''
# ==========================================================================
def __init__(self, scraper):
'''
Initializes this form.
'scraper' -> the currently running ScrapeEngine
'''
CVForm.__init__(self, scraper.comicrack.MainWindow, \
"comicformLocation", "comicformSize")
self.__cancel_on_close = True
self.__scraper = scraper
self.__already_closed = False
self.__last_scraped_book = None
self.__build_gui()
# ==========================================================================
def __build_gui(self):
''' Constructs and initializes the gui for this form. '''
# 1. --- build each gui component
self.__progbar = self.__build_progbar()
self.__label = self.__build_label()
self.__pbox_panel = self.__build_pboxpanel()
self.__cancel_button = self.__build_cancelbutton()
# 2. -- configure this form, and add all the gui components to it
self.Text = 'Comic Vine Scraper'
self.AutoScaleMode = AutoScaleMode.Font
self.ClientSize = Size(346, 604)
self.MinimumSize = Size(166,275)
self.FormBorderStyle = FormBorderStyle.SizableToolWindow
self.Icon = None
self.Controls.Add(self.__progbar)
self.Controls.Add(self.__label)
self.Controls.Add(self.__pbox_panel)
self.Controls.Add(self.__cancel_button)
# 3. -- set up some listeners
self.__scraper.start_scrape_listeners.append(self.__start_scrape)
self.__scraper.cancel_listeners.append(self.close_threadsafe)
self.FormClosing += self.__form_closing_fired
self.FormClosed += self.__form_closed_fired
# 4. -- define the keyboard focus tab traversal ordering
self.__cancel_button.TabIndex = 0
# ==========================================================================
def __build_progbar(self):
''' builds and returns the progress bar for this form '''
pb = ProgressBar()
pb.Minimum = 0
pb.Maximum = 0
pb.Step = 1
pb.Value = 0
pb.Anchor = AnchorStyles.Top | AnchorStyles.Left | AnchorStyles.Right
pb.Width = 320
pb.Height = 20
pb.Location = Point(13, 15)
return pb
# ==========================================================================
def __build_label(self):
''' builds and returns the label for this form '''
label = Label() # test is updated by the 'update' method
label.Text = 'Scraping comic book info from Comicvine...'
label.Location = Point(13, 45)
label.Size = Size(320, 15)
label.Anchor = AnchorStyles.Top | AnchorStyles.Left |AnchorStyles.Right
label.AutoSize = False
return label
# ==========================================================================
def __build_pboxpanel(self):
''' builds and returns the picturebox panel for this form '''
pbox = _PictureBoxPanel()
pbox.Location = Point (13, 65)
pbox.Size = Size(320, 496)
pbox.Anchor = AnchorStyles.Top | \
AnchorStyles.Bottom | AnchorStyles.Left | AnchorStyles.Right
return pbox
# ==========================================================================
def __build_cancelbutton(self):
''' builds and returns the cancel button for this form '''
button = Button()
button.Text="Cancel Scrape" # gets updated by the 'update' method
def cancel(sender, args):
button.Enabled = False
self.Close()
button.Click+=cancel
button.Location = Point(98, 572)
button.Size = Size(150, 23)
button.Anchor = AnchorStyles.Bottom
return button
# ==========================================================================
def __start_scrape(self, book, num_remaining):
'''
This method gets called once for each comic that the ScrapeEngine is
scraping; the call happens just before the scrape begins. The method
updates all necessary graphical components to reflect the current scrape.
'book' -> the comic book object that is about to be scraped
'num_remaining' -> the # of books left to scrape (including current one)
'''
# 1. obtain a nice filename string to put into out Label
book_name = book.filename_ext_s
fileless = False if book_name else True
if fileless:
# 1a. this is a fileless book, so use it's series name
book_name = book.series_s
if not book_name:
book_name = '<unknown>' # generally shouldn't happen
book_name += (' #' + book.issue_num_s) if book.issue_num_s else ''
book_name += (' (Vol. ' + sstr(book.volume_n) +")") \
if book.volume_n >= 0 else (' (' + sstr(book.year_n) +')') \
if book.year_n >= 0 else ''
# 2. obtain a copy of the cover page of the book
cover_image = book.get_cover_image()
# 3. install those values into the ComicForm. update progressbar.
def delegate():
# NOTE: now we're on the ComicForm Application Thread
self.__label.Text = "Scraping: " + book_name
self.__pbox_panel.set_image(cover_image) # cover image may be None
self.__progbar.PerformStep()
self.__progbar.Maximum = self.__progbar.Value + num_remaining
self.__cancel_button.Text="Cancel (" + \
sstr(num_remaining) + " remaining)"
self.Update()
utils.invoke(self, delegate, False)
#===========================================================================
@classmethod
def show_threadsafe(cls, *args):
'''
A threadsafe method for instantiating a new ComicForm on a NEW
Application thread, and then displaying it to the user. The Application
thread will shutdown and dispose automatically when the ComicForm is
closed.
All given arguments will be passed to the new ComicForm's constructor.
'''
cls.newform = None
def shower():
with cls(*args) as form:
Monitor.Enter(cls)
try:
cls.newform = form
Monitor.Pulse(cls)
finally:
Monitor.Exit(cls)
def exception_handler(sender, event):
log.handle_error(event.Exception)
Application.ThreadException +=\
ThreadExceptionEventHandler(exception_handler)
Application.Run(form) # start form on new App thread; blocks
Monitor.Enter(cls)
try:
# starts a new thread, which will become the Application thread/ event
# pump for the newly created ComicForm,
Thread(ThreadStart(shower)).Start()
Monitor.Wait(cls)
finally:
Monitor.Exit(cls)
newform = cls.newform
del cls.newform
# make sure this method does not return until the newly created ComicForm
# has actually been made visible and active. see bug 139.
def activate_form():
# this call is probably not needed; the real trick here is that
# invoking this method synchronously delays us until the form has
# a nice, visible handle on-screen
newform.Activate()
utils.invoke(newform, activate_form, True)
return newform
# ==========================================================================
def close_threadsafe(self):
'''
A threadsafe method for closing this ComicForm and disposing of it's
Application thread. Note that closing the form with this method will
NOT flag the ScraperEngine as cancelled. Any other method of closing
(user clicks on red x, the Close() method, etc) will flag cancel.
'''
def delegate():
self.__cancel_on_close = False
self.Close()
self.Dispose()
if not self.__already_closed:
utils.invoke(self, delegate, True)
# ==========================================================================
   def __form_closing_fired(self, sender, args):
      '''
      Called just before this form closes (presumably registered for the
      .NET FormClosing event; the registration is not visible here).
      'sender' -> the event source; unused
      'args' -> the event arguments; unused
      '''
      # Flag this form as 'already closed', so that the close_threadsafe
      # method can be called multiple times reliably (it becomes a no-op
      # once this flag is set).
      self.__already_closed = True
# ==========================================================================
   def __form_closed_fired(self, sender, args):
      '''
      Called just after this form closes.
      'sender' -> the event source; unused
      'args' -> the event arguments; unused
      '''
      # deregister listeners; prevents infinite loop!  (close_threadsafe is
      # registered as a cancel listener, so cancelling below would otherwise
      # re-enter this form's close machinery.)
      self.__scraper.start_scrape_listeners.remove(self.__start_scrape)
      self.__scraper.cancel_listeners.remove(self.close_threadsafe)
      # in some cases, we should interpret the closing of this form as
      # a request by the user to cancel the entire scrape operation...
      if self.__cancel_on_close:
         self.__scraper.cancel()
      # clean up and disposal.  the explicit GC.Collect() forces timely
      # release of the (potentially large) cover images.
      self.__pbox_panel.Dispose(True)
      del self.__pbox_panel
      GC.Collect()
# ==========================================================================
def CenterToParent(self):
# Overridden to makes the initial position of this form a little nicer
# users will quickly set their own form positions anyway.
super(ComicForm, self).CenterToParent(self)
self.Location = Point(self.Location.X - self.Width, self.Location.Y)
# =============================================================================
class _PictureBoxPanel(Panel):
   '''
   A custom panel that contains a centered PictureBox.  You can set the image
   in that PictureBox, and whenever the panel is resized, the PictureBox will be
   automatically resized to be as big as possible while still maintaining that
   image's original aspect ratio (i.e. blank space added to the images sides
   or top/bottom, as needed.)
   '''

   #===========================================================================
   def __init__(self):
      ''' Creates a _PictureBoxPanel.  Call set_image after initialization. '''
      Panel.__init__(self)
      # the contained PictureBox; StretchImage makes it scale its image to
      # whatever size __resize_fired computes below
      self._picbox = PictureBox()
      self._picbox.SizeMode = PictureBoxSizeMode.StretchImage
      self._picbox.Location = Point(0,0)
      self.Controls.Add(self._picbox)
      # initializes self._ratio and installs the default logo image
      self.set_image(None)
      self.Disposed += self.__disposed_fired
      self.Resize += self.__resize_fired
   # ==========================================================================
   def set_image(self, image):
      '''
      Sets a new image for this _PictureBoxPanel to display.  If this image is
      None, a default logo will be displayed.  Any previous image that was set
      will have its Dispose() method called before it is discarded.
      '''
      # None means "show the default Comic Vine logo" instead
      if not image:
         image = resources.createComicVineLogo()
      # compute the image's width:height aspect ratio; fall back to 1.55
      # (roughly a comic cover's proportions) if it cannot be computed
      self._ratio = 0;
      if image and float(image.Height):
         self._ratio = float(image.Width) / float(image.Height)
      if not self._ratio:
         self._ratio =1.55
      # dispose the old image, if need be
      if self._picbox.Image:
         self._picbox.Image.Dispose()
      self._picbox.Image = image
      # force a re-layout so the new image is sized/centered correctly
      self.OnResize(None)
   # ==========================================================================
   def __resize_fired(self, sender, args):
      ''' This method is called whenever this panel is resized. '''
      # adjust the size of our PictureBox as needed to fulfill our contract:
      # 'a' is the rect we'd get by matching the panel's width, 'b' by
      # matching its height; pick whichever one fits inside the panel, and
      # center it along the unconstrained axis.
      # NOTE(review): the computed dimensions can be floats (self._ratio is a
      # float); this relies on IronPython/.NET coercion to ints -- confirm.
      panel_bds = self.Bounds
      a = Rectangle(panel_bds.X, panel_bds.Y,\
         self.Size.Width, self.Size.Width / self._ratio)
      b = Rectangle(panel_bds.X, panel_bds.Y,\
         self.Size.Height * self._ratio, self.Size.Height)
      if panel_bds.Contains(a):
         self._picbox.Size = a.Size
         self._picbox.Location = Point( 0, (self.Size.Height-a.Height)/2 )
      else:
         self._picbox.Size = b.Size
         self._picbox.Location = Point( (self.Size.Width-b.Width)/2, 0 )
   # ==========================================================================
   def __disposed_fired(self, sender, args):
      ''' This method is called when this panel is disposed '''
      # force the PictureBox's image to dispose in a timely manner, rather
      # than waiting for the garbage collector to get around to it
      if self._picbox.Image:
         self._picbox.Image.Dispose()
         self._picbox.Image = None
| Python |
'''
This module is home to the SeriesForm and SeriesFormResult classes.
@author: Cory Banack
'''
import re
import clr
from buttondgv import ButtonDataGridView
from cvform import CVForm
from dbpicturebox import DBPictureBox
from utils import sstr
import datetime
clr.AddReference('System')
from System.ComponentModel import ListSortDirection
clr.AddReference('System.Drawing')
from System.Drawing import Point, Size
clr.AddReference('System.Windows.Forms')
from System.Windows.Forms import AutoScaleMode, Button, CheckBox, \
DataGridViewAutoSizeColumnMode, DataGridViewContentAlignment, \
DataGridViewSelectionMode, DialogResult, FlatStyle, Label
#==============================================================================
class SeriesForm(CVForm):
   '''
   This class is a popup, modal dialog that displays all of the Comic Book
   series that match a particular search string.  The series are shown in a
   table, which the user can navigate through, browsing the cover art for
   the first issue of each series.  Once the user has selected the series that
   matches the comic that she is scraping, she clicks the ok button to close
   this dialog and continue scraping her comic using the identified SeriesRef.
   '''

   #===========================================================================
   def __init__(self, scraper, book, series_refs, search_terms_s):
      '''
      Initializes this form.

      'scraper' -> the currently running ScrapeEngine
      'book' -> the ComicBook being scraped
      'series_refs' -> set or list containing the SeriesRefs to display
      'search_terms_s' -> the user's search string that found the series models
      '''

      # the shared global configuration
      self.__config = scraper.config

      # a list of SeriesRef objects that back this form; one ref per table
      # row, where each SeriesRef represents a series the user can pick
      self.__series_refs = list(series_refs)

      # the 'ok' button for this dialog
      self.__ok_button = None

      # the 'show issues' button for this dialog
      self.__issues_button = None

      # the table that displays series (one per row) for the user to pick from
      self.__table = None

      # a PictureBox that displays the cover art for the current selected series
      self.__cover_image = None

      # a checkbox for toggling the display of the cover image on/off
      self.__checkbox = None

      # the index (in self.__series_refs) of the currently selected SeriesRef
      self.__chosen_index = None

      if len(series_refs) <= 0:
         raise Exception("do not invoke the SeriesForm with no series!")

      CVForm.__init__(self, scraper.comicrack.MainWindow, "seriesformLocation")
      self.__build_gui(book, search_terms_s)
      # close this form automatically if the scrape operation is cancelled
      scraper.cancel_listeners.append(self.Close)

   #===========================================================================
   def __build_gui(self, book, search_terms_s):
      ''' Constructs and initializes the gui for this form. '''

      # 1. --- build each gui component
      self.__ok_button = self.__build_okbutton()
      skip_button = self.__build_skipbutton()
      search_button = self.__build_searchbutton()
      self.__issues_button = self.__build_issuesbutton()
      label = self.__build_label(search_terms_s, len(self.__series_refs))
      self.__table = self.__build_table(
         self.__series_refs, book, self.__ok_button)
      self.__cover_image = self.__build_coverimage(self.__series_refs)
      self.__checkbox = self.__build_checkbox(self.__config)

      # 2. --- configure this form, and add all the gui components to it
      self.AutoScaleMode = AutoScaleMode.Font
      self.ClientSize = Size(730, 395)
      self.Text = 'Choose a Comic Book Series'
      self.FormClosed += self.__form_closed_fired

      self.Controls.Add(label)
      self.Controls.Add(self.__table)
      self.Controls.Add(self.__ok_button)
      self.Controls.Add(skip_button)
      self.Controls.Add(search_button)
      self.Controls.Add(self.__issues_button)
      self.Controls.Add(self.__cover_image)
      self.Controls.Add(self.__checkbox)

      # 3. --- define the keyboard focus tab traversal ordering
      self.__ok_button.TabIndex = 1
      skip_button.TabIndex = 2
      search_button.TabIndex = 3
      self.__issues_button.TabIndex = 4
      self.__checkbox.TabIndex = 5
      self.__table.TabIndex = 6

      # 4. --- make sure the UI goes into a good initial state
      self.__change_table_selection_fired(None, None)
      self.__toggle_checkbox_fired(None, None)

   # ==========================================================================
   def __build_table(self, series_refs, book, enter_button):
      '''
      Builds and returns the table for this form.
      'series_refs' -> a list with one SeriesRef object for each found series
      'book' -> the ComicBook being scraped
      'enter_button' -> the button to "press" if the user hits enter
      '''

      # 1. --- configure the table itself
      table = ButtonDataGridView(enter_button)
      table.AllowUserToOrderColumns = True
      table.SelectionMode = DataGridViewSelectionMode.FullRowSelect
      table.MultiSelect = False
      table.ReadOnly = True
      table.RowHeadersVisible = False
      table.AllowUserToAddRows = False
      table.AllowUserToResizeRows = False
      table.AllowUserToResizeColumns = False
      table.DefaultCellStyle.NullValue = "--"
      # NOTE: column sizing is handled entirely by the per-column AutoSizeMode
      # settings below.  (an earlier version had a bare 'table.AutoResizeColumns'
      # statement here, which was a no-op -- the method was never called.)
      table.Location = Point(10, 60)
      table.Size = Size(1,1) # gets updated by "__change_table_selection_fired"

      # 2. --- build columns
      table.ColumnCount = 7

      table.Columns[0].Name = "Series"
      table.Columns[0].DefaultCellStyle.Alignment =\
         DataGridViewContentAlignment.MiddleLeft
      table.Columns[0].AutoSizeMode = \
         DataGridViewAutoSizeColumnMode.Fill

      table.Columns[1].Name = "Year"
      table.Columns[1].DefaultCellStyle.Alignment =\
         DataGridViewContentAlignment.MiddleCenter
      table.Columns[1].AutoSizeMode =\
         DataGridViewAutoSizeColumnMode.AllCells

      table.Columns[2].Name = "Issues"
      table.Columns[2].DefaultCellStyle.Alignment =\
         DataGridViewContentAlignment.MiddleCenter
      table.Columns[2].AutoSizeMode =\
         DataGridViewAutoSizeColumnMode.AllCells

      table.Columns[3].Name = "Publisher"
      table.Columns[3].DefaultCellStyle.Alignment =\
         DataGridViewContentAlignment.MiddleLeft
      table.Columns[3].AutoSizeMode =\
         DataGridViewAutoSizeColumnMode.AllCells

      # the remaining columns are invisible bookkeeping data, not for display
      table.Columns[4].Name = "ID"
      table.Columns[4].Visible = False
      table.Columns[4].DefaultCellStyle.Alignment =\
         DataGridViewContentAlignment.MiddleCenter
      table.Columns[4].AutoSizeMode =\
         DataGridViewAutoSizeColumnMode.AllCells

      table.Columns[5].Name = "Match"
      table.Columns[5].Visible = False
      table.Columns[5].DefaultCellStyle.Alignment =\
         DataGridViewContentAlignment.MiddleCenter
      table.Columns[5].AutoSizeMode =\
         DataGridViewAutoSizeColumnMode.AllCells

      table.Columns[6].Name = "Model ID"
      table.Columns[6].Visible = False
      table.Columns[6].DefaultCellStyle.Alignment =\
         DataGridViewContentAlignment.MiddleCenter
      table.Columns[6].AutoSizeMode =\
         DataGridViewAutoSizeColumnMode.AllCells

      # 3. --- copy model data into the table, each series is a row
      for i in range(len(series_refs)):
         table.Rows.Add()
         ref = series_refs[i]
         table.Rows[i].Cells[0].Value = ref.series_name_s
         if ref.start_year_s:
            table.Rows[i].Cells[1].Value = int(ref.start_year_s)
         table.Rows[i].Cells[2].Value = ref.issue_count_n
         table.Rows[i].Cells[3].Value = ref.publisher_s
         table.Rows[i].Cells[4].Value = ref.series_key
         table.Rows[i].Cells[5].Value = self.__compute_match_score_n(book, ref)
         table.Rows[i].Cells[6].Value = i

      # 4. --- sort on the (hidden) "match" column, best match first
      table.Sort( table.Columns[5], ListSortDirection.Descending )
      table.SelectionChanged += self.__change_table_selection_fired
      return table

   # ==========================================================================
   def __build_okbutton(self):
      ''' builds and returns the ok button for this form '''

      button = Button()
      button.DialogResult = DialogResult.OK
      button.Location = Point(15, 362)
      button.Size = Size(80, 24)
      button.Text = '&Ok'
      return button

   # ==========================================================================
   def __build_skipbutton(self):
      ''' builds and returns the skip button for this form '''

      button = Button()
      button.DialogResult = DialogResult.Ignore
      button.Location = Point(100, 362)
      button.Size = Size(80, 24)
      button.Text = '&Skip'
      return button

   # ==========================================================================
   def __build_searchbutton(self):
      ''' builds and returns the 'search again' button for this form '''

      button = Button()
      button.DialogResult = DialogResult.Retry
      button.Location = Point(325, 362)
      button.Size = Size(90, 24)
      button.Text = 'Search &Again'
      return button

   # ==========================================================================
   def __build_issuesbutton(self):
      ''' builds and returns the 'show issues' button for this form '''

      button = Button()
      button.DialogResult = DialogResult.Yes
      button.Location = Point(420, 362)
      button.Size = Size(90, 24)
      button.Text = 'Show &Issues...'
      return button

   # ==========================================================================
   def __build_label(self, search_terms_s, num_matches_n):
      '''
      Builds and returns the text label for this form.
      'search_terms_s' -> user's search string that was used to find series
      'num_matches_n' -> number of series (table rows) the user's search matched
      '''

      label = Label()
      label.AutoSize = True
      label.Location = Point(10, 20)
      if num_matches_n > 1:
         label.Text = "Searched for '{0}' and found {1} matching series.\n"\
            'Please choose one from the list below, '\
            "or click 'Skip' to scrape this comic later.".format(
            search_terms_s, num_matches_n )
      else:
         label.Text = "Searched for '{0}' and found one matching series.\n"\
            "Click 'OK' to confirm that this is correct, "\
            "or click 'Skip' to scrape this comic later.".format(search_terms_s)
      return label

   # ==========================================================================
   def __build_coverimage(self, series_refs):
      '''
      Builds and returns the cover image PictureBox for this form.
      'series_refs' -> a list with one SeriesRef object for each found series
      '''

      cover = DBPictureBox()
      cover.Location = Point(523, 31)
      cover.Size = Size(195, 320)
      return cover

   # ==========================================================================
   def __build_checkbox(self, config):
      '''
      Builds and returns the checkbox for toggling cover image display.
      'config' -> the shared global Configuration object
      '''

      checkbox = CheckBox()
      checkbox.Name = 'seriesThumbs'
      checkbox.AutoSize = True
      checkbox.Checked = config.show_covers_b
      checkbox.FlatStyle = FlatStyle.System
      checkbox.Location = Point(580, 365)
      checkbox.Size = Size(100, 17)
      checkbox.Text = 'Show Series Art'
      checkbox.UseVisualStyleBackColor = True
      checkbox.CheckedChanged += self.__toggle_checkbox_fired
      return checkbox

   #===========================================================================
   def __compute_match_score_n(self, book, series_ref):
      '''
      Computes a score for the given SeriesRef, which describes how closely
      that ref matches the given book.  The higher the score, the closer
      the match.  Scores can be negative.
      '''

      # this function splits up the given comic book series name into
      # separate words, so we can compare different series names word-by-word
      def split( name_s ):
         if name_s is None: name_s = ''
         name_s = re.sub(r"'", '', name_s).lower()
         name_s = re.sub(r'\W+',' ', name_s)
         # normalize common variant spellings so they compare equal
         name_s = re.sub(r'giant[- ]*sized?', r'giant size', name_s)
         name_s = re.sub(r'king[- ]*sized?', r'king size', name_s)
         name_s = re.sub(r'one[- ]*shot', r'one shot', name_s)
         return name_s.split()

      # 1. first, compute the 'namescore', which is based on how many words in
      # our book name match words in the series' name (usually comes up with
      # a value on the range [5, 20], approximately.)
      bookname_s = '' if not book.series_s else book.series_s
      if bookname_s and book.format_s:
         bookname_s += ' ' + book.format_s
      bookwords = split(bookname_s)
      serieswords = split(series_ref.series_name_s)
      namescore_n = 0
      for word in bookwords:
         if word in serieswords:
            namescore_n += 5
            serieswords.remove(word)
      namescore_n -= len(serieswords)

      # 2. get the 'bookscore', which compares our book's issue number
      # with the number of issues in the series.  a step function that
      # returns a very high number (100) if the number of issues in the
      # series is compatible, and a very low one (-100) if it is not.
      booknumber_n = book.issue_num_s if book.issue_num_s else '-1000'
      booknumber_n = re.sub(r'[^\d.-]+', '', booknumber_n)
      try:
         booknumber_n = float(booknumber_n)
      except ValueError:
         booknumber_n = -999
      series_count_n = series_ref.issue_count_n
      if series_count_n > 100:
         # all large series get a "good" bookscore, because they are very
         # long-running and popular.  also, databases often don't have all of
         # their issues, so their issue counts may not be high enough to pass
         # the step function below.
         bookscore_n = 100
      else:
         # otherwise, we only get a good score if the series has enough issues
         # to plausibly contain our book's issue number (-1 allows for
         # delayed updates of the database).
         bookscore_n = 100 if booknumber_n-1 <= series_count_n else -100

      # 3. get the 'yearscore', which severely penalizes (-500) any series
      # that started after the year that the current book was published.
      current_year_n = datetime.datetime.now().year
      def valid_year_b(year_n):
         return year_n > 1900 and year_n <= current_year_n+1
      try:
         series_year_n = int(series_ref.start_year_s)
      except (TypeError, ValueError):
         series_year_n = 0
      yearscore_n = 0
      if valid_year_b(book.year_n):
         if not valid_year_b(series_year_n):
            yearscore_n = -100
         elif series_year_n > book.year_n:
            yearscore_n = -500

      # 4. get the 'recency score', which is a tiny negative value (usually
      # around on the range [-0.50, 0]) that gets worse (smaller) the older
      # the series is.  this is really a tie-breaker for series with
      # otherwise identical scores.
      if valid_year_b(series_year_n):
         recency_score_n = -(current_year_n - series_year_n) / 100.0
      else:
         recency_score_n = -1.0

      # 5. add up and return all the scores
      return bookscore_n + namescore_n + yearscore_n + recency_score_n

   # ==========================================================================
   def show_form(self):
      '''
      Displays this form, blocking until the user closes it.  When it is closed,
      it will return a SeriesFormResult describing how it was closed, and any
      SeriesRef that may have been chosen when it was closed.
      '''

      dialogAnswer = self.ShowDialog(self.Owner) # blocks
      if dialogAnswer == DialogResult.OK:
         result = SeriesFormResult( SeriesFormResult.OK,
            self.__series_refs[self.__chosen_index] )
      elif dialogAnswer == DialogResult.Yes:
         result = SeriesFormResult( SeriesFormResult.SHOW,
            self.__series_refs[self.__chosen_index] )
      elif dialogAnswer == DialogResult.Cancel:
         result = SeriesFormResult( SeriesFormResult.CANCEL )
      elif dialogAnswer == DialogResult.Ignore:
         result = SeriesFormResult( SeriesFormResult.SKIP )
      elif dialogAnswer == DialogResult.Retry:
         result = SeriesFormResult( SeriesFormResult.SEARCH )
      else:
         # should be impossible; the buttons built above only produce the
         # DialogResults handled in the branches above
         raise Exception("unrecognized DialogResult: " + repr(dialogAnswer))
      return result

   #===========================================================================
   def __form_closed_fired(self, sender, args):
      ''' this method is called whenever this SeriesForm is closed. '''

      self.__table.Dispose()
      self.__cover_image.free()
      # BUGFIX: deregister from the same event we registered with in
      # __build_gui.  (an earlier version removed the handler from 'Closed'
      # instead, which silently left it attached to 'FormClosed'.)
      self.FormClosed -= self.__form_closed_fired

   #===========================================================================
   def __toggle_checkbox_fired(self, sender, args):
      ''' this method is called when the form's checkbox is toggled '''

      # remember the new setting globally, and grow the table to fill the
      # space that the cover image vacates when it is hidden
      self.__config.show_covers_b = self.__checkbox.Checked
      if self.__config.show_covers_b:
         self.__cover_image.Show()
         self.__table.Size = Size(500, 290)
      else:
         self.__cover_image.Hide()
         self.__table.Size = Size(710, 290)

   #===========================================================================
   def __change_table_selection_fired(self, sender, args):
      ''' this method is called whenever the table's selected row changes. '''

      # update __chosen_index (eventually used as this dialog's return value)
      # and then also use it to update the displayed cover image.
      selected_rows = self.__table.SelectedRows
      if selected_rows.Count == 1:
         # Cells[6] holds the row's original index into self.__series_refs
         self.__chosen_index = selected_rows[0].Cells[6].Value
         self.__cover_image.set_image_ref(
            self.__series_refs[self.__chosen_index])
      else:
         self.__chosen_index = None
         self.__cover_image.set_image_ref(None)

      # don't let the user click 'ok' or 'show issues' if no row is selected!
      self.__ok_button.Enabled = selected_rows.Count == 1
      self.__issues_button.Enabled = selected_rows.Count == 1
#==============================================================================
class SeriesFormResult(object):
   '''
   Results that can be returned from the SeriesForm.show_form() method.  The
   'name' of this object describes the manner in which the user closed the
   dialog:

   1) SeriesFormResult.CANCEL means the user cancelled this scrape operation.
   2) SeriesFormResult.SKIP means the user elected to skip the current book.
   3) SeriesFormResult.SEARCH means the user chose to 'search again'
   4) SeriesFormResult.OK means the user chose a SeriesRef, and the script
      should try to automatically choose the correct issue for that SeriesRef.
   5) SeriesFormResult.SHOW means the user chose a SeriesRef, and the script
      should NOT automatically choose an issue for that SeriesRef--it should
      show the IssueForm and let the user choose manually.

   Note that if the SeriesFormResult has a name of 'OK' or 'SHOW', it should
   also have a non-None 'ref', which is of course the actual SeriesRef that
   the user chose.
   '''

   OK = "ok"
   SHOW = "show"
   CANCEL = "cancel"
   SKIP = "skip"
   SEARCH = "search"

   #===========================================================================
   def __init__(self, name, ref=None):
      '''
      Creates a new SeriesFormResult.
      name -> the name of the result, i.e. based on what button the user pressed
      ref -> the SeriesRef that the user chose, if they chose one at all.
      Raises an Exception if 'name' is not one of the five constants above.
      '''
      if name not in (self.OK, self.SHOW, self.CANCEL,
            self.SKIP, self.SEARCH):
         raise Exception("unrecognized SeriesFormResult name: " + repr(name))

      # only the OK and SHOW results actually carry a chosen SeriesRef;
      # any ref passed in with the other names is deliberately discarded
      self.__ref = ref if name in (self.OK, self.SHOW) else None
      self.__name = name

   #===========================================================================
   def get_name(self):
      ''' Gets the 'name' portion of this result (see possibilities above) '''
      return self.__name

   #===========================================================================
   def get_ref(self):
      '''
      Gets the SeriesRef portion of this result, i.e. the one the user picked.
      This is only defined when the 'name' of this result is "OK" or "SHOW".
      '''
      return self.__ref

   #===========================================================================
   def get_debug_string(self):
      ''' Gets a simple little debug string summarizing this result. '''
      if self.get_name() == self.SKIP:
         return "SKIP scraping this book"
      elif self.get_name() == self.CANCEL:
         return "CANCEL this scrape operation"
      elif self.get_name() == self.SEARCH:
         return "SEARCH AGAIN for more series"
      elif self.get_name() == self.SHOW:
         return "SHOW ISSUES for: '" + sstr(self.get_ref()) + "'"
      elif self.get_name() == self.OK:
         return "SCRAPE using: '" + sstr(self.get_ref()) + "'"
      else:
         # unreachable: __init__ validated the name
         raise Exception("unrecognized SeriesFormResult name")
| Python |
'''
This module contains the PersistentForm class.
@author: Cory Banack
'''
#corylow: comment and cleanup this file
import clr
import log
from utils import sstr, load_map, persist_map
import resources
clr.AddReference('System')
from System.Threading import Thread, ThreadStart
clr.AddReference('System.Drawing')
from System.Drawing import Point, Rectangle, Size
clr.AddReference('System.Windows.Forms')
from System.Windows.Forms import Form, FormStartPosition, Screen
#==============================================================================
class PersistentForm(Form):
   '''
   A superclass of all the forms in the Comic Vine Scraper.  It is responsible
   for implementing persistence (i.e. save/restore between runs of the app) of
   each form's location and size.  All data is stored and read from the
   geometry settings file.

   Subclasses indicate which attributes (location, size) should be persisted
   by passing specific arguments into the constructor.
   '''

   #===========================================================================
   def __init__(self, persist_loc_key_s = None, persist_size_key_s = None):
      '''
      Constructs a new PersistentForm.

      If persist_loc_key_s is specified, this form will attempt to save and
      restore its previous location every time it is run.  If no previous is
      available, or if this parameter is None, then the form will be centered on
      it's parent via Form.CenterToParent().

      If persist_size_key_s is specified, this form will attempt to save and
      restore its previous size every time it is run (a feature which only makes
      sense if the form is resizable!).  If no previous is available, or if this
      parameter is None, the form will use its natural size (i.e. no change)

      Note that both of these parameters should be unique, as they are also
      used as KEYS to the saved values that they represent in the geometry
      settings file!
      '''
      super(PersistentForm, self).__init__()
      # whether or not the user has moved or resized this form.
      self._bounds_changed = False
      # the pref key for persisting this form's location, or None to skip
      self._persist_loc_key_s = persist_loc_key_s
      # the pref key for persisting this form's size, or None to skip
      self._persist_size_key_s = persist_size_key_s
      self._initialize()

   #===========================================================================
   def _initialize(self):
      ''' initial configuration for new instances of this class '''
      # NOTE(review): Form.__init__ already ran via super().__init__() in our
      # constructor; this second call looks redundant -- confirm it is
      # intentional (an IronPython quirk?) before removing it.
      Form.__init__(self)
      # Manual start position: _install_persistent_bounds below decides where
      # this form actually appears (restored location, or centered).
      self.StartPosition = FormStartPosition.Manual
      self.Load += self._install_persistent_bounds

   #===========================================================================
   def _install_persistent_bounds(self, a, b):
      """
      Called when this Form is just about to be displayed.  This method tries to
      restore the previously used location/size settings, and it also checks
      them to make sure that they are still valid on the current monitor
      configuration (and fixes them if they are not!)
      'a', 'b' -> the .NET event sender/args; both unused (and 'b' is
      immediately shadowed by a local below).
      """
      self._load_bounds()

      # compute the center of our current bounding rectangle
      b = self.Bounds
      center = Point(b.X+b.Width/2, b.Y+b.Height/2)

      # if the center of this window is not onscreen, make it so that it is.
      # screen_bounds is grown to the union of all attached monitors' bounds,
      # so multi-monitor setups are handled correctly.
      screens = Screen.AllScreens
      screen_bounds = screens[0].Bounds
      for screen in screens:
         screen_bounds = Rectangle.Union(screen_bounds, screen.Bounds)
      if not screen_bounds.Contains(center):
         log.debug("WARNING: form's location was offscreen; adjusted it")
         self.CenterToScreen()
         self._bounds_changed = True
      else:
         self._bounds_changed = False

   #===========================================================================
   def _load_bounds(self):
      """
      Attempts to load the persisted size/location details from the geometry
      settings files, and adjust the form's current state to match.  Falls back
      to CenterToParent() when no valid persisted location is available.
      """
      do_default_loc = True
      if self._persist_size_key_s or self._persist_loc_key_s:
         prefs = load_map(resources.GEOMETRY_FILE)
         # grab the stored size value, if any, and apply it
         # NOTE(review): the bare excepts below swallow any parsing error and
         # silently keep the defaults -- deliberate best-effort behaviour.
         if self._persist_size_key_s and self._persist_size_key_s in prefs:
            try:
               size = prefs[self._persist_size_key_s].split(',')
               self.Size = Size(int(size[0]), int(size[1]))
            except:
               # didn't work, just stick with the forms current size
               pass
         # grab the stored location value, if any, and apply it
         if self._persist_loc_key_s and self._persist_loc_key_s in prefs:
            try:
               loc = prefs[self._persist_loc_key_s].split(',')
               loc = Point(int(loc[0]), int(loc[1]))
               self.Location = loc
               do_default_loc = False
            except:
               do_default_loc = True

      if do_default_loc:
         self.CenterToParent()

   #===========================================================================
   def _save_bounds(self):
      """
      Attempts to store this form's current size/location details into the
      geometry settings file.  The actual file write happens on a background
      thread, so this method returns immediately.
      """
      # might as well use an off thread, makes the gui a bit more responsive
      # NOTE(review): this logs *before* the background save has completed
      # (or even started) -- the message is slightly optimistic.
      log.debug("saved window geometry: ", self._persist_loc_key_s,
         " ", self._persist_size_key_s )
      def delegate():
         if self._persist_size_key_s or self._persist_loc_key_s:
            prefs = load_map(resources.GEOMETRY_FILE)
            if self._persist_loc_key_s:
               prefs[self._persist_loc_key_s] =\
                  sstr(self.Location.X) + "," + sstr(self.Location.Y)
            if self._persist_size_key_s:
               prefs[self._persist_size_key_s] =\
                  sstr(self.Width) + "," + sstr(self.Height)
            persist_map(prefs, resources.GEOMETRY_FILE)
      Thread(ThreadStart(delegate)).Start()

   #===========================================================================
   def OnMove(self, args):
      # Overridden to record that the location of this form has changed,
      # so OnFormClosing knows the geometry is worth persisting.
      Form.OnMove(self, args)
      self._bounds_changed = True

   #===========================================================================
   def OnResize(self, args):
      # Overridden to record that the size of this form has changed,
      # so OnFormClosing knows the geometry is worth persisting.
      Form.OnResize(self, args)
      self._bounds_changed = True

   #===========================================================================
   def OnFormClosing(self, args):
      # Overridden to make sure that we write out our persistent size/location
      # changes (if there were any) and do any other cleanup needed before
      # shutting down this window.
      if self._bounds_changed:
         self._save_bounds()
      self.Load -= self._install_persistent_bounds
      Form.OnFormClosing(self, args)
| Python |
'''
This module is home to the FinishForm class.
@author: Cory Banack
'''
import clr
import resources
from cvform import CVForm
clr.AddReference('System.Windows.Forms')
from System.Windows.Forms import AutoScaleMode, Button, DialogResult, Label
clr.AddReference('System.Drawing')
from System.Drawing import ContentAlignment, Point, Size
# =============================================================================
class FinishForm(CVForm):
'''
This is the last modal popup dialog that you see when you run the scraper.
It lets you know how many books were scraped, and how many were skipped.
'''
#===========================================================================
def __init__(self, scraper, status):
'''
Initializes this form.
'scraper' -> the ScrapeEngine that we are running as part of.
'status' -> a list containing two integers, the first is the number of
books that were scraped and the second is the number that were
skipped (both reported to the user by this form)
'''
CVForm.__init__(self, scraper.comicrack.MainWindow, "finishformLocation")
self.__build_gui( status[0], status[1] )
# ==========================================================================
def __build_gui(self, scraped_n, skipped_n):
'''
Constructs and initializes the gui for this form.
'scraped_n' -> the number of books that were scraped (reported to user)
'skipped_n' -> the number of books that were skipped (reported to user)
'''
# 1. --- build each gui component
scrape_label = self.__build_scrape_label(scraped_n)
skip_label = self.__build_skip_label(skipped_n)
ok = self.__build_okbutton()
# 2. --- configure this form, and add all the gui components to it
self.AcceptButton = ok
self.AutoScaleMode = AutoScaleMode.Font
self.Text = 'Comic Vine Scraper - v' + resources.SCRIPT_VERSION
self.ClientSize = Size(300, 90)
self.Controls.Add(scrape_label)
self.Controls.Add(skip_label)
self.Controls.Add(ok)
# 3. --- define the keyboard focus tab traversal ordering
ok.TabIndex = 0
# ==========================================================================
def __build_scrape_label(self, scraped_n):
'''
Builds and returns the 'number scraped' Label for this form.
'scraped_n' -> the number of books that were scraped.
'''
label = Label()
label.Location = Point(10, 10)
label.Size = Size(280, 13)
label.TextAlign = ContentAlignment.MiddleCenter
label.Text = "Scraped details for {0} comic book{1}."\
.format(scraped_n, "" if scraped_n==1 else "s")
return label
# ==========================================================================
def __build_skip_label(self, skipped_n):
'''
Builds and returns the 'number skipped' Label for this form.
'skipped_n' -> the number of books that were skipped.
'''
label = Label()
label.Location = Point(10, 30)
label.Size = Size(280, 13)
label.TextAlign = ContentAlignment.MiddleCenter
label.Text = "Skipped {0} comic book{1}."\
.format(skipped_n, "" if skipped_n==1 else "s")
return label
# ==========================================================================
def __build_okbutton(self):
    ''' Builds and returns the ok button for this form. '''
    okbutton = Button()
    okbutton.UseVisualStyleBackColor = True
    okbutton.Text = 'Ok'
    okbutton.Location = Point(120, 58)
    okbutton.Size = Size(60, 23)
    # closing via this button reports DialogResult.OK to the caller
    okbutton.DialogResult = DialogResult.OK
    return okbutton
# ==========================================================================
def show_form(self):
    ''' Displays this form, blocking until the user closes it. Returns None. '''
    self.ShowDialog() # blocks until the dialog is dismissed
    return None
'''
This module contains the CVForm class.
@author: Cory Banack
'''
import clr
clr.AddReference('System.Windows.Forms')
from System.Windows.Forms import FormBorderStyle, Keys
from persistentform import PersistentForm
#==============================================================================
class CVForm(PersistentForm):
    '''
    This class is the direct superclass of all Comic Vine Scraper Forms.
    It contains functionality and default configuration that is common to
    all forms in this application.
    '''

    #===========================================================================
    def __init__(self, owner, persist_loc_key_s = "", persist_size_key_s = "" ):
        '''
        Constructs a new CVForm.
        Requires an owner parameter, which is the Form that will own this form.
        The other two parameters are passed up to the PersistentForm superclass.
        '''
        super(CVForm, self).__init__( persist_loc_key_s, persist_size_key_s )

        # these are the default properties of all CVForms: owned, modeless
        # fixed-size tool windows that stay out of the taskbar.
        self.Owner = owner
        self.Modal = False
        self.MaximizeBox = False
        self.MinimizeBox = False
        self.ShowIcon = False
        self.ShowInTaskbar = False
        self.FormBorderStyle = FormBorderStyle.FixedToolWindow

    #===========================================================================
    def __enter__(self):
        ''' Called automatically if you use this form in a python "with" block.'''
        return self

    #===========================================================================
    def __exit__(self, type, value, traceback):
        ''' Called automatically if you use this form in a python "with" block.'''
        # ensure that the form is closed and disposed in a timely manner
        self.Close()
        self.Dispose()

    #===========================================================================
    def ProcessCmdKey(self, msg, keys):
        ''' Called anytime the user presses a key while this form has focus. '''
        # overridden to ensure that all CVForms close themselves
        # if you press the escape key.
        # NOTE(review): neither branch returns a value, so this override always
        # reports None (falsy) back to WinForms instead of the base method's
        # boolean result -- confirm whether 'return' was intended here.
        if keys == Keys.Escape:
            self.Close()
        else:
            super(CVForm, self).ProcessCmdKey(msg, keys)
| Python |
'''
This module is home to the IssueCoverPanel class.
@author: Cory Banack
'''
import clr
from dbpicturebox import DBPictureBox
from utils import sstr
import db
import utils
from scheduler import Scheduler
clr.AddReference('System.Drawing')
from System.Drawing import ContentAlignment, Font, FontStyle, Point, Size
clr.AddReference('System.Windows.Forms')
from System.Windows.Forms import Button, Label, Panel
#==============================================================================
class IssueCoverPanel(Panel):
    '''
    This panel is a compound gui component for displaying a comic book's issue
    cover art (in a DBPictureBox), along with a few extra decorations.

    Namely, there is a label displaying the current IssueRef's issue number
    just below the DBPictureBox, and on either side of that there are two buttons
    that allow you to navigate forward and backward through the list of alternate
    cover art for the currently displayed issue.

    You can set the issue that is currently being displayed by calling the
    'set_issue' method.  After that, background threads will take care of
    loading the cover art (and alternate covers) for that issue.

    Do not forget to 'free' this panel when it is no longer in use!
    '''

    #===========================================================================
    def __init__(self, config):
        '''
        Initializes this panel.
        'config' -> the shared global Configuration object
        '''
        # the shared global configuration
        self.__config = config
        # a PictureBox that displays the cover art for the current selected issue
        self.__cover_image = None
        # a label describing the currently displayed cover image
        self.__label = None
        # the "next" button for seeing an issue's next available cover
        self.__nextbutton = None
        # the "prev" button for seeing an issue's previous cover
        self.__prevbutton = None
        # the IssueRef of the issue that we are currently displaying
        self.__issueref = None
        # a mapping of IssueRefs to _ButtonModels.  Basically caches the
        # next/prev button state for each issue.
        self.__button_cache = {}
        # a simple "last-in-and-ignore-everything-else" scheduler
        self.__scheduler = Scheduler()
        # the user's alternate cover art choice, if she made one.
        # this url is None until this panel is disposed (i.e. 'free' is called)
        self.__alt_cover_url = None

        Panel.__init__(self)
        self.__build_gui()

    # ==========================================================================
    def __build_gui(self):
        ''' Constructs and initializes the gui for this panel. '''

        # 1. --- build each gui component
        self.__cover_image = self.__build_coverimage()
        self.__label = self.__build_label()
        self.__nextbutton = self.__build_nextbutton()
        self.__prevbutton = self.__build_prevbutton()

        # 2. --- configure this form, and add all the gui components to it
        self.Size = Size(195, 360)
        self.Controls.Add(self.__cover_image)
        self.Controls.Add(self.__prevbutton)
        self.Controls.Add(self.__label)
        self.Controls.Add(self.__nextbutton)

        # 3. --- make sure the UI goes into a good initial state and the callback
        #    function gets its initial call.
        self.set_issue(None)

    # ==========================================================================
    def __build_coverimage(self):
        ''' builds and returns the cover image DBPictureBox for this panel '''
        cover = DBPictureBox()
        cover.Location = Point(0, 0)
        cover.Size = Size(195, 320)
        # covers can be disabled entirely via the user's configuration
        cover.Visible = self.__config.show_covers_b
        return cover

    # ==========================================================================
    def __build_label(self):
        '''
        Builds and return the label that describes the displayed cover image.
        '''
        label = Label()
        label.Visible = self.__config.show_covers_b
        label.Location = Point(23, 326)
        label.Size = Size(149,36)
        label.TextAlign = ContentAlignment.MiddleCenter
        return label

    # ==========================================================================
    def __build_nextbutton(self):
        ''' Builds and returns the 'next' button for this panel. '''
        button = Button()
        button.Location = Point(173, 332)
        button.Size = Size(20, 24)
        button.Text = '>'
        button.Font = Font(button.Font, FontStyle.Bold)
        button.UseVisualStyleBackColor = True
        button.Click += self.__button_click_fired
        # note: this button's visibility is manipulated by __update
        return button

    # ==========================================================================
    def __build_prevbutton(self):
        ''' Builds and returns the 'previous' button for this panel. '''
        button = Button()
        button.Location = Point(2, 332)
        button.Size = Size(20, 24)
        button.Text = '<'
        button.Font = Font(button.Font, FontStyle.Bold)
        button.UseVisualStyleBackColor = True
        button.Click += self.__button_click_fired
        # note: this button's visibility is manipulated by __update
        return button

    # ==========================================================================
    def free(self):
        '''
        Free all resources allocated by this class when it is no longer needed.
        '''
        # sets the alternate cover image that the user may have chosen;
        # only a non-default selection (can_decrement) that is a plain url
        # string is recorded.  see get_alt_cover_image_url for more details.
        if self.__issueref:
            button_model = self.__button_cache[self.__issueref]
            if button_model:
                if button_model.can_decrement():
                    ref = button_model.get_current_ref()
                    if utils.is_string(ref):
                        self.__alt_cover_url = ref

        self.__scheduler.shutdown(False)  # don't wait for pending tasks
        self.set_issue(None)
        self.__cover_image.free()
        self.__prevbutton = None
        self.__nextbutton = None
        self.__label = None
        self.Dispose()

    # ==========================================================================
    def set_issue(self, issue_ref):
        '''
        Sets the comic issue that this panel is displaying.
        'issue_ref'-> the IssueRef for the issue that we are displaying, or None.
        '''
        self.__issueref = issue_ref
        self.__update()

    # ==========================================================================
    def get_alt_cover_image_url(self):
        '''
        If the comic issue that this panel was displaying when it was closed was
        set to display an alternate cover image (i.e. anything other than the
        default image) then this method will return the string URL for that image.
        Otherwise, or if the panel hasn't been shutdown yet, we return None.
        '''
        return self.__alt_cover_url

    # ==========================================================================
    def __update(self):
        '''
        Updates all elements of this controls GUI.  Should be called anytime
        anything has changed that might require a change to the data displayed
        by one of this controls child controls.

        Note that this method call may initiate a background thread to update a
        newly created _ButtonModel at some point in the future.
        '''

        # 1. grab copies of all the member variables that we might want to use;
        #    keep in mind that any of the following code can be running AFTER this
        #    panel and it's form has been closed/disposed, so we don't want to
        #    directly rely on any 'self.' members.
        issueref = self.__issueref
        cache = self.__button_cache
        cover_image = self.__cover_image
        nextbutton = self.__nextbutton
        prevbutton = self.__prevbutton
        label = self.__label
        scheduler = self.__scheduler

        if issueref is None or cache is None:
            # 2. do nothing, we're in a wierd/border state
            cover_image.set_image_ref(None)
            nextbutton.Visible = False
            prevbutton.Visible = False
            label.Enabled = True
            label.Text = ''
        else:
            # 3a. make sure the cache has a _ButtonModel for the current issue
            #     also, if that _ButtonModel is not fully updated, update it.
            if not cache.has_key(issueref) or \
                  not cache[issueref].is_fully_updated():
                label.Enabled = False
                if not cache.has_key(issueref):
                    cache[issueref] = _ButtonModel(issueref)
                bmodel = cache[issueref]
                def update_cache(): #runs on scheduler thread
                    # query the database for the issue's full set of cover urls,
                    # then marshal the results back onto the gui thread
                    issue = db.query_issue(issueref)
                    def add_refs(): # runs on application thread
                        bmodel.set_fully_updated()
                        # the first url is the default cover, already present
                        if issue and len(issue.image_urls) > 1:
                            for i in range(1, len(issue.image_urls)):
                                bmodel.add_new_ref(issue.image_urls[i])
                        self.__update(); # recurse!
                    utils.invoke(self, add_refs, False)
                scheduler.submit(update_cache)

            # 3b. now that we have a bmodel for the current issue, adjust our
            #     various gui widgets according to its state.  remember, if this
            #     bmodel was freshly created, it's going to contain a single
            #     image reference for now, but at some point in the future (~1 sec)
            #     this method will be automatically called again, and the bmodel
            #     will be fully updated, and may contain more images.
            bmodel = cache[issueref]
            cover_image.set_image_ref( bmodel.get_current_ref() )
            nextbutton.Visible = cover_image.Visible and bmodel.can_increment()
            prevbutton.Visible = cover_image.Visible and bmodel.can_decrement()

            # 3c. update the text for the label.
            issue_num_s = self.__issueref.issue_num_s if self.__issueref else ''
            label.Enabled = bmodel.is_fully_updated()
            if bmodel.is_fully_updated():
                if issue_num_s:
                    if len(bmodel) > 1:
                        self.__label.Text = 'Issue ' + sstr(issue_num_s) + \
                            ' - Cover ' + sstr(bmodel.get_ref_id()+1) + ' of ' + \
                            sstr(len(bmodel))
                    else:
                        self.__label.Text = 'Issue ' + sstr(issue_num_s) + \
                            ' - Single Cover'
                else:
                    self.__label.Text = ""
            else:
                self.__label.Text = "Searching for more covers..."

    # ==========================================================================
    def __button_click_fired(self, sender, args):
        ''' This method is called when the next/prev buttons are clicked '''
        # should never happen when the button cache is empty or issueref is None
        bmodel = self.__button_cache[self.__issueref]
        if sender == self.__nextbutton:
            bmodel.increment()
        else:
            bmodel.decrement()
        self.__update()
# =============================================================================
class _ButtonModel(object):
'''
Contains state for the next/prev buttons for a single comic issue.
In particular, contains 1 or more 'image references' for that issue.
Each image reference is a url (or IssueRef object) that maps to one of
the cover art images for that issue.
'''
#===========================================================================
def __init__(self, issue_ref):
'''
Initializes this _ButtonModel with the given issue_ref as its sole
image reference. More references can be added.
'issue_ref' -> an IssueRef that will be our sole image reference (so far).
'''
# a list of all of this buttons image references. will always have at
# least one element, though that element may be a null image reference.
self.__image_refs = []
# the position of the 'current' element in the list of image references.
# this value can be changed by the 'increment' or 'decrement' methods.
self.__pos_n = 0
# true iff this _ButtonModel has been fully updated with all image refs
self.__is_fully_updated = False
self.add_new_ref(issue_ref)
#===========================================================================
def add_new_ref(self, image_ref):
'''
Adds a new image reference to this button model. The given ref should be
either an IssueRef (which can be used indirectly to obtain a cover image)
or the direct string url of the image.
'''
if image_ref:
if not image_ref in self.__image_refs:
self.__image_refs.append(image_ref)
#===========================================================================
def get_current_ref(self):
'''
Gets the current image reference for this _ButtonModel. This value will
be an IssueRef or an url string.
'''
return self.__image_refs[self.__pos_n]
#===========================================================================
def increment(self):
'''
Increments the current image reference (see 'get_current_ref') to the next
one in this _ButtonModel, unless we are already at the last one
(see 'can_increment').
'''
self.__pos_n = min(len(self.__image_refs)-1, self.__pos_n + 1)
#===========================================================================
def can_increment(self):
''' Returns whether we can increment this _ButtonModel any further. '''
return self.__pos_n < len(self.__image_refs)-1
#===========================================================================
def decrement(self):
'''
Decrements the current image reference (see 'get_current_ref') to the
previous one in this _ButtonModel, unless we are already at the first one
(see 'can_decrement').
'''
self.__pos_n = max(0, self.__pos_n - 1)
#===========================================================================
def can_decrement(self):
''' Returns whether we can decrement this _ButtonModel any further. '''
return self.__pos_n > 0
#===========================================================================
def set_fully_updated(self):
''' Marks this _ButtonModel as having a complete set of image refs.'''
self.__is_fully_updated = True
#===========================================================================
def is_fully_updated(self):
''' Returns whether this _ButtonModel is flagged as 'fully updated'.'''
return self.__is_fully_updated
#===========================================================================
def get_ref_id(self):
''' Returns the index of the current image reference.'''
return self.__pos_n
#===========================================================================
def __len__(self):
''' Allows the 'len' operation to work on _ButtonModels. '''
return len(self.__image_refs) | Python |
# This Python file uses the following encoding: us-ascii
# corylow: comment and cleanup this file
import sys
import os
import cPickle
import resources
import clr
clr.AddReferenceByPartialName("System.Windows.Forms")
clr.AddReferenceByPartialName("System.Drawing")
from System.Windows.Forms import Form
# add a reference to a directory containing mockups of key comic rack dlls
sys.path.append( os.path.dirname(os.path.dirname(__file__))+r"\comicrack")
# we have to add a few definitions to help the starting conditions when this
# script is running as part of ComicRack
import ComicVineScraper
class ComicRack:
    # A minimal stand-in for the real ComicRack application object, so the
    # scraper can be launched outside of ComicRack for testing.
    class AppImpl:
        # fake product version, high enough to pass any version checks
        ProductVersion = '999.999.99999'
        def GetComicPage(self, arg1, arg2):
            # the mock has no page images to offer
            return None
        def SetCustomBookThumbnail(self, book, bitmap):
            # pretend the thumbnail was set successfully
            return True
    class MainForm(Form):
        pass
    App = AppImpl()
    MainWindow = MainForm()
    # show the stand-in main window so child forms have a visible owner
    MainWindow.Show()
    MainWindow.CenterToScreen()
# inject the mock ComicRack into the script module, and point the resource
# paths at the development profile directory
ComicVineScraper.ComicRack = ComicRack
resources._SCRIPT_DIRECTORY = os.path.dirname( \
    os.path.dirname( os.path.dirname(__file__))) + r'/profile/'
resources.LOCAL_CACHE_DIRECTORY = resources._SCRIPT_DIRECTORY + r'localCache/'
resources.SETTINGS_FILE = resources._SCRIPT_DIRECTORY + r'settings.dat'
resources.GEOMETRY_FILE = resources._SCRIPT_DIRECTORY + r'geometry.dat'

# now build and run the launcher
class Launcher(object):
def __init__(self):
if len(sys.argv) == 2:
# note that this doesn't work (for highly mysterious reasons) if you
# change the project source character encoding to anything other
# than US-ASCII
f = open(sys.argv[1], "r")
books = cPickle.load(f)
ComicVineScraper.ComicVineScraper(books)
else:
print "Usage: this script takes a single file as an argument."
Launcher() | Python |
from ged_handler import *
# Regular Expression
import re
# This class parse a gedcom file to create an object tree
# It use the handler to parse line by line
class Parser:
    # Parses a gedcom file into an object tree, delegating the semantic work
    # to a Handler (see ged_handler), one line at a time.

    # the Handler used by the most recent parse() call
    handler = None

    def __init__(self):
        self.handler = Handler()

    def parseline(self, line):
        # Split one gedcom line into a (<level>, <code>, <ref>, <info>) tuple:
        # <level> is the leading digit, <code> the tag (e.g. 'INDI'),
        # <ref> the @...@ cross-reference id (or None), and <info> the
        # trailing payload (or None).

        # strip a single trailing newline, if present
        if len(line) > 0 and line[-1] == '\n':
            line = line[:-1]
        level = int(line[0])
        # line matches '<level> @<ref>@ <code> <info>'
        if re.match('[0-9]{1} @[a-zA-Z0-9]*@ [a-zA-Z0-9]+ .', line):
            subline = line[line.find('@') + 1:]
            code = subline[subline.find('@') + 2:]
            info = code[code.find(' ') + 1:]
            code = code[:code.find(' ')]
            m = re.search('@[a-zA-Z0-9]*@', line)
            ref = m.group(0)[1:-1]
        # line matches '<level> @<ref>@ <code>'
        elif re.match('[0-9]{1} @[a-zA-Z0-9]*@ [a-zA-Z0-9]+', line):
            subline = line[line.find('@') + 1:]
            code = subline[subline.find('@') + 2:]
            m = re.search('@[a-zA-Z0-9]*@', line)
            ref = m.group(0)[1:-1]
            info = None
        # line matches '<level> <code> @<ref>@'
        # note: the fixed slices assume the code is exactly 4 characters wide
        elif re.match('[0-9]{1} [a-zA-Z]{4} @[a-zA-Z0-9]+@', line):
            code = line[2:6]
            ref = line[8:-1]
            info = None
        # line matches '<level> <code> <info>'
        elif re.match('[0-9]{1} [a-zA-Z]+ .', line):
            # codes are at most 4 chars; a 3-char code leaves a trailing space
            code = line[2:6]
            if code[3] == ' ':
                code = code[0:-1]
                info = line[6:]
            else:
                info = line[7:]
            ref = None
        # line matches '<level> <code>'
        else:
            code = line[2:].strip()
            info = ref = None
        return (level, code, ref, info)

    def parse(self, filename):
        # Read `filename`, convert every line to a tuple and feed them all
        # to a fresh Handler; returns the resulting family tree object.
        file = open(filename, 'r')
        lines = file.readlines()
        file.close()
        tuples = []
        for line in lines:
            tuples.append(self.parseline(line))
        self.handler = Handler()
        for tuple in tuples:
            self.handler.handle(tuple)
        return self.handler.familyTree
| Python |
# Models
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import *
from sqlalchemy.orm import relationship, backref
# shared declarative base for every mapped class in this module
Base = declarative_base()
class GedDate(Base):
    """A GEDCOM date value, persisted in the 'geddate' table.

    `mode` qualifies how the date should be read: 'simple', 'before',
    'after', 'between', 'calculated', 'estimated' or 'about' (the values
    assigned by the ged_handler parsing code).
    """
    __tablename__ = 'geddate'
    geddate_id = Column(Integer, Sequence('geddate_sequence'), primary_key=True)
    year = Column(Integer)
    month = Column(Integer)
    day = Column(Integer)
    hour = Column(Integer)
    minute = Column(Integer)
    seconde = Column(Integer)
    mode = Column(String)

    def __init__(self):
        self.mode = 'simple'

    def processComplete(self):
        """Return a datetime built from this date's fields, or None when any
        of year/month/day is missing or non-positive.

        Bug fix: the original condition used the bitwise '&' operator, which
        binds tighter than the comparisons and made the test always false; it
        also referenced the bare names year/month/day instead of the instance
        attributes, and discarded the constructed value.
        """
        from datetime import datetime  # local import: module doesn't import it
        if self.year is None or self.month is None or self.day is None:
            return None
        if self.year > 0 and self.month > 0 and self.day > 0:
            return datetime(self.year, self.month, self.day)
        return None
class Event(Base):
    """A gedcom event (birth, death, marriage, ...); `type` holds the tag."""
    __tablename__ = 'event'
    event_id = Column(Integer, Sequence('event_sequence'), primary_key=True)
    type = Column(String)
    date_id = Column(Integer, ForeignKey('geddate.geddate_id'), name="date_id")
    geddate = relationship(GedDate, foreign_keys=date_id,
                           primaryjoin=date_id == GedDate.geddate_id)

    def __init__(self):
        # plain (unmapped) collections filled in by the parser
        self.sources = []
        self.notes = []
        self.multimedialinks = []
class FamilyTreeSource(Base):
    """The system/program that produced the gedcom file (header SOUR record)."""
    __tablename__ = 'familytree_source'
    familytree_source_id = Column(Integer, Sequence('familytree_source_sequence'), primary_key=True)
    system_id = Column(String)
    name = Column(String)
    version = Column(String)
    # links to the producing company (mapped by the Corporation class below)
    corporate_id = Column(Integer, ForeignKey('familytree_source_corporate.familytree_source_corporate_id'), name="corporate_id")

    def __init__(self):
        pass
class FamilyTree (Base):
    """One parsed gedcom file: header metadata plus the record collections
    (individuals, families, ...) accumulated by the Handler."""
    __tablename__ = 'familytree'
    familytree_id = Column(Integer, Sequence('familytree_sequence'), primary_key=True)
    charset = Column(String)
    copyright = Column(String)
    filename = Column(String, name="filename")
    langage = Column(String)  # note: 'langage' (sic) is the established name
    source_id = Column(Integer, ForeignKey('familytree_source.familytree_source_id'), name="source_id")
    date_id = Column(Integer, ForeignKey('geddate.geddate_id'), name="date_id")
    geddate = relationship(GedDate, foreign_keys=date_id,
                           primaryjoin=date_id == GedDate.geddate_id)
    source = relationship(FamilyTreeSource, foreign_keys=source_id,
                          primaryjoin=source_id == FamilyTreeSource.familytree_source_id)

    def __init__(self):
        # plain (unmapped) collections filled in while parsing
        self.individuals = []
        self.families = []
        self.sources = []
        self.notes = []
        self.submitters = []
# many-to-many link tables

# individual <-> event
individual_event_table = Table('individual_event', Base.metadata,
    Column('individual_id', Integer, ForeignKey('individual.individual_id')),
    Column('event_id', Integer, ForeignKey('event.event_id'))
)

# family <-> child individual (used by Family.children / 'familychildren' backref)
family_children_table = Table('family_children', Base.metadata,
    Column('family_id', Integer, ForeignKey('family.family_id')),
    Column('individual_id', Integer, ForeignKey('individual.individual_id'))
)

# individual name <-> individual
individual_name_individual_table = Table('individual_name_individual', Base.metadata,
    Column('individual_name_id', Integer, ForeignKey('individual_name.individual_name_id')),
    Column('individual_id', Integer, ForeignKey('individual.individual_id'))
)
class Individual_Name (Base):
    """One structured personal name (NAME record parts) of an individual."""
    __tablename__ = 'individual_name'
    individual_name_id = Column(Integer, Sequence('individual_name_sequence'), primary_key=True)
    firstname = Column(String, name="firstname")
    lastname = Column(String, name="lastname")
    prefix = Column(String, name="prefix")
    suffix = Column(String, name="suffix")
    nickname = Column(String, name="nickname")
    surname = Column(String, name="surname")
    surname_prefix = Column(String, name="surname_prefix")

    def __init__(self):
        # plain (unmapped) collections filled in by the parser
        self.sources = []
        self.notes = []
class Individual (Base):
    """A person (gedcom INDI record); `ident` is the @...@ cross-reference id."""
    __tablename__ = 'individual'
    individual_id = Column(Integer, Sequence('individual_sequence'), primary_key=True)
    personal_name = Column(String)
    sex = Column(String)
    ident = Column(String)
    events = relationship(Event, secondary=individual_event_table)
    names = relationship(Individual_Name, secondary=individual_name_individual_table)
    # the reverse side is provided by the 'familychildren' backref on
    # Family.children, so this direct relationship stays disabled:
    #familychildren = relationship("Family", secondary=family_children_table)

    def __init__(self, ident):
        self.ident = ident
        # plain (unmapped) scratch collections used by the parser
        self.events = []
        self.familychildren = []
        self.familyspouse = []
        self.sources = []
        self.objects = []
        self.associations = []
class Family (Base):
    """A family unit (gedcom FAM record): husband, wife and children."""
    __tablename__ = 'family'
    family_id = Column(Integer, Sequence('family_sequence'), primary_key=True)
    husband_id = Column(Integer, ForeignKey('individual.individual_id'), name="husband_id")
    wife_id = Column(Integer, ForeignKey('individual.individual_id'), name="wife_id")
    ident = Column(String)  # gedcom @...@ cross-reference id
    husband = relationship(Individual, foreign_keys=husband_id,
                           primaryjoin=husband_id == Individual.individual_id)
    wife = relationship(Individual, foreign_keys=wife_id,
                        primaryjoin=wife_id == Individual.individual_id)
    # exposes Individual.familychildren as the reverse side
    children = relationship("Individual",
                            secondary=family_children_table, backref="familychildren")

    def __init__(self, ident):
        self.ident = ident
        # plain (unmapped) scratch collections used by the parser
        self.children = []
        self.events = []
        self.sources = []
# many-to-many link table: source <-> event
source_event_table = Table('source_event', Base.metadata,
    Column('source_id', Integer, ForeignKey('source.source_id')),
    Column('event_id', Integer, ForeignKey('event.event_id'))
)
class Source (Base):
    """A source citation record (gedcom SOUR), e.g. a book or document."""
    __tablename__ = 'source'
    source_id = Column(Integer, Sequence('source_sequence'), primary_key=True)
    ident = Column(String, name="ident")
    text = Column(String, name="text")
    title = Column(String, name="title")
    organisation = Column(String, name="organisation")
    publication = Column(String, name="publication")
    events = relationship(Event, secondary=source_event_table)

    def __init__(self, ident):
        self.ident = ident
        # plain (unmapped) collection of attached multimedia objects
        self.objects = []
class Note:
    """A gedcom note (NOTE) record.

    Cross-referenced notes carry their @...@ id in `ident`; inline notes are
    built with ident=None and deliberately get no `ident` attribute at all.
    """
    def __init__(self, ident):
        if ident is not None:
            self.ident = ident

class Submitter:
    """A gedcom submitter (SUBM) record, identified by its cross-reference id."""
    def __init__(self, ident):
        self.ident = ident
class Place:
    """Placeholder for a gedcom place (PLAC) record; attributes are attached
    dynamically by the parser."""
    def __init__(self):
        pass

class Map:
    """Placeholder for a gedcom map (MAP) coordinate record."""
    def __init__(self):
        pass
class Address (Base):
    """A postal address (ADDR record).

    Only `line` is mapped; the parser also attaches unmapped attributes
    (text, postal_code, city, ...) -- see ged_handler's address handlers.
    """
    __tablename__ = 'address'
    id = Column(Integer, primary_key=True)
    line = Column(String)

    def __init__(self):
        pass
class Change:
    """A change record (CHAN): when a record was last modified."""
    def __init__(self):
        self.notes = []

class GedcomInfo:
    """Placeholder for gedcom header metadata (GEDC record)."""
    def __init__(self):
        pass

class SourceData:
    """Placeholder for a source's DATA sub-record."""
    def __init__(self):
        pass
class Object:
    """A multimedia object (OBJE) record; `filename` is filled in later by
    the parser (see ged_handler.handle_filename)."""

    # default until a FILE tag supplies the real value
    filename = None

    def __init__(self, ident=None):
        """Create an object record, optionally with its @...@ cross-ref id.

        Bug fix: the class previously declared two __init__ methods; the
        second silently replaced the first, so the no-argument form raised
        TypeError.  A single constructor with a default keeps both call
        forms working.
        """
        self.ident = ident
class Association:
    """An association (ASSO) between individuals, collecting its source
    citations and notes as plain lists."""
    def __init__(self):
        self.sources, self.notes = [], []
class Corporation(Base):
    """The company behind the producing system (header CORP record); note the
    table name ties it to FamilyTreeSource.corporate_id."""
    __tablename__ = 'familytree_source_corporate'
    familytree_source_corporate_id = Column(Integer, Sequence('familytree_source_corporate_sequence'), primary_key=True)
    name = Column(String)

    def __init__(self):
        pass
class PhoneNumber:
    """Placeholder for a phone number (PHON record); `value` is attached by
    ged_handler.handle_phone_number."""
    def __init__(self):
        pass
| Python |
from model import *
# maps gedcom three-letter month abbreviations (upper case) to month numbers
MONTHS = {'JAN': 1, 'FEB': 2, 'MAR': 3, 'APR': 4, 'MAY': 5, 'JUN': 6, 'JUL': 7, 'AUG': 8, 'SEP': 9, 'OCT': 10, 'NOV': 11, 'DEC':12}
class Handler:
def __init__(self):
    # nesting level of the gedcom line most recently processed (-1 = none yet)
    self.currentlevel = -1
    # stack of in-progress objects, one entry pushed per handled gedcom line
    self.objectstack = []
    # the tree being assembled; presumably created when the header record is
    # handled (that code is elsewhere) -- parse() reads it when done
    self.familyTree = None
    # registries of already-seen records, by record type then by @...@ ident
    self.objects = {'INDI': {}, 'FAM':{}, 'SOUR': {}, 'NOTE': {}, 'SUBM': {}, 'ASSO': {}}
def read_individual(self, ident):
    # Return the Individual for `ident`, creating and registering it on
    # first sight; either way it is pushed on the object stack and appended
    # to the tree's individual list.
    known = self.objects['INDI']
    if ident in known:
        individual = known[ident]
    else:
        known[ident] = individual = Individual(ident)
    self.objectstack.append(individual)
    self.familyTree.individuals.append(individual)
    return individual
def read_family(self, ident):
    # Return the Family for `ident`, creating and registering it on first
    # sight; either way it is pushed on the object stack and appended to
    # the tree's family list.
    known = self.objects['FAM']
    if ident in known:
        family = known[ident]
    else:
        known[ident] = family = Family(ident)
    self.objectstack.append(family)
    self.familyTree.families.append(family)
    return family
def read_source(self, ident):
    # Return the Source for `ident`, creating and registering it on first
    # sight; either way it is pushed on the object stack and appended to
    # the tree's source list.
    known = self.objects['SOUR']
    if ident in known:
        source = known[ident]
    else:
        known[ident] = source = Source(ident)
    self.objectstack.append(source)
    self.familyTree.sources.append(source)
    return source
def read_note(self, ident, text):
    # NOTE records come in two flavours: cross-referenced (ident != None),
    # registered and reused like other records, and inline anonymous notes,
    # which are attached to whatever object is on top of the stack.
    note = None
    if ident != None:
        if ident in self.objects['NOTE']:
            note = self.objects['NOTE'][ident]
        else:
            note = Note(ident)
            self.objects['NOTE'][ident] = note
        self.objectstack.append(note)
        self.familyTree.notes.append(note)
    else:
        note = Note(None)
        self.objectstack[-1].notes.append(note)
        self.objectstack.append(note)
    # any text on the NOTE line itself becomes the (initial) note text
    if text != None:
        note.text = text
    return note
def read_submitter(self, ident):
    """Return the Submitter for `ident`, creating and registering it on
    first sight; either way it is pushed on the object stack and appended
    to the tree's submitter list.

    Fix: removed the stray `source = None` line, an unused leftover from a
    copy/paste of read_source.
    """
    if ident in self.objects['SUBM']:
        submitter = self.objects['SUBM'][ident]
    else:
        submitter = Submitter(ident)
        self.objects['SUBM'][ident] = submitter
    self.objectstack.append(submitter)
    self.familyTree.submitters.append(submitter)
    return submitter
def read_date(self, value):
    """DATE tag: attach a fresh GedDate to the current object, parse `value`
    into it, and push it so sub-tags (e.g. TIME) can extend it.

    Fix: removed a redundant second `objectstack[-1].geddate = date`
    assignment -- it re-stored the very same object on the same attribute.
    """
    date = self.objectstack[-1].geddate = GedDate()
    self.parse_date(date, value)
    self.objectstack.append(date)
def parse_date(self, date, line):
    """Fill `date` (a GedDate) from a gedcom DATE value.

    A leading qualifier ('BEF.', 'AFT.', 'BET.', 'CAL.', 'EST.', 'ABT.')
    selects `date.mode`; the remainder is parsed as a simple date.  For a
    'BET.' range ('start - end') two extra GedDates are stored on
    date.beginning / date.end (plain attributes, not mapped columns --
    TODO confirm whether ranges are meant to be persisted).

    Fix: dropped the unused local `dateline` left over from an earlier
    revision.
    """
    if line.find('BEF.') >= 0:
        date.mode = 'before'
        self.parse_simple_date(date, line[5:])
    elif line.find('AFT.') >= 0:
        date.mode = 'after'
        self.parse_simple_date(date, line[5:])
    elif line.find('BET.') >= 0:
        date.mode = 'between'
        elements = line[5:].rsplit('-')
        # beginning of the range
        beginning_date = GedDate()
        self.parse_simple_date(beginning_date, elements[0].strip())
        date.beginning = beginning_date
        # end of the range
        end_date = GedDate()
        self.parse_simple_date(end_date, elements[1].strip())
        date.end = end_date
    elif line.find('CAL.') >= 0:
        date.mode = 'calculated'
        self.parse_simple_date(date, line[5:])
    elif line.find('EST.') >= 0:
        date.mode = 'estimated'
        self.parse_simple_date(date, line[5:])
    elif line.find('ABT.') >= 0:
        date.mode = 'about'
        self.parse_simple_date(date, line[5:])
    else:
        date.mode = 'simple'
        self.parse_simple_date(date, line)
def parse_simple_date(self, date, line):
    # Parse 'D MON YYYY', 'MON YYYY' or 'YYYY' into date.day/.month/.year,
    # setting the unused fields to None.  Month names go through MONTHS.
    parts = line.rsplit(' ')
    count = len(parts)
    if count == 3:
        date.day = int(parts[0])
        date.month = MONTHS[parts[1].upper()]
        date.year = int(parts[2])
    elif count == 2:
        date.day = None
        date.month = MONTHS[parts[0].upper()]
        date.year = int(parts[1])
    elif count == 1:
        date.day = None
        date.month = None
        date.year = int(parts[0])
def read_time(self, value):
    # TIME tag: fills in the time fields of the GedDate currently on top of
    # the stack (pushed by read_date).  Note: nothing new is pushed here.
    date = self.objectstack[-1]
    self.parse_time(date, value)
def parse_time(self, date, line):
    # Parse an 'HH:MM:SS' value into date.hour/.minute/.seconde.
    parts = line.rsplit(':')
    date.hour = int(parts[0])
    date.minute = int(parts[1])
    date.seconde = int(parts[2])
def handle_continue(self, text):
    # CONT tag: continue the current value on a new line -- a newline plus
    # whatever text (if any) follows the tag.
    self.handle_concat_continue('\n' if text is None else '\n' + text)
def handle_concat(self, text):
    # CONC tag: concatenate text onto the current value, without a newline.
    self.handle_concat_continue(text)
def handle_concat_continue(self, text):
    # Append `text` to whatever value is currently being built.  The top of
    # the stack is either an Address, a marker string pushed by a dedicated
    # handler ('FILE', 'TITL', 'TEXT', 'AUTH', 'PUBL') naming which attribute
    # of the object *below* it to extend, or an object with a `text` attribute.
    if type(self.objectstack[-1]) is Address:
        self.objectstack[-1].text = self.objectstack[-1].text + text
    elif len(self.objectstack) > 1 and self.objectstack[-1] == 'FILE':
        self.objectstack[-2].filename = self.objectstack[-2].filename + text
    elif len(self.objectstack) > 1 and self.objectstack[-1] == 'TITL':
        self.objectstack[-2].title = self.objectstack[-2].title + text
    elif len(self.objectstack) > 1 and self.objectstack[-1] == 'TEXT':
        self.objectstack[-2].text = self.objectstack[-2].text + text
    elif len(self.objectstack) > 1 and self.objectstack[-1] == 'AUTH':
        self.objectstack[-2].originator = self.objectstack[-2].originator + text
    elif len(self.objectstack) > 1 and self.objectstack[-1] == 'PUBL':
        self.objectstack[-2].publication = self.objectstack[-2].publication + text
    else:
        self.objectstack[-1].text = self.objectstack[-1].text + text
    # push the fragment itself so handle()'s level bookkeeping stays balanced
    self.objectstack.append(text)
def handle_name(self, personal_name):
    # NAME tag: for a Submitter the name is a plain string attribute; for an
    # individual a structured Individual_Name is created (its parts are
    # filled in by subordinate tags) and the raw value is also kept.
    if type(self.objectstack[-1]) == Submitter:
        self.objectstack[-1].name = personal_name
        self.objectstack.append(personal_name)
    else:
        name = Individual_Name()
        self.objectstack[-1].names.append(name)
        self.objectstack[-1].personal_name = personal_name
        self.objectstack.append(name)
def handle_address(self, address_line):
    # ADDR tag: attach a fresh Address to the current object and push it;
    # text on the ADDR line itself becomes the first address fragment.
    address = self.objectstack[-1].address = Address()
    self.objectstack.append(address)
    if address_line != None:
        address.text = address_line
# The following handlers record one address sub-tag value each on the current
# Address, then push the raw value so handle()'s level bookkeeping stays
# balanced.

def handle_address_postal_code(self, postal_code):
    self.objectstack[-1].postal_code = postal_code
    self.objectstack.append(postal_code)

def handle_address_line1(self, address_line1):
    self.objectstack[-1].address_line1 = address_line1
    self.objectstack.append(address_line1)

def handle_address_line2(self, address_line2):
    self.objectstack[-1].address_line2 = address_line2
    self.objectstack.append(address_line2)

def handle_address_city(self, city):
    self.objectstack[-1].city = city
    self.objectstack.append(city)

def handle_address_state(self, state):
    self.objectstack[-1].state = state
    self.objectstack.append(state)

def handle_address_contry(self, contry):
    # note: 'contry' (sic) is the established attribute/method spelling
    self.objectstack[-1].contry = contry
    self.objectstack.append(contry)
def handle_phone_number(self, phone_number):
    # PHON tag: attach a PhoneNumber to the current object and push it;
    # the value on the line itself (if any) is stored on it.
    phone = self.objectstack[-1].phone_number = PhoneNumber()
    self.objectstack.append(phone)
    if phone_number != None:
        phone.value = phone_number
def handle_asso(self, ident):
    # ASSO tag: cross-referenced associations (ident != None) are registered
    # and reused; anonymous ones are created fresh.  Either way the
    # association is linked to the current object and pushed.
    association = None
    if ident != None:
        if ident in self.objects['ASSO']:
            association = self.objects['ASSO'][ident]
        else:
            association = Association()
            association.ident = ident
            self.objects['ASSO'][ident] = association
    else:
        association = Association()
    self.objectstack[-1].associations.append(association)
    self.objectstack.append(association)
def handle_type(self, type):
    # TYPE tag: free-text qualifier for the current object.
    self.objectstack[-1].type = type
    self.objectstack.append(type)

def handle_relation(self, relation):
    # RELA tag: how the associated person is related.
    self.objectstack[-1].relation = relation
    self.objectstack.append(relation)

def handle_change_date(self):
    # CHAN tag: attach a Change record; its DATE/NOTE sub-tags follow.
    change = Change()
    self.objectstack[-1].change = change
    self.objectstack.append(change)

def handle_family_child_pedigree(self, pedigree):
    # PEDI tag: pedigree linkage type of a child-to-family (FAMC) link.
    self.objectstack[-1].pedigree = pedigree
    self.objectstack.append(pedigree)
def handle_family_child(self, ident):
family = self.read_family(ident)
#self.objectstack[-2].familychildren.append(family)
def handle_event_age(self, age):
self.objectstack[-1].age = age
self.objectstack.append(age)
def handle_event_responsible_agency(self, responsible_agency):
self.objectstack[-1].responsible_agency = responsible_agency
self.objectstack.append(responsible_agency)
def handle_event_cause(self, cause):
self.objectstack[-1].cause = cause
self.objectstack.append(cause)
def handle_event(self, type):
event = Event()
event.type = type
self.objectstack[-1].events.append(event)
self.objectstack.append(event)
def handle_individual_sex(self, value):
self.objectstack[-1].sex = value
self.objectstack.append(value)
def handle_header_version(self, version):
self.objectstack[-1].version = version
self.objectstack.append(version)
def handle_filename(self, filename):
self.objectstack[-1].filename = filename
self.objectstack.append('FILE')
def handle_text(self, text):
self.objectstack[-1].text = text
self.objectstack.append('TEXT')
def handle_originator(self, originator):
self.objectstack[-1].originator = originator
self.objectstack.append('AUTH')
def handle_publication(self, publication):
self.objectstack[-1].publication = publication
self.objectstack.append('PUBL')
def handle(self, tuple):
# All line except end of file
# detect end of object description by a level decrease
if (tuple[1] != 'TRLR'):
# when the processed tuple have a level lesser than the current level
# we finish to work on current object
while tuple[0] <= self.currentlevel:
poped = self.objectstack.pop()
self.currentlevel = self.currentlevel - 1
self.currentlevel = tuple[0]
# Start of file
# ######################################################################
# All level
# ######################################################################
# Header specific structure elements
if tuple[1] == 'VERS':
self.handle_header_version(tuple[3])
# Address specific structure elements
elif tuple[1] == 'ADDR':
self.handle_address(tuple[3])
elif tuple[1] == 'ADR1':
self.handle_address_line1(tuple[3])
elif tuple[1] == 'ADR2':
self.handle_address_line2(tuple[3])
elif tuple[1] == 'CITY':
self.handle_address_city(tuple[3])
elif tuple[1] == 'STAE':
self.handle_address_state(tuple[3])
elif tuple[1] == 'POST':
self.handle_address_postal_code(tuple[3])
elif tuple[1] == 'CTRY':
self.handle_address_contry(tuple[3])
elif tuple[1] == 'PHON':
self.handle_phone_number(tuple[3])
# Association specific structure elements
elif tuple[1] == 'ASSO':
self.handle_asso(tuple[2])
elif tuple[1] == 'TYPE': # also for event
self.handle_type(tuple[3])
elif tuple[1] == 'RELA':
self.handle_relation(tuple[3])
# Change date specific structure elements
elif tuple[1] == 'CHAN':
self.handle_change_date()
# Child to family specific structure elements
elif tuple[1] == 'FAMC':
self.handle_family_child(tuple[2])
elif tuple[1] == 'PEDI':
self.handle_family_child_pedigree()
# Event specific structure elements
elif tuple[1] == 'AGE':
self.handle_event_age(tuple[3])
elif tuple[1] == 'AGNC':
self.handle_event_responsible_agency(tuple[3])
elif tuple[1] == 'CAUS':
self.handle_event_cause(tuple[3])
# Family events specific structure elements
elif tuple[1] in ('ANUL', 'CENS', 'DIV', 'DIVF', 'ENGA', 'MARR', 'MARB', 'MARC', 'MARL', 'MARS', 'EVEN'):
self.handle_event(tuple[1])
# Individual attributes specific structure elements
# TODO
elif tuple[1] == 'SEX':
self.handle_individual_sex(tuple[3])
# Individual events specific structure elements
elif tuple[1] in ('BIRT', 'CHR', 'DEAT', 'BURI', 'CREM', 'ADOP', 'BAPM', 'BARM', 'BASM', 'BLES', 'CHRA', 'CONF', 'FCOM', 'ORDN', 'NATU', 'EMIG', 'IMMI', 'CENS', 'PROB', 'WILL', 'GRAD', 'RETI', 'EVEN'):
self.handle_event(tuple[1])
# others
elif tuple[1] == 'DATE':
self.read_date(tuple[3])
elif tuple[1] == 'TITL':
self.objectstack[-1].title = tuple[3]
self.objectstack.append(tuple[1])
elif tuple[1] == 'TEXT':
self.handle_text(tuple[3])
elif tuple[1] == 'CONT':
self.handle_continue(tuple[3])
elif tuple[1] == 'CONC':
self.handle_concat(tuple[3])
elif tuple[1] == 'FILE':
self.handle_filename(tuple[3])
# Source specific structure elements
elif tuple[1] == 'AUTH':
self.handle_originator(tuple[3])
elif tuple[1] == 'PUBL':
self.handle_publication(tuple[3])
# Nothing to do
elif tuple[1] in ('ABBR'):
self.objectstack.append(tuple[1])
# ######################################################################
# Level 0
# ######################################################################
elif tuple[0] == 0:
if tuple[1] == 'HEAD':
self.familyTree = FamilyTree()
self.objectstack.append(self.familyTree)
elif tuple[1] == 'INDI':
self.read_individual(tuple[2])
elif tuple[1] == 'FAM':
self.read_family(tuple[2])
elif tuple[1] == 'SOUR':
self.read_source(tuple[2])
elif tuple[1] == 'NOTE':
self.read_note(tuple[2], tuple[3])
elif tuple[1] == 'SUBM':
self.read_submitter(tuple[2])
elif tuple[1] == 'TRLR':
pass
else:
print('not handled', tuple)
self.objectstack.append(tuple[1])
# ######################################################################
# Level 1
# ######################################################################
elif tuple[0] == 1:
if tuple[1] == 'NAME':
self.handle_name(tuple[3])
# Spouse of a family (father or mother)
elif tuple[1] == 'FAMS':
family = self.read_family(tuple[2])
self.objectstack[-2].familyspouse.append(family)
elif tuple[1] == 'HUSB':
individual = self.read_individual(tuple[2])
self.objectstack[-2].husband = individual
elif tuple[1] == 'WIFE':
individual = self.read_individual(tuple[2])
self.objectstack[-2].wife = individual
elif tuple[1] == 'CHIL':
individual = self.read_individual(tuple[2])
self.objectstack[-2].children.append(individual)
elif tuple[1] == 'SOUR':
if isinstance(self.objectstack[-1], FamilyTree):
source = FamilyTreeSource()
source.system_id = tuple[3]
self.objectstack[-1].source = source
self.objectstack.append(source)
else:
source = self.read_source(tuple[2])
self.objectstack[-2].sources.append(source)
elif tuple[1] == 'COPR':
self.objectstack[-1].copyright = tuple[3]
self.objectstack.append(tuple[1])
elif tuple[1] == 'GEDC':
info = GedcomInfo()
self.objectstack[-1].info = info
self.objectstack.append(info)
elif tuple[1] == 'CHAR':
self.objectstack[-1].charset = tuple[3]
self.objectstack.append(tuple[1])
elif tuple[1] == 'LANG':
self.objectstack[-1].langage = tuple[3]
self.objectstack.append(tuple[1])
elif tuple[1] == 'OBJE':
anobject = Object(None)
self.objectstack[-1].objects.append(anobject)
self.objectstack.append(anobject)
elif tuple[1] == 'SUBM':
submitter = self.read_submitter(tuple[2])
elif tuple[1] == 'OCCU':
event = Event()
event.type = 'OCCU'
event.text = tuple[3]
self.objectstack[-1].events.append(event)
self.objectstack.append(event)
elif tuple[1] == 'NOTE':
note = self.read_note(tuple[2], tuple[3])
else:
print('not handled', tuple)
self.objectstack.append(tuple[1])
# ######################################################################
# Level 2
# ######################################################################
elif tuple[0] == 2:
if tuple[1] == 'NAME':
self.objectstack[-1].name = tuple[3]
self.objectstack.append(tuple[3])
# firstname of an individual
elif tuple[1] == 'GIVN':
self.objectstack[-1].firstname = tuple[3]
self.objectstack.append(tuple[3])
# last name of an individual
elif tuple[1] == 'SURN':
self.objectstack[-1].lastname = tuple[3]
self.objectstack.append(tuple[3])
# last name prefix of an individual
elif tuple[1] == 'SPFX':
self.objectstack[-1].firstname_prefix = tuple[3]
self.objectstack.append(tuple[3])
# prefix of an individual name
elif tuple[1] == 'NPFX':
self.objectstack[-1].prefix = tuple[3]
self.objectstack.append(tuple[3])
# suffix of an individual name
elif tuple[1] == 'NSFX':
self.objectstack[-1].suffix = tuple[3]
self.objectstack.append(tuple[3])
# nickname of an individual name
elif tuple[1] == 'NICK':
self.objectstack[-1].nickname = tuple[3]
self.objectstack.append(tuple[3])
# a place (for events...)
elif tuple[1] == 'PLAC':
place = self.objectstack[-1].place = Place()
place.place = tuple[3]
self.objectstack.append(place)
elif tuple[1] == 'NOTE':
note = self.read_note(tuple[2], tuple[3])
elif tuple[1] == 'FORM':
self.objectstack[-1].format = tuple[3]
self.objectstack.append(tuple[3])
elif tuple[1] == 'DATA':
data = SourceData()
self.objectstack[-1].data = data
self.objectstack.append(data)
elif tuple[1] == 'SOUR':
source = self.read_source(tuple[2])
self.objectstack[-2].sources.append(source)
elif tuple[1] == 'CORP':
corporation = Corporation()
corporation.name = tuple[3]
self.objectstack[-1].corporation = corporation
self.objectstack.append(corporation)
elif tuple[1] == 'TIME':
self.read_time(tuple[3])
self.objectstack.append(tuple[1])
elif tuple[1] == 'QUAY':
self.objectstack[-1].certainty = tuple[3]
self.objectstack.append(tuple[3])
else:
print('not handled', tuple)
self.objectstack.append(tuple[1])
# ######################################################################
# Level 3
# ######################################################################
elif tuple[0] == 3:
# a time (for events...)
if tuple[1] == 'TIME':
self.parse_time(self.objectstack[-1], tuple[3])
self.objectstack.append(tuple[3])
# a map infos for a place
elif tuple[1] == 'MAP':
amap = self.objectstack[-1].mapplace = Map()
self.objectstack.append(amap)
elif tuple[1] == 'QUAY':
self.objectstack[-1].certainty = tuple[3]
self.objectstack.append(tuple[3])
elif tuple[1] == 'DATA':
data = SourceData()
self.objectstack[-1].data = data
self.objectstack.append(data)
else:
print('not handled', tuple)
self.objectstack.append(tuple[1])
# ######################################################################
# Level 4
# ######################################################################
elif tuple[0] == 4:
# latitude for map
if tuple[1] == 'LATI':
self.objectstack[-1].latitude = float(tuple[3])
self.objectstack.append(float(tuple[3]))
# longitude for map
elif tuple[1] == 'LONG':
self.objectstack[-1].longitude = float(tuple[3])
self.objectstack.append(float(tuple[3]))
else:
print('not handled', tuple)
self.objectstack.append(tuple[1])
else:
print('not handled', tuple)
self.objectstack.append(tuple[1])
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import decimal
from datetime import datetime as dt
from py4j.java_gateway import JavaGateway
class Bridge(object):
    """Thin py4j bridge that converts Python values for the Java side."""

    def __init__(self):
        # Connect to the default py4j gateway and keep its entry point.
        self.point = JavaGateway().entry_point

    def format(self, obj):
        """Recursively convert *obj* into a Java-compatible value.

        Falsy values other than 0 become ''; unsupported types fall
        through and yield None (Python 2: uses long/basestring/iteritems).
        """
        if obj != 0 and not obj:
            return ''
        if isinstance(obj, (int, float, long, basestring)):
            return obj
        if isinstance(obj, dt):
            return str(obj)
        if isinstance(obj, decimal.Decimal):
            return float(obj)
        if isinstance(obj, list):
            converted = self.point.create_list()
            for item in obj:
                converted.add(self.format(item))
            return converted
        if isinstance(obj, dict):
            mapping = self.point.create_hashmap()
            for key, value in obj.iteritems():
                mapping.put(key, self.format(value))
            return mapping

    def report(self, im, ou, header, style):
        """Delegate report generation to the Java entry point."""
        return self.point.report(im, ou, header, style)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import decimal
from datetime import datetime as dt
from py4j.java_gateway import JavaGateway
class Bridge(object):
    """Thin py4j bridge that converts Python values for the Java side."""

    def __init__(self):
        # Connect to the default py4j gateway and keep its entry point.
        self.point = JavaGateway().entry_point

    def format(self, obj):
        """Recursively convert *obj* into a Java-compatible value.

        Falsy values other than 0 become ''; unsupported types fall
        through and yield None (Python 2: uses long/basestring/iteritems).
        """
        if obj != 0 and not obj:
            return ''
        if isinstance(obj, (int, float, long, basestring)):
            return obj
        if isinstance(obj, dt):
            return str(obj)
        if isinstance(obj, decimal.Decimal):
            return float(obj)
        if isinstance(obj, list):
            converted = self.point.create_list()
            for item in obj:
                converted.add(self.format(item))
            return converted
        if isinstance(obj, dict):
            mapping = self.point.create_hashmap()
            for key, value in obj.iteritems():
                mapping.put(key, self.format(value))
            return mapping

    def report(self, im, ou, header, style):
        """Delegate report generation to the Java entry point."""
        return self.point.report(im, ou, header, style)
#!/usr/bin/env python
import sys
import readline
import rlcompleter
readline.parse_and_bind("tab: complete")
from rflib import *
# Banner printed when entering the interactive (-r/--research) shell.
intro = """'RfCat, the greatest thing since Frequency Hopping!'
Don't you wish this were a CLI!? Sorry. Maybe soon...
For now, enjoy the raw power of rflib, or write your own device-specific CLI!
currently your environment has an object called "d" for dongle. this is how
you interact with the rfcat dongle, for :
>>> d.ping()
>>> d.setFreq(433000000)
>>> d.setMdmModulation(MOD_ASK_OOK)
>>> d.makePktFLEN(250)
>>> d.RFxmit("HALLO")
>>> d.RFrecv()
>>> print d.reprRadioConfig()
"""
# Entry point: either drop into an interactive research shell with "d" bound
# to the dongle, or run the dongle in full rfcat pipe mode on stdin/stdout.
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-r', '--research', default=False, action="store_true", help='Interactive Python and the "d" instance to talk to your dongle. melikey longtime.')
    parser.add_argument('-i', '--index', default=0, type=int)
    ifo = parser.parse_args()
    if ifo.research:
        # interactive() comes from rflib (star import above).
        interactive(ifo.index, DongleClass=RfCat, intro=intro)
    else:
        # do the full-rfcat thing
        d = RfCat(ifo.index)
        # NOTE(review): this forwards *all* parsed args (research, index) as
        # keyword arguments -- confirm rf_configure tolerates extras.
        d.rf_configure(**ifo.__dict__)
        d.rf_redirection((sys.stdin, sys.stdout))
| Python |
#!/usr/bin/python
import cmd
import os
import re
import select
import socket
import struct
import sys
import threading

from rflib import *
DATA_START_IDX = 4 # without the app/cmd/len bytes, the data starts at byte 4
def splitargs(cmdline):
    """Split a command line into tokens, keeping double-quoted groups intact.

    Only the first line of *cmdline* is tokenized; surrounding quotes are
    stripped from quoted tokens.
    """
    # Normalize escaped quotes before tokenizing.
    cleaned = cmdline.replace('\\\\"', '"').replace('\\"', '')
    pattern = re.compile('\".+?\"|\S+')
    for first_line in cleaned.split('\n'):
        return [token.strip('"') for token in pattern.findall(first_line)]
# Friendly aliases for the radio strobe-state constants exported by rflib.
RX = RFST_SRX
TX = RFST_STX
IDLE = RFST_SIDLE
CAL = RFST_SCAL
# User-facing sync-mode names mapped to their rflib SYNCM_* constants.
SYNC_MODES = {
    "NONE" : SYNCM_NONE,
    "15/16" : SYNCM_15_of_16,
    "16/16" : SYNCM_16_of_16,
    "CS" : SYNCM_CARRIER,
    "CS15/16" : SYNCM_CARRIER_15_of_16,
    "CS16/16" : SYNCM_CARRIER_16_of_16,
    "CS30/32" : SYNCM_CARRIER_30_of_32,
    }
# Upper bound on bytes pulled per recv() while scanning for a newline.
READLINE_MAX_READ_LEN = 1000
class FileSocket(socket.socket):
    """File-like wrapper around a connected socket, used as cmd.Cmd stdio.

    NOTE(review): subclasses socket.socket but never runs its __init__; all
    real work is delegated to the wrapped ``_sock``. This relies on Python 2
    socket internals -- verify before porting.
    """
    def __init__(self, sock):
        # _buf accumulates received bytes until a full line is available.
        self._buf = ''
        self._sock = sock
    def __getattr__(self, attr):
        # Only invoked when normal lookup fails: delegate to the wrapped socket.
        if hasattr(self._sock, attr):
            return getattr(self._sock, attr)
        # NOTE(review): hasattr(self, attr) re-enters __getattr__ (recursion
        # hazard), and an unknown attribute falls through to return None
        # instead of raising AttributeError -- confirm this is intended.
        elif hasattr(self, attr):
            return getattr(self.__class__, attr)
    def write(self, data):
        # sendall resolves through __getattr__ to the wrapped socket.
        return self.sendall(data)
    def read(self, maxlen):
        return self.recv(maxlen)
    def readline(self):
        # Block until a newline arrives; return the line without the newline.
        idx = self._buf.find('\n')
        while idx == -1:
            self._buf += self.read(READLINE_MAX_READ_LEN)
            idx = self._buf.find('\n')
        data = self._buf[:idx]
        self._buf = self._buf[idx+1:]
        return data
    def flush(self):
        # No buffering on the write side; nothing to flush.
        pass
class KillCfgLoop(Exception):
    """Raised to break out of the config cmdloop (see _cfgRun)."""
    pass
class CC1111NIC_Server(cmd.Cmd):
intro = """
welcome to the cc1111usb interactive config tool. hack fun!
"""
def __init__(self, nicidx=0, ip='0.0.0.0', nicport=1900, cfgport=1899, go=True, printable=False, rawinput=False):
cmd.Cmd.__init__(self)
self.use_rawinput = rawinput
self.printable = printable
self.nic = FHSSNIC(nicidx)
self._ip = ip
self._nicport = nicport
self._nicsock = None
self._cfgport = cfgport
self._cfgsock = None
self._cfgthread = None
self._pause = False
self.startConfigThread()
if go:
self.start()
def start(self):
self._go = True
while self._go:
# serve the NIC port
try:
self._nicsock = socket.socket()
s = self._nicsock
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((self._ip, self._nicport))
s.listen(100)
while True:
# implement pipe between the usb RF NIC and the TCP socket
try:
print >>sys.stderr,("Listening for NIC connection on port %d" % self._nicport)
self._nicsock = s.accept()
rs, addr = self._nicsock
rs.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
print "== received DATA connection from %s:%d ==" % (addr)
while True:
x,y,z = select.select([rs, ], [], [], .1)
if self._pause:
continue
if rs in x:
data = rs.recv(self.nic.max_packet_size)
if not len(data): # terminated socket
break
#FIXME: probably want to take in a length struct here and then only send when we have that many bytes...
if self.use_rawinput:
data = eval('"%s"'%data)
self.nic.RFxmit(data)
try:
data, time = self.nic.RFrecv(0)
if self.printable:
data = "\n"+str(time)+": "+repr(data)
else:
data = struct.pack("<L", time) + struct.pack("<H", len(data)) + data
rs.sendall(data)
except ChipconUsbTimeoutException:
pass
print >>sys.stderr,("NIC connection on port %d terminated" % self._nicport)
except KeyboardInterrupt:
self._go = False
break
except:
sys.excepthook(*sys.exc_info())
except KeyboardInterrupt:
self._go = False
except:
sys.excepthook(*sys.exc_info())
def startConfigThread(self):
self._cfgthread = threading.Thread(target=self._cfgRun)
self._cfgthread.setDaemon(True)
self._cfgthread.start()
def _cfgRun(self):
self._cfgsock = socket.socket()
s = self._cfgsock
s.bind((self._ip, self._cfgport))
s.listen(100)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
while True:
try:
self._cfgsock = s.accept()
rs,addr = self._cfgsock
rs.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
print "== received CONFIG connection from %s:%d ==" % (addr)
self.stdin = FileSocket(rs)
self.stdout = self.stdin
self.stderr = self.stdin
while True:
try:
self.cmdloop()
except KillCfgLoop:
break
except:
sys.excepthook(*sys.exc_info())
except:
sys.excepthook(*sys.exc_info())
def do_EOF(self, line):
self.Print("stopping the command loop now..")
#ok so just kill the connection :)
def Print(self, info):
print >>self.stdout,(info)
def do_stop(self, line):
"""
stop the nic
"""
self._pause = True
def do_start(self, line):
"""
start the nic
"""
self._pause = False
#### configuration ####
def do_rfmode(self, line):
'''
* RFMODE - set the radio in RX/IDLE/TX/CAL (CAL returns to IDLE)
'''
if len(line):
try:
self.nic.poke(X_RFST, eval(line))
except:
sys.excepthook(*sys.exc_info())
else:
self.Print(repr(self.getMARCSTATE()))
def do_calibrate(self, line):
'''
* CALIBRATE - force the radio to recalibrate. VCO characteristics will change with temperature and supply voltage changes
'''
self.Print("Calibrating radio...")
self.nic.setModeCAL()
while (self.getMARCSTATE()[1] not in (MARC_STATE_IDLE, MARC_STATE_RX, MARC_STATE_TX)):
sys.stdout.write('.')
self.Print("done calibrating.")
def do_modeTX(self, line):
'''
* modeTX - force the radio to the TX state. this should transmit a CARRIER only,
since there is no data which follows quickly. when the radio is placed in TX mode,
no TX_UNF timeouts occur unless at least some data is sent
'''
self.Print("Calibrating radio...")
self.nic.setModeTX()
while (self.getMARCSTATE()[1] not in (MARC_STATE_TX)):
sys.stdout.write('.')
self.Print("Radio has reached the TX state.")
def do_modeRX(self, line):
'''
* modeRX - force the radio to the RX state. this will allow the radio to receive
transmitted data and handle it. if an RX-timeout has been configured (not default)
then the radio should return to Idle state if the RX timeout is reached without
receiving a packet. if a packet is received, the radio returns to whatever state
it is configured to in MCSM1
'''
self.Print("Radio entering RX state...")
self.nic.setModeTX()
while (self.getMARCSTATE()[1] not in (MARC_STATE_RX)):
sys.stdout.write('.')
self.Print("Radio has reached the RX state.")
def do_modeIDLE(self, line):
'''
* modeIDLE - force the radio to the Idle state. no packets will be received or sent.
when exceptions occur in the radio, it must always be placed in the Idle state before
entering RX or TX. this is done automatically in the firmware (default for RX_OVF
and TX_UNF)
'''
self.Print("Radio entering IDLE state...")
self.nic.setModeIDLE()
while (self.getMARCSTATE()[1] not in (MARC_STATE_IDLE)):
sys.stdout.write('.')
self.Print("Radio has reached the IDLE state.")
def do_modeFSTXON(self, line):
'''
* modeFSTXON
'''
self.Print("Radio entering FSTXON state...")
self.nic.setModeFSTXON()
while (self.getMARCSTATE()[1] not in (MARC_STATE_FSTXON)):
sys.stdout.write('.')
self.Print("Radio has reached the FSTXON state.")
def do_modulation(self, line):
'''
* MODULATION - set the RF modulation scheme. values include "2FSK", "GFSK", "MSK", "ASK_OOK". note: GFSK/OOK/ASK only up to 250kbaud, MSK only above 26kbaud and no manchester encoding.
'''
if not len(line) or line not in ("2FSK", "GFSK", "MSK", "ASK_OOK"):
self.Print('need to give me one of the values "2FSK", "GFSK", "MSK", "ASK_OOK" got: "%s"' % line)
return
mod = eval("MOD_"+line.strip())
self.nic.setModeIDLE()
self.nic.setMdmModulation(mod)
self.nic.setModeRX()
def complete_modulation(self, text, line, begidx, endidx):
self.Print("complete_modulation: %s %s %s %s") % (repr(text), repr(line), repr(begidx), repr(endidx))
def do_baud(self, line):
'''
* baud <BAUDRATE> - set the datarate, then recalculate channel bandwidth, intermediate frequency, deviation, and offset
'''
baud = int(line)
self.nic.setMdmDRate(baud)
self.nic.calculateMdmDeviatn()
self.nic.calculatePktChanBW()
self.nic.calculateFsIF()
self.nic.calculateFsOffset()
def do_bw(self, line):
'''
* bw [channel_bandwidth] - allow the setting of bandwidth settings separately from "baud"
'''
if len(line):
bw = int(line)
self.nic.setMdmChanBW(bw)
else:
self.Print(self.nic.getMdmChanBW())
def do_drate(self, line):
'''
* drate [datarate hz] - allow the setting of datarate settings separately from "baud"
'''
if len(line):
baud = int(line)
self.nic.setMdmDRate(baud)
else:
self.Print(self.nic.getMdmDRate())
def do_chanspc(self, line):
'''
* chanspc [spacing hz] - set channel spacing
'''
if len(line):
chanspc = int(line)
self.nic.setMdmChanSpc(chanspc)
else:
self.Print(self.nic.getMdmChanSpc())
def do_channel(self, line):
'''
* channel [chan_num] - set the channel
'''
if len(line):
channr = int(line)
self.nic.setChannel(channr)
else:
self.Print(self.nic.getChannel())
def do_freq(self, line):
'''
* freq [frequency hz] - set the base frequency. CHANNL and CHAN_SPC are used to calculate positive offset from this.
'''
if len(line):
freq = int(line)
self.nic.setFreq(freq)
else:
self.Print(self.nic.getFreq())
def do_intfreq(self, line):
'''
* intfreq [IF] - allow the setting of Intermediate Frequency separately from "baud"
'''
if len(line):
IF = int(line)
self.nic.setFsIF(IF)
else:
self.Print(self.nic.getFsIF())
def do_freqoff(self, line):
'''
* freqoff [IF] - allow the setting of Frequency Offset separately from "baud"
'''
if len(line):
fo = int(line)
self.nic.setFsOffset(fo)
else:
self.Print(self.nic.getFsOffset())
def do_vlen(self, line):
'''
* VLEN - configure the NIC for variable-length packets. provide max packet size (FLEN to switch to Fixed)
'''
maxlen = int(line)
self.nic.makePktVLEN(maxlen)
def do_flen(self, line):
'''
* FLEN # - configure the NIC for fixed-length packets. provide packet size (VLEN to switch to Variable)
'''
length = int(line)
self.nic.makePktFLEN(length)
def do_syncword(self, line):
'''
* SYNCWORD #### [double]- set the SYNC word (SYNC1 and SYNC0) (double tells the radio to repeat SYNCWORD twice)
'''
if len(line):
syncword = int(line)
self.setMdmSyncWord(syncword)
else:
self.Print(self.getMdmSyncWord())
def do_syncmode(self, line):
'''
* SYNCMODE - set the SYNCMODE. values include "NONE", "15/16", "16/16", "CS", "CS15/16", "CS16/16", "CS30/32"
'''
if len(line):
syncmode = SYNC_MODES.get(line)
if syncmode is None:
self.Print("please provide a *valid* sync-mode. see the help.")
self.Print(self.nic.setMdmSyncMode(syncmode))
else:
self.Print(self.nic.getMdmSyncMode())
def do_pqt(self, line):
'''
* PQT - set the Preamble Quality Threshold. provide the number of bits (multiple of 4) for PQT. values will be rounded down. 0-3 disables PQT checking.
'''
if len(line):
pqt = int(line)
self.setPktPQT(pqt)
else:
self.Print(self.getPktPQT())
def do_addr(self, line):
'''
* ADDR - configure the NIC's ADDRESS
'''
if len(line):
addr = int(line)
self.nic.setPktAddr(addr)
else:
self.Print(self.nic.getPktAddr())
def do_addr_chk(self, line):
'''
* ADDR_CHK - filter based on the optional address byte. values include "NOCHK", "FULL", "BCAST", indicating no filtering, full filtering, and filtering with broadcasts
'''
if len(line):
addr = int(line)
self.nic.setAddr(addr)
else:
self.Print(self.nic.getAddr())
def do_datawhiten(self, line):
'''
* DATAWHITEN - configure data whitening, include 9-bit PN9 xor sequence in command
'''
if len(line):
if line.startswith("off") or line.startswith("OFF"):
self.nic.setEnablePktDataWhitening(False)
else:
self.nic.setEnablePktDataWhitening(True)
else:
self.Print(self.nic.getPktDataWhitening())
def do_manchester(self, line):
'''
* MANCHESTER [ON | OFF] - configure Manchester encoding to enhance successful transmission. cannot use with MSK modulation or the FEC/Interleaver.
'''
if len(line):
if line.startswith("off") or line.startswith("OFF"):
self.nic.setEnableMdmManchester(False)
else:
self.nic.setEnableMdmManchester(True)
else:
self.Print(self.nic.getEnableMdmManchester())
def do_fec(self, line):
'''
* FEC - enable/disable Forward Error Correction. only works with FIXED LENGTH packets.
'''
if len(line):
if line.startswith("off") or line.startswith("OFF"):
self.nic.setEnableMdmFEC(False)
else:
self.nic.setEnableMdmFEC(True)
else:
self.Print(self.nic.getEnableMdmFEC())
def do_crc(self, line):
'''
* CRC - enable/disable Cyclic Redundancy Check. the last two bytes of a packet will be
considered CRC16 bytes, helpful for determining bad packets.
'''
if len(line):
if line.startswith("off") or line.startswith("OFF"):
self.nic.setEnablePktCRC(False)
else:
self.nic.setEnablePktCRC(True)
else:
self.Print(self.nic.getEnablePktCRC())
def do_DEM_DCFILT(self, line):
'''
* DEM_DCFILT - enable/disable digital DC blocking filter before demodulator. typically not good to muck with.
'''
if len(line):
if line.startswith("off") or line.startswith("OFF"):
self.nic.setEnableMdmDCFilter(False)
else:
self.nic.setEnableMdmDCFilter(True)
else:
self.Print(self.nic.getEnableMdmDCFilter())
def do_MAGN_TARGET(self, line):
'''
* MAGN_TARGET - configure Carrier Sense
'''
if len(line):
pass
else:
pass
def do_MAC_LNA_GAIN(self, line):
'''
* MAX_LNA_GAIN - configure Carrier Sense Threshold
'''
if len(line):
pass
else:
pass
def do_MAX_DVGA_GAIN(self, line):
'''
* MAX_DVGA_GAIN - configure Carrier Sense Threshold (use
'''
if len(line):
pass
else:
pass
def do_CARRIER_SENSE_ABS_THR (self, line):
'''
* CARRIER_SENSE_ABS_THR - configure Carrier Sense Absolute Threshold - values include "6", "10", "14" indicating the dB increase in RSSI
'''
if len(line):
pass
else:
pass
def do_CARRIER_SENSE_REL_THR (self, line):
'''
* CARRIER_SENSE_REL_THR - configure Carrier Sense Relative Threshold - values include "DISABLE", and -7 thru +7 to indicate dB from MAGN_TARGET setting
'''
if len(line):
pass
else:
pass
def do_cca_mode (self, line):
'''
* CCA_MODE - select the Clear Channel Assessment mode. values include "DISABLE", "RSSI", "RECVING", "BOTH".
'''
if len(line):
pass
else:
pass
def do_PA_POWER (self, line):
'''
* PA_POWER - select which PATABLE to use for power settings (0-7) (see CC1110/CC1111 manual SWRS033G section 13.15 and 13.16)
'''
if len(line):
pass
else:
pass
def do_FS_AUTOCAL (self, line):
'''
* FS_AUTOCAL - select mode of auto-VCO-calibration. values include "ON", "OFF", "MANUAL", indicating that calibration should be done when turning the synthesizer ON/OFF or manually
'''
if len(line):
pass
else:
pass
def do_REGS_TEST(self, line):
'''
* set register: TEST2/TEST1/TEST0
(be very cautious! the firmware expects certain things like return-to-RX, and no RX-timeout, etc..)
'''
def do_REGS_AGCCTRL(self, line):
'''
* set register: AGCCTRL2/1/0
(be very cautious! the firmware expects certain things like return-to-RX, and no RX-timeout, etc..)
'''
def do_REG_PKTCTRL(self, line):
'''
* set register: PKTCTRL1/0
(be very cautious! the firmware expects certain things like return-to-RX, and no RX-timeout, etc..)
'''
def do_REG_PKTLEN(self, line):
'''
* set register: PKTLEN
(be very cautious! the firmware expects certain things like return-to-RX, and no RX-timeout, etc..)
'''
def do_REG_PKTSTATUS(self, line):
'''
* view register: PKTSTATUS
'''
def do_REGS_MDMCFG(self, line):
'''
* set registers: MDMCFG4/3/2/1/0
(be very cautious! the firmware expects certain things like return-to-RX, and no RX-timeout, etc..)
'''
def do_REGS_MCSM(self, line):
'''
* set register: MCSM2/1/0
(be very cautious! the firmware expects certain things like return-to-RX, and no RX-timeout, etc..)
'''
def do_REG_DEVIATN(self, line):
'''
* set register: DEVIATN
(be very cautious! the firmware expects certain things like return-to-RX, and no RX-timeout, etc..)
'''
def do_REGS_BSCFG_F0CCFG(self, line):
'''
* set register: BSCFG / FOCCFG
(be very cautious! the firmware expects certain things like return-to-RX, and no RX-timeout, etc..)
'''
def do_REGS_FSCTRL(self, line):
'''
* set register: FSCTRL1/0
(be very cautious! the firmware expects certain things like return-to-RX, and no RX-timeout, etc..)
'''
def do_REGS_FREQ(self, line):
'''
* set register: FREQ2/1/0
(be very cautious! the firmware expects certain things like return-to-RX, and no RX-timeout, etc..)
'''
def do_REGS_FREND(self, line):
'''
* set register: FREND1/0
(be very cautious! the firmware expects certain things like return-to-RX, and no RX-timeout, etc..)
'''
def do_REGS_PATABLE(self, line):
'''
* set register: PATABLE7/6/5/4/3/2/1/0
(be very cautious! the firmware expects certain things like return-to-RX, and no RX-timeout, etc..)
'''
def do_show_config(self, line):
'''
* show_config - Print a represented string of the radio configuration
'''
self.Print(self.nic.reprRadioConfig())
def do_dump_config(self, line):
'''
* dump_config - Print a hex representation of the radio configuration registers
'''
self.Print(repr(self.nic.getRadioConfig()))
def do_hack_loose_settings(self, line):
'''
* loose - no CRC, no FEC, no Data Whitening, no sync-word, carrier based receive, etc...
'''
def do_upload_config(self, line):
'''
* upload_config - configure the radio using a python repr string provided to the command
(be very cautious! the firmware expects certain things like return-to-RX, and no RX-timeout, etc..)
'''
self.nic.setModeIDLE()
print "loading config from bytes: %s" % repr(line)
self.setRadioConfig(line)
self.nic.setModeRX()
def do_download_config(self, line):
'''
* download_config - pull current config bytes from radio and dump them in a python repr string
'''
self.Print(repr(self.nic.getRadioConfig()))
def do_save_config(self, line):
'''
* save_config <filename> - save the radio configuration to a file you specify
'''
file(line, "wb").write(repr(self.nic.getRadioConfig()))
def do_load_config(self, line):
'''
* load_config
(be very cautious! the firmware expects certain things like return-to-RX, and no RX-timeout, etc..)
'''
config = file(line, "rb").read()
self.nic.setModeIDLE()
print "loading config from bytes: %s" % repr(config)
self.setRadioConfig(config)
self.nic.setModeRX()
def do_peek(self, line):
'''
* peek <xaddr> [len] - view memory at whatever XDATA address (see the code for details on the memory layout)
'''
args = splitargs(line)
if 0 < len(args) < 3:
if len(args) == 1:
args.append('1')
self.Print(self.peek(int(args[0])), int(args[1]).encode('hex'))
else:
self.Print("please provide exactly one xdata address and an optional length!")
def do_poke(self, line):
'''
* poke - update memory at whatever XDATA address (see the code for details on the memory layout)
'''
args = splitargs(line)
try:
self.nic.poke(int(args[0]), args[1].decode('hex'))
except:
self.Print("please provide exactly one xdata address and hex data")
def do_ping(self, line):
'''
* ping - hello? is the dongle still responding?
'''
self.Print("Successful: %d, Failed: %d, Time: %f") % self.ping()
def do_debug_codes(self, line):
'''
* debugcodes - see what the firmware has stored in it's lastCode[] array
'''
self.Print("lastcode: [%x, %x]") % (self.getDebugCodes())
def do_RESET(self, line):
'''
* reset the dongle
'''
self.Print("Sending the RESET command. Please be patient....")
self.nic.RESET()
def do_rssi(self, line):
'''
* get the last RSSI value
'''
self.Print("RSSI: %x" % (ord(self.getRSSI()) & 0x7f) )
def do_lqi(self, line):
'''
* get the last LQI value
'''
self.Print("%x") % (ord(self.getLQI()))
def do_rfregister(self, line):
'''
rfregister <reg> [value] - set or read a RF register
'''
if len(line):
args = splitargs(line)
val = eval(args[0])
try:
if len(args) > 1:
self.nic.setRFRegister(val)
else:
self.Print("%s : %x" % (args[0], ord(self.nic.peek(val))))
except:
self.Print(sys.exc_info())
else:
self.Print("must include the register to get/set")
def do_printable(self, line):
    '''
    printable y/n - repr() all data sent to TCP socket for readability of binary data
    '''
    # No argument means "enable"; otherwise anything starting with 'y' or
    # '1' enables and everything else disables.
    enable = True
    if len(line):
        args = splitargs(line)
        enable = args[0].lower()[0] in ('y', '1')
    self.printable = enable
    self.Print("printable output is: %s" % ('disabled','enabled')[self.printable])
def do_rawinput(self, line):
    '''
    rawinput y/n - accept in pythonic string reprs (eg. \x80 gets translated to the actual byte '\x80')
    input data is run through 'eval' before sending to get a real string from your typed binary input.
    '''
    # Same toggle convention as do_printable: bare command enables, an
    # argument starting with 'y'/'1' enables, anything else disables.
    enable = True
    if len(line):
        args = splitargs(line)
        enable = args[0].lower()[0] in ('y', '1')
    self.use_rawinput = enable
    self.Print("raw input is: %s" % ('disabled','enabled')[self.use_rawinput])
'''
def getInterruptRegisters(self):
def testTX(self, data="XYZABCDEFGHIJKL"):
def lowball(self, level=1):
def lowballRestore(self):
def getMACthreshold(self):
def setMACthreshold(self, value):
def setFHSSstate(self, state):
def getFHSSstate(self):
def mac_SyncCell(self, CellID=0x0000):
'''
if __name__ == "__main__":
    # Script entry point: stand up the NIC TCP server (presumably blocks
    # serving shell clients -- the constructor is defined earlier in this file)
    dongleserver = CC1111NIC_Server()
| Python |
import sys
import os
from distutils.core import setup, Extension
# Distribution metadata for the rfcat toolkit: pure-python packages only,
# no compiled extensions, and the two console entry-point scripts.
packages = ['rflib', 'vstruct', 'vstruct.defs']
mods = []       # no C extension modules
pkgdata = {}    # no extra package data files
scripts = ['rfcat', 'rfcat_server',
           ]

setup (name = 'rfcat',
       version = '1.0',
       description = "the swiss army knife of subGHz",
       author = 'atlas of d00m',
       author_email = 'atlas@r4780y.com',
       #include_dirs = [],
       packages = packages,
       package_data = pkgdata,
       ext_modules = mods,
       scripts = scripts
       )
| Python |
import struct
from StringIO import StringIO
import vstruct.primitives as vs_prims
def isVstructType(x):
    """Return True when *x* is any vstruct object (primitive or structure)."""
    return isinstance(x, vs_prims.v_base)
class VStruct(vs_prims.v_base):
    """
    Base class for composite binary structures.  Assigning any vstruct
    type to an attribute creates a new field; parsing and emitting are
    driven by struct format strings gathered from the nested primitives.
    """
    def __init__(self, bigend=False):
        # A tiny bit of evil...
        # (bypass our own __setattr__ -- the field dict must exist before
        # any of the attribute magic below can run)
        object.__setattr__(self, "_vs_values", {})
        vs_prims.v_base.__init__(self)
        self._vs_name = self.__class__.__name__
        self._vs_fields = []                    # field names, declaration order
        self._vs_field_align = False # To toggle visual studio style packing
        self._vs_padnum = 0                     # counter for generated pad fields
        self._vs_fmtbase = '<'                  # little endian unless bigend
        if bigend:
            self._vs_fmtbase = '>'

    def vsGetClassPath(self):
        '''
        Return the entire class name (including module path).
        '''
        return '%s.%s' % (self.__module__, self._vs_name)

    def vsParse(self, bytes, offset=0):
        """
        For all the primitives contained within, allow them
        an opportunity to parse the given data and return the
        total offset...
        """
        plist = self.vsGetPrims()
        fmt = self.vsGetFormat()
        size = struct.calcsize(fmt)
        vals = struct.unpack(fmt, bytes[offset:offset+size])
        # unpacked values arrive in the same order vsGetPrims() walks fields
        for i in range(len(plist)):
            plist[i].vsSetParsedValue(vals[i])

    def vsEmit(self):
        """
        Get back the byte sequence associated with this structure.
        """
        fmt = self.vsGetFormat()
        r = []
        for p in self.vsGetPrims():
            r.append(p.vsGetFmtValue())
        return struct.pack(fmt, *r)

    def vsGetFormat(self):
        """
        Return the format specifier which would then be used
        """
        # Unpack everything little endian, let vsParseValue deal...
        ret = self._vs_fmtbase
        for p in self.vsGetPrims():
            ret += p.vsGetFormat()
        return ret

    def vsIsPrim(self):
        # structures are containers, never leaves
        return False

    def vsGetFields(self):
        """Return [(name, field_object), ...] in declaration order."""
        ret = []
        for fname in self._vs_fields:
            fobj = self._vs_values.get(fname)
            ret.append((fname,fobj))
        return ret

    def vsGetField(self, name):
        """Return the raw field *object* (not its dereferenced value)."""
        x = self._vs_values.get(name)
        if x == None:
            raise Exception("Invalid field: %s" % name)
        return x

    def vsHasField(self, name):
        """True when a field with the given name exists."""
        return self._vs_values.get(name) != None

    def vsSetField(self, name, value):
        """Assign to an existing field (vstruct objects replace wholesale)."""
        if isVstructType(value):
            self._vs_values[name] = value
            return
        x = self._vs_values.get(name)
        return x.vsSetValue(value)

    # FIXME implement more arithmetic for structs...
    def __ixor__(self, other):
        # field-wise ^= against another structure with matching field names
        for name,value in other._vs_values.items():
            self._vs_values[name] ^= value
        return self

    def vsAddField(self, name, value):
        """Append a new field; only vstruct types may be added."""
        if not isVstructType(value):
            raise Exception("Added fields MUST be vstruct types!")

        # Do optional field alignment...
        if self._vs_field_align:

            # If it's a primitive, all is well, if not, pad to size of
            # the first element of the VStruct/VArray...
            if value.vsIsPrim():
                align = len(value)
            else:
                fname = value._vs_fields[0]
                align = len(value._vs_values.get(fname))

            delta = len(self) % align
            if delta != 0:
                print "PADDING %s by %d" % (name,align-delta)
                pname = "_pad%d" % self._vs_padnum
                self._vs_padnum += 1
                self._vs_fields.append(pname)
                self._vs_values[pname] = vs_prims.v_bytes(align-delta)

        self._vs_fields.append(name)
        self._vs_values[name] = value

    def vsGetPrims(self):
        """
        return an order'd list of the primitive fields in this
        structure definition.  This is recursive and will return
        the sub fields of all nested structures.
        """
        ret = []
        for name, field in self.vsGetFields():
            if field.vsIsPrim():
                ret.append(field)
            else:
                ret.extend(field.vsGetPrims())
        return ret

    def vsGetTypeName(self):
        return self._vs_name

    def vsGetOffset(self, name):
        """
        Return the offset of a member.
        """
        # walk the fields summing byte widths until we hit the target
        offset = 0
        for fname in self._vs_fields:
            if name == fname:
                return offset
            x = self._vs_values.get(fname)
            offset += len(x)
        raise Exception("Invalid Field Specified!")

    def vsGetPrintInfo(self, offset=0, indent=0, top=True):
        """Recursively collect (offset, indent, name, field) display rows."""
        ret = []
        if top:
            ret.append((offset, indent, self._vs_name, self))
        indent += 1
        for fname in self._vs_fields:
            x = self._vs_values.get(fname)
            off = offset + self.vsGetOffset(fname)
            if isinstance(x, VStruct):
                ret.append((off, indent, fname, x))
                ret.extend(x.vsGetPrintInfo(offset=off, indent=indent, top=False))
            else:
                ret.append((off, indent, fname, x))
        return ret

    def __len__(self):
        # packed byte length of the whole structure
        fmt = self.vsGetFormat()
        return struct.calcsize(fmt)

    def __getattr__(self, name):
        # Gotta do this for pickle issues...
        vsvals = self.__dict__.get("_vs_values")
        if vsvals == None:
            vsvals = {}
            self.__dict__["_vs_values"] = vsvals
        r = vsvals.get(name)
        if r is None:
            raise AttributeError(name)
        # primitives dereference to their python value; structs come back whole
        if isinstance(r, vs_prims.v_prim):
            return r.vsGetValue()
        return r

    def __setattr__(self, name, value):
        # If we have this field, asign to it
        x = self._vs_values.get(name, None)
        if x != None:
            return self.vsSetField(name, value)

        # If it's a vstruct type, create a new field
        if isVstructType(value):
            return self.vsAddField(name, value)

        # Fail over to standard object attribute behavior
        return object.__setattr__(self, name, value)

    def __iter__(self):
        # Our iteration returns name,field pairs
        ret = []
        for name in self._vs_fields:
            ret.append((name, self._vs_values.get(name)))
        return iter(ret)

    def __repr__(self):
        return self._vs_name

    def tree(self, va=0, reprmax=None):
        """Render a human-readable field dump, offset from base address *va*."""
        ret = ""
        for off, indent, name, field in self.vsGetPrintInfo():
            rstr = field.vsGetTypeName()
            if isinstance(field, vs_prims.v_number):
                val = field.vsGetValue()
                rstr = '0x%.8x (%d)' % (val,val)
            elif isinstance(field, vs_prims.v_prim):
                rstr = repr(field)
            # truncate long reprs (e.g. big byte fields) for display
            if reprmax != None and len(rstr) > reprmax:
                rstr = rstr[:reprmax] + '...'
            ret += "%.8x (%.2d)%s %s: %s\n" % (va+off, len(field), " "*(indent*2),name,rstr)
        return ret
class VArray(VStruct):
    """A VStruct whose fields are numbered sequentially, array-style."""

    def __init__(self, elems=()):
        VStruct.__init__(self)
        for element in elems:
            self.vsAddElement(element)

    def vsAddElement(self, elem):
        """
        Used to add elements to an array
        """
        # the new element's field name is simply its index
        self.vsAddField("%d" % len(self._vs_fields), elem)

    def __getitem__(self, index):
        return self.vsGetField("%d" % index)

    # FIXME: slice assignment is not yet supported
def resolve(impmod, nameparts):
    """
    Resolve the given (potentially nested) object
    from within a module.

    Returns None for an empty path or when any component of the
    path is missing.
    """
    if not nameparts:
        return None

    m = impmod
    for nname in nameparts:
        m = getattr(m, nname, None)
        # idiom fix: identity check against the None singleton -- avoids
        # invoking an arbitrary __eq__ on whatever object was resolved
        if m is None:
            break
    return m
# NOTE: Gotta import this *after* VStruct/VSArray defined
import vstruct.defs as vs_defs
def getStructure(sname):
    """
    Return an instance of the specified structure.  The
    structure name may be a definition that was added with
    addStructure() or a python path (ie. win32.TEB) of a
    definition from within vstruct.defs.
    """
    cls = resolve(vs_defs, sname.split("."))
    if cls != None:
        # found a class object -- instantiate it for the caller
        return cls()
    return None
def getModuleNames():
    """List the definition submodule names available in vstruct.defs."""
    names = []
    for attrname in dir(vs_defs):
        if not attrname.startswith("__"):
            names.append(attrname)
    return names
def getStructNames(modname):
    """
    Return the names of the VStruct subclasses defined in the given
    vstruct.defs submodule (e.g. "win32").
    """
    ret = []
    # BUGFIX: resolve() expects a *sequence* of name parts; passing the raw
    # string iterated it character by character and the lookup always failed.
    mod = resolve(vs_defs, modname.split("."))
    if mod == None:
        return ret
    for n in dir(mod):
        x = getattr(mod, n)
        # guard: dir() also yields modules/functions/constants, which would
        # make issubclass() raise TypeError
        if not isinstance(x, type):
            continue
        if issubclass(x, VStruct):
            ret.append(n)
    return ret
| Python |
import struct
class v_enum: pass
class v_base(object):
    """
    Root of the vstruct type hierarchy.  Carries a free-form metadata
    dict and declares the parse/format interface subclasses fill in.
    """
    def __init__(self):
        self._vs_meta = {}

    def vsGetMeta(self, name, defval=None):
        """Look up a metadata value, returning *defval* when unset."""
        return self._vs_meta.get(name, defval)

    def vsSetMeta(self, name, value):
        """Store an arbitrary metadata value on this object."""
        self._vs_meta[name] = value

    # Sub-classes (primitive base, or VStruct) must provide these
    def vsParse(self, bytes): return NotImplemented
    def vsGetFormat(self): return NotImplemented
    def vsIsPrim(self): return NotImplemented
    def vsGetTypeName(self): return NotImplemented
class v_prim(v_base):
    """
    Base class for all scalar/leaf fields.  Concrete primitives populate
    _vs_fmt (struct format), _vs_length (byte width) and _vs_value.
    """
    def __init__(self):
        v_base.__init__(self)
        # Used by base len(),vsGetFormat, etc...
        self._vs_value = None       # current python-native value
        self._vs_length = None      # packed byte width
        self._vs_fmt = None         # struct module format string

    def vsIsPrim(self):
        # leaf node: VStruct.vsGetPrims() stops recursing here
        return True

    def vsGetTypeName(self):
        return self.__class__.__name__

    def vsParse(self, bytes):
        """
        Parser for primitives which assumes we are
        calling parse directly.
        """
        # standalone parse is always little-endian; endianness for structure
        # members is handled by the owning VStruct's format base instead
        fmt = "<%s" % self.vsGetFormat()
        val = struct.unpack(fmt, bytes)[0]
        self.vsSetParsedValue(val)

    def vsSetParsedValue(self, value):
        """
        Primitives will be assigned their values by a parser
        which chops data up with struct format strings. This
        method will be called by parsers to assign the value
        of a primitive from a struct.unpack call.
        """
        self._vs_value = value

    def vsGetFmtValue(self):
        """
        The emit function uses this to ask each primitive for the
        object to be put into the struct pack sequence.
        Most objects just return their value...
        """
        return self._vs_value

    def vsGetValue(self):
        """
        Get the type specific value for this field.
        (Used by the structure dereference method to return
        a python native for the field by name)
        """
        return self._vs_value

    def vsSetValue(self, value):
        """
        Set the type specific value for this field.
        """
        self._vs_value = value

    def vsGetFormat(self):
        return self._vs_fmt

    def __repr__(self):
        return repr(self.vsGetValue())

    def __len__(self):
        return self._vs_length

    def __str__(self):
        return str(self.vsGetValue())
class v_number(v_prim):
    """
    Shared base for the integer primitives.  Values are normalized to
    python longs, and the full numeric operator protocol is implemented
    so a field behaves like a plain integer in expressions.
    """
    def __init__(self, value=0, swapend=False):
        v_prim.__init__(self)
        self._vs_swapend = swapend   # per-field byte swap on parse
        self._vs_length = struct.calcsize(self.vsGetFormat())
        self.vsSetValue(value)

    def vsSetValue(self, value):
        """
        Assure that the value is long() able for all numeric types.
        """
        self._vs_value = long(value)

    def vsSetParsedValue(self, value):
        # We were parsed N endian.  Switch if needed.
        if self._vs_swapend:
            oval = value
            value = 0
            # rebuild the integer byte-reversed
            for i in range(self._vs_length):
                value = value << 8
                value += (oval >> (8*i)) & 0xff
        self.vsSetValue(value)

    def vsGetFormat(self):
        # class attribute so the format is available before __init__ finishes
        return self.__class__._vs_fmt

    def __int__(self):
        return int(self._vs_value)

    def __long__(self):
        return long(self._vs_value)

    ##################################################################
    # Implement the number API
    def __add__(self, other): return long(self) + long(other)
    def __sub__(self, other): return long(self) - long(other)
    def __mul__(self, other): return long(self) * long(other)
    def __div__(self, other): return long(self) / long(other)
    def __floordiv__(self, other): return long(self) // long(other)
    def __mod__(self, other): return long(self) % long(other)
    def __divmod__(self, other): return divmod(long(self), long(other))
    def __pow__(self, other, modulo=None): return pow(long(self), long(other), modulo)
    def __lshift__(self, other): return long(self) << long(other)
    def __rshift__(self, other): return long(self) >> long(other)
    def __and__(self, other): return long(self) & long(other)
    def __xor__(self, other): return long(self) ^ long(other)
    def __or__(self, other): return long(self) | long(other)

    # Operator swapped variants
    def __radd__(self, other): return long(other) + long(self)
    def __rsub__(self, other): return long(other) - long(self)
    def __rmul__(self, other): return long(other) * long(self)
    def __rdiv__(self, other): return long(other) / long(self)
    def __rfloordiv__(self, other): return long(other) // long(self)
    def __rmod__(self, other): return long(other) % long(self)
    def __rdivmod__(self, other): return divmod(long(other), long(self))
    def __rpow__(self, other, modulo=None): return pow(long(other), long(self), modulo)
    def __rlshift__(self, other): return long(other) << long(self)
    def __rrshift__(self, other): return long(other) >> long(self)
    def __rand__(self, other): return long(other) & long(self)
    def __rxor__(self, other): return long(other) ^ long(self)
    def __ror__(self, other): return long(other) | long(self)

    # Inplace variants (these update the stored field value)
    def __iadd__(self, other): self.vsSetValue(self+other); return self
    def __isub__(self, other): self.vsSetValue(self - other); return self
    def __imul__(self, other): self.vsSetValue(self*other); return self
    def __idiv__(self, other): self.vsSetValue(self/other); return self
    def __ifloordiv__(self, other): self.vsSetValue(self // other); return self
    def __imod__(self, other): self.vsSetValue(self % other); return self
    def __ipow__(self, other, modulo=None): self.vsSetValue(pow(self, other, modulo)); return self
    def __ilshift__(self, other): self.vsSetValue(self << other); return self
    def __irshift__(self, other): self.vsSetValue(self >> other); return self
    def __iand__(self, other): self.vsSetValue(self & other); return self
    def __ixor__(self, other): self.vsSetValue(self ^ other); return self
    def __ior__(self, other): self.vsSetValue(self | other); return self

    # operator helpers
    def __neg__(self): return -(long(self))
    def __pos__(self): return +(long(self))
    def __abs__(self): return abs(long(self))
    def __invert__(self): return ~(long(self))

    # index use helper
    def __index__(self): return long(self)

    def __coerce__(self, other):
        # python2 mixed-type arithmetic hook
        try:
            return long(self),long(other)
        except Exception, e:
            return NotImplemented

    # Print helpers
    def __hex__(self): return hex(long(self))
    def __oct__(self): return oct(long(self))
# Concrete fixed-width integer primitives.  _vs_fmt is the struct-module
# format character; _vs_builder marks the class as directly constructable
# by the VStructBuilder.
class v_uint8(v_number):
    _vs_builder = True
    _vs_fmt = "B"

class v_uint16(v_number):
    _vs_builder = True
    _vs_fmt = "H"

class v_uint32(v_number):
    _vs_builder = True
    _vs_fmt = "L"

class v_uint64(v_number):
    _vs_builder = True
    _vs_fmt = "Q"

class v_int8(v_number):
    _vs_builder = True
    _vs_fmt = "b"

class v_int16(v_number):
    _vs_builder = True
    _vs_fmt = "h"

class v_int32(v_number):
    _vs_builder = True
    _vs_fmt = "l"

class v_int64(v_number):
    _vs_builder = True
    _vs_fmt = "q"
# Pointer width of the *host* python interpreter -- v_size_t/v_ptr pick
# their struct format from it at class-definition time.
pointersize = struct.calcsize("P")

class v_size_t(v_number):
    _vs_builder = True
    if pointersize == 4:
        _vs_fmt = "L"
    else:
        _vs_fmt = "Q"
    def __repr__(self):
        # pointers display as hex rather than decimal
        return "0x%.8x" % self._vs_value

class v_ptr(v_size_t):
    pass

# Explicit-width pointer variants (independent of host width)
class v_ptr32(v_ptr):
    _vs_builder = True
    _vs_fmt = "L"

class v_ptr64(v_ptr):
    _vs_builder = True
    _vs_fmt = "Q"
class v_bytes(v_prim):
    """A raw fixed-size run of bytes; repr()s as a hex string."""
    _vs_builder = True
    def __init__(self, size=0, vbytes=None):
        v_prim.__init__(self)
        # either explicit initial bytes, or a zero-filled buffer of *size*
        if vbytes == None:
            vbytes = '\x00' * size
        self._vs_length = len(vbytes)
        self._vs_value = vbytes

    def vsGetFormat(self):
        return "%ds" % len(self)

    def __repr__(self):
        # python2 hex codec on the raw byte string
        return self._vs_value.encode('hex')
class v_str(v_prim):
    '''
    A fixed-size string field: reads stop at the first NUL terminator,
    and writes are NUL-padded back out to the declared size.
    '''
    _vs_builder = True

    def __init__(self, size=4, val=''):
        v_prim.__init__(self)
        self._vs_length = size
        self._vs_value = val.ljust(size, '\x00')

    def vsGetValue(self):
        # dereference: everything up to the first NUL
        raw = v_prim.vsGetValue(self)
        return raw.split("\x00")[0]

    def vsSetValue(self, val):
        # keep the field's declared width by padding with NULs
        padded = val.ljust(len(self), '\x00')
        v_prim.vsSetValue(self, padded)

    def vsGetFormat(self):
        return "%ds" % len(self)

    def __len__(self):
        return len(self._vs_value)
class v_wstr(v_str):
    '''
    Unicode variant of the above string class

    NOTE: the size paramater is in WCHARs!
    '''
    _vs_builder = True
    def __init__(self, size=4, encode='utf-16le', val=''):
        # deliberately skips v_str.__init__: this class stores the
        # *encoded* byte string, so length is in bytes, not characters
        v_prim.__init__(self)
        b = val.ljust(size, '\x00').encode(encode)
        self._vs_length = len(b)
        self._vs_value = b
        self._vs_encode = encode

    def vsGetValue(self):
        # decode back to unicode, then stop at the first NUL character
        val = v_prim.vsGetValue(self)
        val = val.decode(self._vs_encode)
        return val.split("\x00")[0]

    def vsSetValue(self, val):
        # encode, then NUL-pad to the field's byte width
        rbytes = val.encode(self._vs_encode)
        rbytes = rbytes.ljust(len(self), '\x00')
        v_prim.vsSetValue(self, rbytes)

    def vsGetFormat(self):
        return "%ds" % len(self)

    def __len__(self):
        return len(self._vs_value)
class GUID(v_prim):
    """16-byte GUID primitive stored as its unpacked (L,H,H,8B) fields."""
    _vs_builder = True
    def __init__(self, guidstr=None):
        """
        Construct a new GUID primitive. You may specify a GUID string in the
        constructor to populate initial values.
        """
        v_prim.__init__(self)
        self._vs_length = 16
        self._vs_value = "\x00" * 16
        self._vs_fmt = "16s"
        self._guid_fields = (0,0,0,0,0,0,0,0,0,0,0)
        if guidstr != None:
            self._parseGuidStr(guidstr)

    def _parseGuidStr(self, gstr):
        # strip the {...} wrapper and dashes, leaving bare hex digits
        gstr = gstr.replace("{","")
        gstr = gstr.replace("}","")
        gstr = gstr.replace("-","")
        bytes = gstr.decode("hex")
        # Totally cheating... ;)
        # NOTE(review): the string form is parsed big-endian here while the
        # binary get/set below use little-endian -- confirm this asymmetry
        # is intentional (it matches how GUIDs are printed vs stored).
        self._guid_fields = struct.unpack(">LHH8B", bytes)

    def vsSetValue(self, bytes):
        self._guid_fields = struct.unpack("<LHH8B", bytes)

    def vsGetValue(self):
        # BUGFIX: 'struck.pack' was a typo (NameError at runtime);
        # this is the stdlib struct module.
        return struct.pack("<LHH8B", *self._guid_fields)

    def __repr__(self):
        base = "{%.8x-%.4x-%.4x-%.2x%.2x-%.2x%.2x%.2x%.2x%.2x%.2x}"
        return base % self._guid_fields
| Python |
'''
VStruct builder! Used to serialize structure definitions etc...
'''
import types
import inspect
import vstruct
import vstruct.primitives as vs_prim
# Map a primitive byte size (used as the index) to its unsigned integer
# class; sizes with no matching primitive stay None.
prim_types = [ None,
    vs_prim.v_uint8,
    vs_prim.v_uint16,
    None,
    vs_prim.v_uint32,
    None, None, None,
    vs_prim.v_uint64
]

# VStruct Field Flags
VSFF_ARRAY = 1      # field is an array (fsize is the total byte size)
VSFF_POINTER = 2    # field is a pointer (fsize selects 32- vs 64-bit)
class VStructConstructor:
    """
    Tiny callable proxy: invoking it asks the owning builder to
    instantiate the named structure definition.
    """
    def __init__(self, builder, vsname):
        self.builder = builder
        self.vsname = vsname

    def __call__(self, *args, **kwargs):
        # args/kwargs are accepted for call-compatibility but ignored
        return self.builder.buildVStruct(self.vsname)
class VStructBuilder:
    """
    Builds VStruct instances (and generated python source) from
    serialized definitions of the form:
        (name, size, [ (fname, foffset, fsize, ftypename, fflags), ... ])
    Namespaces let nested builders or plain definition modules be
    addressed with dotted names (e.g. "win32.TEB").
    """
    def __init__(self, defs=(), enums=()):
        self._vs_defs = {}
        self._vs_enums = {}
        self._vs_namespaces = {}
        for vsdef in defs:
            self.addVStructDef(vsdef)
        for enum in enums:
            self.addVStructEnumeration(enum)

    def __getattr__(self, name):
        # namespaces take priority over definitions; unknown names raise
        ns = self._vs_namespaces.get(name)
        if ns != None:
            return ns
        vsdef = self._vs_defs.get(name)
        if vsdef != None:
            return VStructConstructor(self, name)
        # (modernized from the py2-only "raise AttributeError, name" form;
        # identical behavior on python2)
        raise AttributeError(name)

    def addVStructEnumeration(self, enum):
        # enum is (name, size, [(kidname, kidval), ...])
        self._vs_enums[enum[0]] = enum

    def addVStructNamespace(self, name, builder):
        self._vs_namespaces[name] = builder

    def getVStructNamespaces(self):
        return self._vs_namespaces.items()

    def getVStructNamespaceNames(self):
        return self._vs_namespaces.keys()

    def hasVStructNamespace(self, namespace):
        return self._vs_namespaces.get(namespace, None) != None

    def getVStructNames(self, namespace=None):
        """List known structure names, optionally within one namespace."""
        if namespace == None:
            return self._vs_defs.keys()
        nsmod = self._vs_namespaces.get(namespace)
        ret = []
        for name in dir(nsmod):
            nobj = getattr(nsmod, name)
            if not inspect.isclass(nobj):
                continue
            if issubclass(nobj, vstruct.VStruct):
                ret.append(name)
        return ret

    def addVStructDef(self, vsdef):
        vsname = vsdef[0]
        self._vs_defs[vsname] = vsdef

    def buildVStruct(self, vsname):
        """
        Instantiate the named structure definition (dotted names are
        resolved through the registered namespaces).
        """
        # Check for a namespace
        parts = vsname.split('.', 1)
        if len(parts) == 2:
            ns = self._vs_namespaces.get(parts[0])
            if ns == None:
                raise Exception('Namespace %s is not present! (need symbols?)' % parts[0])

            # If a module gets added as a namespace, assume it has a class def...
            if isinstance(ns, types.ModuleType):
                cls = getattr(ns, parts[1])
                if cls == None:
                    raise Exception('Unknown VStruct Definition: %s' % vsname)
                return cls()
            return ns.buildVStruct(parts[1])

        vsdef = self._vs_defs.get(vsname)
        if vsdef == None:
            raise Exception('Unknown VStruct Definition: %s' % vsname)

        vsname, vssize, vskids = vsdef
        vs = vstruct.VStruct()
        vs._vs_name = vsname

        for fname, foffset, fsize, ftypename, fflags in vskids:
            if fflags & VSFF_POINTER:
                # FIXME support pointers with types!
                if fsize == 4:
                    fieldval = vs_prim.v_ptr32()
                elif fsize == 8:
                    fieldval = vs_prim.v_ptr64()
                else:
                    raise Exception('Invalid Pointer Width: %d' % fsize)
            elif fflags & VSFF_ARRAY:
                if ftypename != None:
                    # fsize is the total *byte* width; append elements until full
                    fieldval = vstruct.VArray()
                    while len(fieldval) < fsize:
                        fieldval.vsAddElement( self.buildVStruct(ftypename) )
                else:
                    # FIXME actually handle arrays!
                    fieldval = vs_prim.v_bytes(size=fsize)
            elif ftypename == None:
                if fsize not in [1,2,4,8]:
                    # BUGFIX: was a bare 'v_bytes' (NameError) -- this module
                    # only imports the primitives as vs_prim.
                    fieldval = vs_prim.v_bytes(size=fsize)
                else:
                    fieldval = prim_types[fsize]()
            else:
                fieldval = self.buildVStruct(ftypename)

            cursize = len(vs)
            if foffset < cursize:
                # overlapping field (union member) -- skipped for now
                #print 'FIXME handle unions, overlaps, etc...'
                continue

            # insert explicit padding up to the field's declared offset
            if foffset > cursize:
                setattr(vs, '_pad%.4x' % foffset, vs_prim.v_bytes(size=(foffset-cursize)))

            setattr(vs, fname, fieldval)

        return vs

    def genVStructPyCode(self):
        """
        Emit python source which re-declares every registered enum and
        structure definition as vstruct classes.
        """
        ret = 'import vstruct\n'
        ret += 'from vstruct.primitives import *'
        ret += '\n\n'

        for ename, esize, ekids in self._vs_enums.values():
            ret += '%s = v_enum()\n' % ename
            for kname, kval in ekids:
                ret += '%s.%s = %d\n' % (ename,kname,kval)
            ret += '\n\n'

        for vsname, vsize, vskids in self._vs_defs.values():
            ret += 'class %s(vstruct.VStruct):\n' % vsname
            ret += '    def __init__(self):\n'
            ret += '        vstruct.VStruct.__init__(self)\n'
            offset = 0
            for fname, foffset, fsize, ftypename, fflags in vskids:
                if foffset < offset:
                    continue
                if foffset > offset:
                    ret += '        self._pad%.4x = v_bytes(size=%d)\n' % (foffset, foffset-offset)
                    offset += (foffset - offset)
                if fflags & VSFF_POINTER:
                    if fsize == 4:
                        fconst = 'v_ptr32()'
                    elif fsize == 8:
                        fconst = 'v_ptr64()'
                    else:
                        fconst = 'v_bytes(size=%d) # FIXME should be pointer!' % fsize
                elif fflags & VSFF_ARRAY:
                    if ftypename != None:
                        # BUGFIX: this expression was built but never assigned,
                        # so typed arrays silently re-used the previous field's
                        # fconst (or hit NameError on the first field).  Wrap
                        # it in a VArray to mirror what buildVStruct() creates.
                        fconst = 'vstruct.VArray([ %s() for i in xrange( %d / len(%s())) ])' % (ftypename, fsize, ftypename)
                    else:
                        fconst = 'v_bytes(size=%d) # FIXME Unknown Array Type' % fsize
                elif ftypename == None:
                    if fsize == 1:
                        fconst = 'v_uint8()'
                    elif fsize == 2:
                        fconst = 'v_uint16()'
                    elif fsize == 4:
                        fconst = 'v_uint32()'
                    elif fsize == 8:
                        fconst = 'v_uint64()'
                    else:
                        fconst = 'v_bytes(size=%d)' % fsize
                else:
                    fconst = '%s()' % ftypename

                ret += '        self.%s = %s\n' % (fname, fconst)
                offset += fsize
            ret += '\n\n'

        return ret
if __name__ == '__main__':
    # Parse windows structures from dll symbols...
    # Usage: builder.py <some.dll> -- prints generated vstruct source for
    # every type/enum found in the DLL's debug symbols.
    import os
    import sys
    import platform
    from pprint import pprint

    import PE
    import vtrace.platforms.win32 as vt_win32

    p = PE.PE(file(sys.argv[1], 'rb'))
    # pull version/arch metadata out of the PE headers for the banner below
    baseaddr = p.IMAGE_NT_HEADERS.OptionalHeader.ImageBase
    osmajor = p.IMAGE_NT_HEADERS.OptionalHeader.MajorOperatingSystemVersion
    osminor = p.IMAGE_NT_HEADERS.OptionalHeader.MinorOperatingSystemVersion
    machine = p.IMAGE_NT_HEADERS.FileHeader.Machine
    archname = PE.machine_names.get(machine)

    # 0xffffffff stands in for a process handle -- presumably only symbol
    # parsing is wanted here; confirm against Win32SymbolParser's API
    parser = vt_win32.Win32SymbolParser(0xffffffff, sys.argv[1], baseaddr)
    parser.parse()
    t = parser._sym_types.values()
    e = parser._sym_enums.values()
    builder = VStructBuilder(defs=t, enums=e)

    print '# Version: %d.%d' % (osmajor, osminor)
    print '# Architecture: %s' % archname
    print builder.genVStructPyCode()
| Python |
# FIXME this is named wrong!
import vstruct
from vstruct.primitives import *
class CLIENT_ID(vstruct.VStruct):
    # process/thread id pair
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.UniqueProcess = v_ptr()
        self.UniqueThread = v_ptr()

class EXCEPTION_RECORD(vstruct.VStruct):
    # NOTE(review): the full native EXCEPTION_RECORD also carries an
    # ExceptionInformation[] array after NumberParameters; this layout
    # stops short -- confirm before parsing complete records.
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.ExceptionCode = v_uint32()
        self.ExceptionFlags = v_uint32()
        self.ExceptionRecord = v_ptr()
        self.ExceptionAddress = v_ptr()
        self.NumberParameters = v_uint32()

class EXCEPTION_REGISTRATION(vstruct.VStruct):
    # SEH registration record (linked-list node)
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.prev = v_ptr()
        self.handler = v_ptr()
class HEAP(vstruct.VStruct):
    # Windows NT heap header (32-bit era layout).  Uses HEAP_ENTRY and
    # ListEntry, which are defined later in this module (fine at runtime
    # since they are only referenced when __init__ executes).
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.Entry = HEAP_ENTRY()
        self.Signature = v_uint32()
        self.Flags = v_uint32()
        self.ForceFlags = v_uint32()
        self.VirtualMemoryThreshold = v_uint32()
        self.SegmentReserve = v_uint32()
        self.SegmentCommit = v_uint32()
        self.DeCommitFreeBlockThreshold = v_uint32()
        self.DeCommitTotalFreeThreshold = v_uint32()
        self.TotalFreeSize = v_uint32()
        self.MaximumAllocationSize = v_uint32()
        self.ProcessHeapsListIndex = v_uint16()
        self.HeaderValidateLength = v_uint16()
        self.HeaderValidateCopy = v_ptr()
        self.NextAvailableTagIndex = v_uint16()
        self.MaximumTagIndex = v_uint16()
        self.TagEntries = v_ptr()
        self.UCRSegments = v_ptr()
        self.UnusedUnCommittedRanges = v_ptr()
        self.AlignRound = v_uint32()
        self.AlignMask = v_uint32()
        self.VirtualAllocBlocks = ListEntry()
        self.Segments = vstruct.VArray([v_uint32() for i in range(64)])
        self.u = vstruct.VArray([v_uint8() for i in range(16)])
        self.u2 = vstruct.VArray([v_uint8() for i in range(2)])
        self.AllocatorBackTraceIndex = v_uint16()
        self.NonDedicatedListLength = v_uint32()
        self.LargeBlocksIndex = v_ptr()
        self.PseudoTagEntries = v_ptr()
        self.FreeLists = vstruct.VArray([ListEntry() for i in range(128)])
        self.LockVariable = v_uint32()
        self.CommitRoutine = v_ptr()
        self.FrontEndHeap = v_ptr()
        self.FrontEndHeapLockCount = v_uint16()
        self.FrontEndHeapType = v_uint8()
        self.LastSegmentIndex = v_uint8()

class HEAP_SEGMENT(vstruct.VStruct):
    # one committed segment of an NT heap
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.Entry = HEAP_ENTRY()
        self.SegmentSignature = v_uint32()
        self.SegmentFlags = v_uint32()
        self.Heap = v_ptr()
        self.LargestUncommitedRange = v_uint32()
        self.BaseAddress = v_ptr()
        self.NumberOfPages = v_uint32()
        self.FirstEntry = v_ptr()
        self.LastValidEntry = v_ptr()
        self.NumberOfUnCommittedPages = v_uint32()
        self.NumberOfUnCommittedRanges = v_uint32()
        self.UncommittedRanges = v_ptr()
        self.SegmentAllocatorBackTraceIndex = v_uint16()
        self.Reserved = v_uint16()
        self.LastEntryInSegment = v_ptr()

class HEAP_ENTRY(vstruct.VStruct):
    # 8-byte header preceding each heap allocation
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.Size = v_uint16()
        self.PrevSize = v_uint16()
        self.SegmentIndex = v_uint8()
        self.Flags = v_uint8()
        self.Unused = v_uint8()
        self.TagIndex = v_uint8()

class ListEntry(vstruct.VStruct):
    # doubly-linked LIST_ENTRY node
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.Flink = v_ptr()
        self.Blink = v_ptr()
class NT_TIB(vstruct.VStruct):
    # Thread Information Block header (start of the TEB)
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.ExceptionList = v_ptr()
        self.StackBase = v_ptr()
        self.StackLimit = v_ptr()
        self.SubSystemTib = v_ptr()
        self.FiberData = v_ptr()
        #x.Version = v_ptr() # This is a union field
        self.ArbitraryUserPtr = v_ptr()
        self.Self = v_ptr()

class PEB(vstruct.VStruct):
    # Process Environment Block -- NOTE(review): appears to be the 32-bit
    # XP-era layout; field offsets vary across Windows versions.
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.InheritedAddressSpace = v_uint8()
        self.ReadImageFileExecOptions = v_uint8()
        self.BeingDebugged = v_uint8()
        self.SpareBool = v_uint8()
        self.Mutant = v_ptr()
        self.ImageBaseAddress = v_ptr()
        self.Ldr = v_ptr()
        self.ProcessParameters = v_ptr()
        self.SubSystemData = v_ptr()
        self.ProcessHeap = v_ptr()
        self.FastPebLock = v_ptr()
        self.FastPebLockRoutine = v_ptr()
        self.FastPebUnlockRoutine = v_ptr()
        self.EnvironmentUpdateCount = v_uint32()
        self.KernelCallbackTable = v_ptr()
        self.SystemReserved = v_uint32()
        self.AtlThunkSListPtr32 = v_ptr()
        self.FreeList = v_ptr()
        self.TlsExpansionCounter = v_uint32()
        self.TlsBitmap = v_ptr()
        self.TlsBitmapBits = vstruct.VArray([v_uint32() for i in range(2)])
        self.ReadOnlySharedMemoryBase = v_ptr()
        self.ReadOnlySharedMemoryHeap = v_ptr()
        self.ReadOnlyStaticServerData = v_ptr()
        self.AnsiCodePageData = v_ptr()
        self.OemCodePageData = v_ptr()
        self.UnicodeCaseTableData = v_ptr()
        self.NumberOfProcessors = v_uint32()
        self.NtGlobalFlag = v_uint64()
        self.CriticalSectionTimeout = v_uint64()
        self.HeapSegmentReserve = v_uint32()
        self.HeapSegmentCommit = v_uint32()
        self.HeapDeCommitTotalFreeThreshold = v_uint32()
        self.HeapDeCommitFreeBlockThreshold = v_uint32()
        self.NumberOfHeaps = v_uint32()
        self.MaximumNumberOfHeaps = v_uint32()
        self.ProcessHeaps = v_ptr()
        self.GdiSharedHandleTable = v_ptr()
        self.ProcessStarterHelper = v_ptr()
        self.GdiDCAttributeList = v_uint32()
        self.LoaderLock = v_ptr()
        self.OSMajorVersion = v_uint32()
        self.OSMinorVersion = v_uint32()
        self.OSBuildNumber = v_uint16()
        self.OSCSDVersion = v_uint16()
        self.OSPlatformId = v_uint32()
        self.ImageSubsystem = v_uint32()
        self.ImageSubsystemMajorVersion = v_uint32()
        self.ImageSubsystemMinorVersion = v_uint32()
        self.ImageProcessAffinityMask = v_uint32()
        self.GdiHandleBuffer = vstruct.VArray([v_ptr() for i in range(34)])
        self.PostProcessInitRoutine = v_ptr()
        self.TlsExpansionBitmap = v_ptr()
        self.TlsExpansionBitmapBits = vstruct.VArray([v_uint32() for i in range(32)])
        self.SessionId = v_uint32()
        self.AppCompatFlags = v_uint64()
        self.AppCompatFlagsUser = v_uint64()
        self.pShimData = v_ptr()
        self.AppCompatInfo = v_ptr()
        self.CSDVersion = v_ptr()
        self.UNKNOWN = v_uint32()
        self.ActivationContextData = v_ptr()
        self.ProcessAssemblyStorageMap = v_ptr()
        self.SystemDefaultActivationContextData = v_ptr()
        self.SystemAssemblyStorageMap = v_ptr()
        self.MinimumStackCommit = v_uint32()

class SEH3_SCOPETABLE(vstruct.VStruct):
    # compiler-generated SEH3 scope table record
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.EnclosingLevel = v_int32()
        self.FilterFunction = v_ptr()
        self.HandlerFunction = v_ptr()

class SEH4_SCOPETABLE(vstruct.VStruct):
    # SEH4 scope table: SEH3 plus the GS/EH cookie offsets
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.GSCookieOffset = v_int32()
        self.GSCookieXOROffset = v_int32()
        self.EHCookieOffset = v_int32()
        self.EHCookieXOROffset = v_int32()
        self.EnclosingLevel = v_int32()
        self.FilterFunction = v_ptr()
        self.HandlerFunction = v_ptr()

class TEB(vstruct.VStruct):
    # Thread Environment Block -- NOTE(review): this is only the leading
    # portion of the full TEB; parsing stops at FpSoftwareStatusRegister.
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.TIB = NT_TIB()
        self.EnvironmentPointer = v_ptr()
        self.ClientId = CLIENT_ID()
        self.ActiveRpcHandle = v_ptr()
        self.ThreadLocalStorage = v_ptr()
        self.ProcessEnvironmentBlock = v_ptr()
        self.LastErrorValue = v_uint32()
        self.CountOfOwnedCriticalSections = v_uint32()
        self.CsrClientThread = v_ptr()
        self.Win32ThreadInfo = v_ptr()
        self.User32Reserved = vstruct.VArray([v_uint32() for i in range(26)])
        self.UserReserved = vstruct.VArray([v_uint32() for i in range(5)])
        self.WOW32Reserved = v_ptr()
        self.CurrentLocale = v_uint32()
        self.FpSoftwareStatusRegister = v_uint32()

class CLSID(vstruct.VStruct):
    # COM class id: just a GUID
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.uuid = GUID()

class IID(vstruct.VStruct):
    # COM interface id: just a GUID
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.uuid = GUID()
| Python |
import vstruct
from vstruct.primitives import *
# e_ident is modeled as a 4-byte magic plus the individual ident bytes
# below plus 7 bytes of padding (4 + 5 + 7 = the canonical 16 ident bytes)
EI_NIDENT = 4
EI_PADLEN = 7

class Elf32(vstruct.VStruct):
    # 32-bit ELF file header (Elf32_Ehdr, with e_ident broken out)
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.e_ident = v_bytes(EI_NIDENT)
        self.e_class = v_uint8()
        self.e_data = v_uint8()
        self.e_fileversion = v_uint8()
        self.e_osabi = v_uint8()
        self.e_abiversio = v_uint8()
        self.e_pad = v_bytes(EI_PADLEN)
        self.e_type = v_uint16()
        self.e_machine = v_uint16()
        self.e_version = v_uint32()
        self.e_entry = v_uint32()
        self.e_phoff = v_uint32()
        self.e_shoff = v_uint32()
        self.e_flags = v_uint32()
        self.e_ehsize = v_uint16()
        self.e_phentsize = v_uint16()
        self.e_phnum = v_uint16()
        self.e_shentsize = v_uint16()
        self.e_shnum = v_uint16()
        self.e_shstrndx = v_uint16()

class Elf32Section(vstruct.VStruct):
    # section header (Elf32_Shdr)
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.sh_name = v_uint32()
        self.sh_type = v_uint32()
        self.sh_flags = v_uint32()
        self.sh_addr = v_uint32()
        self.sh_offset = v_uint32()
        self.sh_size = v_uint32()
        self.sh_link = v_uint32()
        self.sh_info = v_uint32()
        self.sh_addralign = v_uint32()
        self.sh_entsize = v_uint32()

class Elf32Pheader(vstruct.VStruct):
    # program header (Elf32_Phdr)
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.p_type = v_uint32()
        self.p_offset = v_uint32()
        self.p_vaddr = v_uint32()
        self.p_paddr = v_uint32()
        self.p_filesz = v_uint32()
        self.p_memsz = v_uint32()
        self.p_flags = v_uint32()
        self.p_align = v_uint32()

class Elf32Reloc(vstruct.VStruct):
    # relocation without addend (Elf32_Rel)
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.r_offset = v_ptr32()
        self.r_info = v_uint32()

class Elf32Reloca(Elf32Reloc):
    # relocation with addend (Elf32_Rela): Rel plus trailing r_addend
    def __init__(self):
        Elf32Reloc.__init__(self)
        self.r_addend = v_uint32()

class Elf32Symbol(vstruct.VStruct):
    # symbol table entry (Elf32_Sym)
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.st_name = v_uint32()
        self.st_value = v_uint32()
        self.st_size = v_uint32()
        self.st_info = v_uint8()
        self.st_other = v_uint8()
        self.st_shndx = v_uint16()

class Elf32Dynamic(vstruct.VStruct):
    # dynamic section entry (Elf32_Dyn: tag/value pair)
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.d_tag = v_uint32()
        self.d_value = v_uint32()
class Elf64(vstruct.VStruct):
    # 64-bit ELF file header (Elf64_Ehdr); entry/offset fields widen to 64 bits
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.e_ident = v_bytes(EI_NIDENT)
        self.e_class = v_uint8()
        self.e_data = v_uint8()
        self.e_fileversion = v_uint8()
        self.e_osabi = v_uint8()
        self.e_abiversio = v_uint8()
        self.e_pad = v_bytes(EI_PADLEN)
        self.e_type = v_uint16()
        self.e_machine = v_uint16()
        self.e_version = v_uint32()
        self.e_entry = v_uint64()
        self.e_phoff = v_uint64()
        self.e_shoff = v_uint64()
        self.e_flags = v_uint32()
        self.e_ehsize = v_uint16()
        self.e_phentsize = v_uint16()
        self.e_phnum = v_uint16()
        self.e_shentsize = v_uint16()
        self.e_shnum = v_uint16()
        self.e_shstrndx = v_uint16()

class Elf64Section(Elf32Section):
    # Elf64_Shdr.  Deliberately calls vstruct.VStruct.__init__ (not the
    # Elf32 parent) so the fields are re-declared with 64-bit widths.
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.sh_name = v_uint32()
        self.sh_type = v_uint32()
        self.sh_flags = v_uint64()
        self.sh_addr = v_uint64()
        self.sh_offset = v_uint64()
        self.sh_size = v_uint64()
        self.sh_link = v_uint32()
        self.sh_info = v_uint32()
        self.sh_addralign = v_uint64()
        self.sh_entsize = v_uint64()

class Elf64Pheader(Elf32Pheader):
    # Elf64_Phdr.  Skips the Elf32 parent init: p_flags moves up in the
    # 64-bit layout and the size fields widen.
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.p_type = v_uint32()
        self.p_flags = v_uint32()
        self.p_offset = v_uint64()
        self.p_vaddr = v_uint64()
        self.p_paddr = v_uint64()
        self.p_filesz = v_uint64()
        self.p_memsz = v_uint64()
        self.p_align = v_uint64()

class Elf64Reloc(vstruct.VStruct):
    # relocation without addend (Elf64_Rel)
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.r_offset = v_ptr64()
        self.r_info = v_uint64()

class Elf64Reloca(Elf64Reloc):
    # relocation with addend (Elf64_Rela); re-declares the fields itself
    # (note r_offset as v_uint64 here vs v_ptr64 in Elf64Reloc)
    def __init__(self):
        #Elf64Reloc.__init__(self)
        vstruct.VStruct.__init__(self)
        self.r_offset = v_uint64()
        self.r_info = v_uint64()
        self.r_addend = v_uint64()

class Elf64Symbol(vstruct.VStruct):
    # symbol table entry (Elf64_Sym) -- note the reordered 64-bit layout
    def __init__(self):
        vstruct.VStruct.__init__(self)
        self.st_name = v_uint32()
        self.st_info = v_uint8()
        self.st_other = v_uint8()
        self.st_shndx = v_uint16()
        self.st_value = v_uint64()
        self.st_size = v_uint64()

class Elf64Dynamic(Elf32Dynamic):
    # same tag/value shape as Elf32 (widths unchanged here)
    pass
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.