code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
from django.db import models
from apps.common.models import CommonItem
#from feedbag.apps.review.models import Review
class Brand(CommonItem):
    """A product manufacturer.

    Inherits created_at/updated_at/active from CommonItem.
    """
    name = models.CharField(max_length=90)
    slug = models.SlugField(max_length=40)  # used in brand_view URLs
    logo = models.FileField(upload_to='uploads/brands/')
    description = models.TextField()

    def __unicode__(self):
        return self.name

    @models.permalink
    def get_absolute_url(self):
        # Resolved against the 'brand_view' URL pattern in urls.py.
        return ('brand_view', (), {'slug_brand': self.slug})
class ProductType(models.Model):
    """A category/type that products belong to (e.g. for the sidebar listing)."""
    name = models.CharField(max_length=90)
    slug = models.SlugField(max_length=40)

    def __unicode__(self):
        return self.name
class Product(CommonItem):
    """A catalog product belonging to a Brand, optionally with nutrition data
    and user reviews."""
    brand = models.ForeignKey(Brand)
    type = models.ForeignKey(ProductType)
    name = models.CharField(max_length=90)
    slug = models.SlugField(max_length=40)
    cost = models.CharField(max_length=10)
    weight = models.CharField(max_length=10)
    image = models.FileField(upload_to='uploads/products/')
    description = models.TextField()
    # Set by ReviewAdmin.save_model when the first review is saved.
    has_review = models.BooleanField(default=False)

    def reviews(self):
        """Return the active reviews for this product.

        Bug fix: the original filtered on ``product=self.product`` -- Product
        has no ``product`` attribute, so this raised AttributeError whenever
        called; the product to match is ``self``.
        """
        return Review.objects.filter(active=True, product=self)

    def total_reviews(self):
        """Number of active reviews, counted in the database (COUNT query)
        instead of materializing the whole queryset with len()."""
        return self.reviews().count()

    def nutrition(self):
        """Return the related ProductNutrition row.

        Raises ProductNutrition.DoesNotExist when none has been entered.
        """
        return ProductNutrition.objects.get(product=self)

    @models.permalink
    def get_absolute_url(self):
        return ('product_view', (), {'slug_brand': self.brand.slug, 'slug_product': self.slug})

    def __unicode__(self):
        return self.name
class ProductNutrition(models.Model):
    """Nutrition facts for a single product (one-to-one).

    NOTE(review): units are not recorded in the schema -- presumably grams
    per serving; confirm with the data-entry convention.
    """
    product = models.OneToOneField(Product)
    fat = models.DecimalField(max_digits=4, decimal_places=2)
    protein = models.DecimalField(max_digits=4, decimal_places=2)
    carbohydrate = models.DecimalField(max_digits=4, decimal_places=2)
    sodium = models.DecimalField(max_digits=4, decimal_places=2)

    def __unicode__(self):
        return str(self.id)
class ProductImage(models.Model):
    """An additional gallery image attached to a Product."""
    product = models.ForeignKey(Product)
    # NOTE(review): upload_to 'uploads/brands/' looks copy-pasted from Brand;
    # 'uploads/products/' was probably intended, but existing files live under
    # the current path -- confirm before changing.
    image = models.FileField(upload_to='uploads/brands/')

    def __unicode__(self):
        # Bug fix: __unicode__ must return a string; the original returned the
        # FieldFile object itself. Use its stored file name.
        return self.image.name
class Review(CommonItem):
    """A review of a Product, scored on three one-decimal rating axes."""
    product = models.ForeignKey(Product)
    body = models.TextField()
    rating_taste = models.DecimalField(max_digits=2, decimal_places=1)
    rating_value = models.DecimalField(max_digits=2, decimal_places=1)
    rating_performance = models.DecimalField(max_digits=2, decimal_places=1)

    def rating_overall(self):
        """Mean of the three ratings.

        Bug fix: the original used floor division (``// 3``), silently
        truncating every average to a whole number (e.g. 4.5, 4.5, 4.5
        averaged to 4 instead of 4.5); use true division.
        """
        return (self.rating_taste + self.rating_value + self.rating_performance) / 3

    def __unicode__(self):
        return str(self.id)
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    """Placeholder test suite generated by startapp."""

    def test_basic_addition(self):
        """Sanity check: integer addition behaves as expected."""
        total = 1 + 1
        self.assertEqual(total, 2)
| Python |
from django import template
from django.contrib.sites.models import Site
from feedbag.apps.product.models import Review, ProductType
register = template.Library()
@register.inclusion_tag('recent_reviews.html', takes_context=True)
def recent_reviews(context):
    """Render the recent-reviews fragment with every active review."""
    ctx = {'reviews': Review.objects.filter(active=True)}
    ctx['request'] = context['request']
    ctx['settings'] = context['settings']
    return ctx
@register.inclusion_tag('product_types.html', takes_context=True)
def product_types(context):
    """Render the product-type list fragment (all ProductType rows)."""
    ctx = {'types': ProductType.objects.all()}
    ctx['request'] = context['request']
    ctx['settings'] = context['settings']
    return ctx
@register.inclusion_tag('sidebar_product.html', takes_context=True)
def sidebar_product(context):
    """Render the product sidebar fragment; passes request/settings through."""
    return {'request': context['request'],
            'settings': context['settings']}
| Python |
# Create your views here.
from django import template
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from feedbag.apps.product.models import Brand, Product, ProductType, \
ProductImage
template.add_to_builtins('feedbag.apps.product.templatetags.product')
def brand(request, slug_brand):
    """Render the detail page for one active brand.

    Bug fix: the original's bare ``except: pass`` left ``brand`` unbound on a
    miss, so a missing brand produced a NameError instead of a graceful page;
    default to None and catch only the expected exception.
    """
    brand = None
    try:
        brand = Brand.objects.filter(active=True).get(slug=slug_brand)
    except Brand.DoesNotExist:
        pass  # template renders its empty state
    return render_to_response('brand.html', {'brand': brand})
def brands(request):
    """Render the list of active brands.

    The original try/bare-except was pointless (querysets are lazy; nothing
    here raises before render) and would have left ``brands`` unbound anyway.
    """
    brands = Brand.objects.filter(active=True)
    return render_to_response('brands.html', {'brands': brands})
def product(request, slug_brand, slug_product):
    """Render one active product page, scoped to its brand slug.

    Bug fix: the bare ``except: pass`` left ``product`` unbound on a miss
    (NameError at render); default to None and catch only DoesNotExist.
    """
    product = None
    try:
        product = Product.objects.filter(active=True,
                                         brand__slug=slug_brand).get(slug=slug_product)
    except Product.DoesNotExist:
        pass  # template renders its empty state
    return render_to_response('product.html', {'product': product},
        context_instance=RequestContext(request))
def products(request):
    """Render the list of all active products.

    Bug fix: the original filtered on an undefined ``slug_brand`` (the
    NameError was swallowed by a bare except, after which the unbound local
    crashed the render), so this view could never succeed. List all active
    products instead. The context key is kept as 'product' because the
    template was written against it.
    """
    products = Product.objects.filter(active=True)
    return render_to_response('products.html', {'product': products})
def review(request):
    """Render the static review page."""
    ctx = RequestContext(request)
    return render_to_response('review.html', {}, context_instance=ctx)
def reviews(request):
    """Render all active products that have at least one review.

    The original try/bare-except was dead weight (querysets are lazy) and
    hid real errors; removed.
    """
    products = Product.objects.filter(active=True, has_review=True)
    return render_to_response('reviews.html', {'products': products},
        context_instance=RequestContext(request))
| Python |
from feedbag.apps.blog.models import Post
from django.contrib.sitemaps import FlatPageSitemap, GenericSitemap
from feedbag.apps.product.models import Brand, Product, ProductType, \
ProductImage
def product_sitemap():
    """Build a GenericSitemap over active, reviewed products (priority 0.6)."""
    reviewed = Product.objects.filter(active=True, has_review=True)
    return GenericSitemap({'queryset': reviewed, 'date_field': 'created_at'},
                          priority=0.6)
from django.contrib import admin
from feedbag.apps.product.models import Brand, Product, ProductType, \
ProductImage, ProductNutrition, Review
class BrandAdmin(admin.ModelAdmin):
    """Admin for Brand; the slug auto-fills from the name while typing."""
    prepopulated_fields = {"slug": ("name",)}
admin.site.register(Brand, BrandAdmin)
class ProductTypeAdmin(admin.ModelAdmin):
    """Admin for ProductType; the slug auto-fills from the name."""
    prepopulated_fields = {"slug": ("name",)}
admin.site.register(ProductType, ProductTypeAdmin)
class ProductNutritionInline(admin.StackedInline):
    """Edit a product's one-to-one nutrition row inline on the Product page."""
    model = ProductNutrition
class ProductAdmin(admin.ModelAdmin):
    """Admin for Product with inline nutrition editing."""
    prepopulated_fields = {"slug": ("name",)}
    # has_review is maintained by ReviewAdmin.save_model, not edited by hand.
    readonly_fields = ('has_review',)
    inlines = [ProductNutritionInline]
admin.site.register(Product, ProductAdmin)
class ReviewAdmin(admin.ModelAdmin):
    """Admin for Review: saving a review flags its product as reviewed."""

    def save_model(self, request, obj, form, change):
        # Mark the parent product so it appears in the reviews listing.
        # (Removed a leftover debug ``print`` from the original.)
        obj.product.has_review = True
        obj.product.save()
        obj.save()
admin.site.register(Review, ReviewAdmin)
from django.db import models
# Create your models here.
class TwitterProfile(models.Model):
    """A Twitter account whose timeline the cron job caches locally."""
    screen_name = models.CharField(max_length=200)
    active = models.BooleanField(default=True)

    def __unicode__(self):
        return self.screen_name
class TwitterTweet(models.Model):
    """A locally cached tweet, populated by the cron fetcher."""
    text = models.CharField(max_length=140)
    created = models.DateTimeField()
    user_id = models.CharField(max_length=25)
    uid = models.CharField(max_length=200, unique=True)  # Twitter id_str
    screen_name = models.CharField(max_length=200)

    def __unicode__(self):
        return str(self.id)
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    """Placeholder test suite generated by startapp."""

    def test_basic_addition(self):
        """Sanity check: integer addition behaves as expected."""
        total = 1 + 1
        self.assertEqual(total, 2)
| Python |
from django import template
from feedbag.apps.social.models import TwitterTweet
register = template.Library()
@register.inclusion_tag('twitter.html', takes_context=True)
def twitter(context, limit=10):
    """Render the latest *limit* cached tweets, newest first.

    Best-effort sidebar widget: a database problem must not break the page,
    so failures fall back to an empty list -- but catch Exception, not a bare
    ``except`` that would also swallow KeyboardInterrupt/SystemExit.
    """
    tweets = []
    try:
        tweets = TwitterTweet.objects.order_by('-created')[:limit]
    except Exception:
        pass  # degrade to no tweets rather than a 500
    return {
        'tweets': tweets,
        'request': context['request'],
        'settings': context['settings'],
    }
| Python |
"""
setup_django.py - robustly handle setting up the Django environment for standalone Python scripts.
Author: Mike Kibbel, mkibbel@capstrat.com - November 6, 2007
Simply copy this file into the same folder as your standalone script.
To set up the Django environment, the first line of your script should read:
import setup_django
Then, you'll be able to use Django as you'd expect, e.g. from myproject.myapp.models import MyModel
"""
import os, sys, re
# Matches a filesystem root: '/' on *nix or a drive root like 'C:\' on Windows.
top_level_rx = re.compile(r'^(/|[a-zA-Z]:\\)$')

def is_top_level(path):
    """Return True when *path* is a top-level (root) directory."""
    return bool(top_level_rx.match(path))
def prepare_environment():
    """Locate settings.py by walking up from this script's directory and use
    it to configure the Django environment.

    Raises Exception when no settings.py exists in this directory or any
    parent up to the filesystem root.
    """
    # we'll need this script's directory for searching purposes
    curdir, curfile = os.path.split(os.path.abspath(__file__))
    # move up one directory at a time, searching for settings.py
    settings_module = None
    while not settings_module:
        sys.path.append(curdir)
        try:
            settings_module = __import__('settings', {}, {}, [''])
            sys.path.pop()
            sys.path.insert(0, curdir)
            break
        except ImportError:
            # Bug fix: the original left every failed candidate directory on
            # sys.path; remove it before trying the parent.
            sys.path.pop()
            settings_module = None
            # have we reached the top-level directory?
            if is_top_level(curdir):
                raise Exception("settings.py was not found in the script's directory or any of its parent directories.")
            # move up a directory
            curdir = os.path.normpath(os.path.join(curdir, '..'))
    # make django set up the environment using the settings module
    # (removed a leftover debug print of the module object)
    from django.core.management import setup_environ
    setup_environ(settings_module)
prepare_environment()
| Python |
# Create your views here.
| Python |
import setup_django
from django.http import HttpResponse
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.utils import simplejson
from feedbag.apps.social.models import TwitterTweet, TwitterProfile
import datetime
import sys
import time
import urllib2
def main():
    """Cron entry point: refresh the cached tweets from the Twitter API."""
    print 'cron main()'
    getTwitterTweetsJson()
def getTwitterTweetsJson():
    """Refresh the local TwitterTweet cache from the Twitter REST API.

    Deletes all cached tweets, then re-fetches each profile's timeline.
    A fetch failure for one profile is reported and that profile skipped.

    Bug fixes vs. the original: it raised an *undefined* name ``Http404``
    (never imported, and pointless in a cron script) from an except clause
    that could never fire (``.all()`` does not raise DoesNotExist); it never
    closed the urlopen handle; and it had a garbled ``uid = uid=...``
    double assignment.
    """
    profiles = TwitterProfile.objects.all()
    TwitterTweet.objects.all().delete()
    for profile in profiles:
        try:
            url_info = urllib2.urlopen('http://api.twitter.com/1/statuses/user_timeline.json?screen_name=' + profile.screen_name)
        except urllib2.URLError:
            print ('getTwitterTweetsJson - Error! Getting URL')
            continue
        try:
            tweets = simplejson.load(url_info)
        finally:
            url_info.close()
        if not tweets:
            continue
        for tweet in tweets:
            uid = tweet['id_str']
            try:
                # The cache was just wiped, but stay defensive against
                # duplicate ids within one fetch (uid is unique).
                TwitterTweet.objects.get(uid=uid)
            except TwitterTweet.DoesNotExist:
                # Twitter timestamps look like 'Wed Aug 27 13:08:45 +0000 2008'.
                ts = time.strftime('%Y-%m-%d %H:%M:%S', time.strptime(tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y'))
                TwitterTweet.objects.create(uid=uid, user_id=tweet['user']['id'], text=tweet['text'], screen_name=tweet['user']['screen_name'], created=ts)
# Allow the fetcher to be run directly as a cron job.
if __name__ == "__main__":
    main()
from django.contrib import admin
from django.db import models
from feedbag.apps.social.models import TwitterProfile, TwitterTweet
admin.site.register(TwitterProfile)

class TwitterTweetAdmin(admin.ModelAdmin):
    """Admin listing for cached tweets, newest first."""
    list_display = ('screen_name', 'text', 'created')
    ordering = ('-created', 'id')
    # (removed a stray ``pass`` left over from the class stub)
admin.site.register(TwitterTweet, TwitterTweetAdmin)
from django.db import models
# Create your models here.
| Python |
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    """Placeholder test suite generated by startapp."""

    def test_basic_addition(self):
        """Sanity check: integer addition behaves as expected."""
        total = 1 + 1
        self.assertEqual(total, 2)
| Python |
# Create your views here.
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from feedbag.apps.product.models import Review
def home(request):
    """Render the home page with every active review.

    The original's try/bare-except was dead weight (querysets are lazy, so
    nothing raised before render) and hid real errors; removed.
    """
    reviews = Review.objects.filter(active=True)
    return render_to_response('home.html', {'reviews': reviews},
        context_instance=RequestContext(request))
| Python |
from django.db import models
# Create your models here.
class CommonItem(models.Model):
    # Shared timestamp/visibility fields inherited by Brand, Product, Review.
    # NOTE(review): this is used purely as a base class but has no
    # ``class Meta: abstract = True`` -- Django will therefore create a
    # common_item table and use multi-table inheritance; confirm intentional.
    # NOTE(review): auto_now=False is the field default (a no-op); presumably
    # auto_now_add=True was intended for created_at -- TODO confirm.
    created_at = models.DateTimeField(auto_now = False)
    updated_at = models.DateTimeField(auto_now = True)  # refreshed on every save()
    active = models.BooleanField(default = True)  # soft-visibility flag used by site queries
| Python |
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    """Placeholder test suite generated by startapp."""

    def test_basic_addition(self):
        """Sanity check: integer addition behaves as expected."""
        total = 1 + 1
        self.assertEqual(total, 2)
| Python |
from django import template
from django.contrib.sites.models import Site
register = template.Library()
@register.inclusion_tag('navigation_primary.html', takes_context=True)
def navigation_primary(context, active = '', link = True):
    """Render the primary navigation.

    *active* names the item to highlight; *link* toggles whether items are
    rendered as anchors.
    """
    ctx = {'active': active, 'link': link}
    ctx['request'] = context['request']
    ctx['settings'] = context['settings']
    return ctx
@register.inclusion_tag('footer.html', takes_context=True)
def footer(context):
    """Render the shared page footer; passes request/settings through."""
    return {'request': context['request'],
            'settings': context['settings']}
@register.simple_tag
def current_domain():
    """Return the current Site object (renders as its domain in templates)."""
    site = Site.objects.get_current()
    return site
# Create your views here.
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
def sitemap(request):
    """Render the human-readable HTML sitemap page."""
    ctx = RequestContext(request)
    return render_to_response('sitemap.html', {}, context_instance=ctx)
def accessibility(request):
    """Render the static accessibility statement page."""
    ctx = RequestContext(request)
    return render_to_response('accessibility.html', {}, context_instance=ctx)
def terms(request):
    """Render the static terms-and-conditions page."""
    ctx = RequestContext(request)
    return render_to_response('terms.html', {}, context_instance=ctx)
| Python |
# Django settings for feedbag project.
import os
import sys
DEBUG = True  # NOTE(review): must be False in production
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
ROOT_PATH = os.path.dirname(__file__)
# NOTE(review): database credentials are committed to source control and the
# HOST/PORT point at a local MAMP install -- move these to an untracked
# local-settings override for deployment.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'feedbag', # Or path to database file if using sqlite3.
        'USER': 'root', # Not used with sqlite3.
        'PASSWORD': 'root', # Not used with sqlite3.
        'HOST': '/Applications/MAMP/tmp/mysql/mysql.sock', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '8888', # Set to empty string for default. Not used with sqlite3.
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ROOT_PATH + '/../docroot/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/static/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
# NOTE(review): this key is committed to source control; rotate it and load
# from the environment for any real deployment.
SECRET_KEY = '9*zi6u-^l%s%bb&gp81=#c^hwtowsjt)ks)6wh__55rgvwx@7$'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'feedbag.urls'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    # NOTE(review): this path is relative to the process CWD, not the project.
    './templates/',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    # Project-local processor exposing the settings module to templates.
    'context_processors.settings',
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.request',
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.admin',
    'django.contrib.sitemaps',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    'feedbag.apps.common',
    'feedbag.apps.home',
    'feedbag.apps.blog',
    'feedbag.apps.product',
    'feedbag.apps.social',
)
| Python |
#!/usr/bin/env python
# Standard manage.py entry point for this Django version (execute_manager).
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
if __name__ == "__main__":
    execute_manager(settings)
| Python |
from django.conf import settings
from django.conf.urls.defaults import *
from django.contrib import admin
from django.contrib.sitemaps import FlatPageSitemap, GenericSitemap
from feedbag.apps.blog.feeds import BlogFeed
from feedbag.apps.blog.sitemap import post_sitemap
from feedbag.apps.product.sitemap import product_sitemap
# Uncomment the next two lines to enable the admin:
admin.autodiscover()
# Sitemap sections served at /sitemap.xml (registered at the bottom).
sitemaps = {
    'blog': post_sitemap(),
    'product': product_sitemap(),
}
urlpatterns = patterns('feedbag.apps',
    #admin
    (r'^admin/', include(admin.site.urls)),
    #sitemap
    #(r'^sitemap\.xml$', 'django.contrib.sitemaps.views.sitemap', {'sitemaps': sitemaps}),
    #brand
    url(r'^brands/$', 'product.views.brands', name="brands_view"),
    url(r'^products/$', 'product.views.products', name="products_view"),
    #reviews
    url(r'^reviews/$', 'product.views.reviews', name="reviews_view"),
    # Order matters: the two-slug product pattern must precede the one-slug
    # brand pattern so /reviews/<brand>/<product>/ is not eaten by brand_view.
    url(r'^reviews/(?P<slug_brand>[a-zA-Z0-9_.-]+)/(?P<slug_product>[a-zA-Z0-9_.-]+)/$', 'product.views.product', name='product_view'),
    url(r'^reviews/(?P<slug_brand>[a-zA-Z0-9_.-]+)/$', 'product.views.brand', name='brand_view'),
    #blog
    url(r'^blog/$', 'blog.views.blog', name="blog_view"),
    url(r'^blog/(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})/(?P<slug>[a-zA-Z0-9-]+)$', 'blog.views.post', name="post_view"),
    url(r'^blog/(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})/$', 'blog.views.archive_day', name="post_archive_day_view"),
    url(r'^blog/(?P<year>\d{4})/(?P<month>\d{1,2})/$', 'blog.views.archive_month', name="post_archive_month_view"),
    url(r'^blog/(?P<year>\d{4})/$', 'blog.views.archive_year', name="post_archive_year_view"),
    url(r'^blog/rss/$', BlogFeed(), name='blog_rss'),
    #common
    url(r'^sitemap/$', 'common.views.sitemap', name="sitemap_view"),
    url(r'^accessibility/$', 'common.views.accessibility', name="accessibility_view"),
    url(r'^terms-and-conditions/$', 'common.views.terms', name="terms_view"),
    url(r'^$', 'home.views.home', name="home_view"),
)
# NOTE(review): serving MEDIA_ROOT through django.views.static.serve is
# Django's development helper; use the web server for static files in
# production.
urlpatterns += patterns('',
    (r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
)
urlpatterns += patterns('',
    (r'^sitemap\.xml$', 'django.contrib.sitemaps.views.sitemap', {'sitemaps': sitemaps}),
)
| Python |
from django.conf import settings as settings_conf
from django import template
template.add_to_builtins('feedbag.apps.common.templatetags.common')
template.add_to_builtins('feedbag.apps.social.templatetags.social')
def settings(request):
    """Context processor exposing the Django settings module to templates
    under the 'settings' key."""
    return {'settings': settings_conf}
#!/usr/bin/env python
# Standard manage.py entry point for this Django version (execute_manager).
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
if __name__ == "__main__":
    execute_manager(settings)
| Python |
# setup.py
import os
from distutils.core import setup
from pyke import knowledge_engine
# Make sure that compiled_krb files are up to date:
# (pyke compiles .krb rule files on engine construction, so instantiating the
# engine here regenerates naimath/engine/compiled_krb before packaging.)
knowledge_engine.engine(
    os.path.join(os.path.dirname(__file__), 'naimath', 'engine'))
setup(
    name = 'naimath',
    version = '0.1',
    packages = ['naimath', 'naimath.engine', 'naimath.engine.compiled_krb',
                'naimath.gui', 'naimath.web', ],
    # Ship the pyke rule/question/fact bases alongside the engine package.
    package_data = {
        'naimath.engine': ['*.krb', '*.kqb', '*.kfb'],
    },
    author = "Dr. Inayath",
    author_email = "inayath2020@gmail.com",
    description = "Medical expert system",
    license = "GNU General Public License version 3.0 (GPLv3)",
    keywords = "???",
    url = "http://naimath.sourceforge.net",
    long_description = """
This needs to be filled in!
""",
    download_url = "http://???",
    classifiers = [
        "Programming Language :: Python",
    ],
)
| Python |
from django.db import models
# Create your models here.
| Python |
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    """Placeholder test suite generated by startapp."""

    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        # Fixed: failUnlessEqual is a deprecated alias of assertEqual.
        self.assertEqual(1 + 1, 2)

__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
| Python |
# urls.py
# These are the urlpatterns for the naimath_app.
#
# These need to be include()ed in the project's urls.py file.
from django.conf.urls.defaults import *
# Five-step consultation wizard; step1 is the app root.
urlpatterns = patterns('',
    # Example:
    # (r'^naimath/', 'path.to.function' [, kw_args_dict [, name]]),
    (r'^$', 'naimath_app.views.step1'),
    (r'^step2$', 'naimath_app.views.step2'),
    (r'^step3$', 'naimath_app.views.step3'),
    (r'^step4$', 'naimath_app.views.step4'),
    (r'^step5$', 'naimath_app.views.step5'),
)
| Python |
# naimath_extras.py
from django import template
from naimath.engine import question
register = template.Library()
@register.filter
def get_question(full_q_name):
    """Template filter resolving a 'category:name' string to its Question.

    Falls back to returning the raw string when the name is malformed or the
    lookup fails for any reason.
    """
    try:
        cat, name = full_q_name.split(':')
        return question.lookup(cat, name)
    except Exception:
        return full_q_name
#defaults to this anyway: get_question.is_safe = False
| Python |
# Create your views here.
import itertools
import operator
from pprint import pprint
from django.shortcuts import render_to_response
from django.template import RequestContext
# Don't seem to need these anymore...
#from django.template import Context, loader
#from django.http import HttpResponse
# import the Django project settings
from naimath_project import settings
from naimath.engine import engine, question
Debug = True  # gates the verbose request/engine tracing in the views below
Rule_base = 'pulmonary'  # pyke rule base consulted by step3
def load_translations(request, *domains):
    """Load translation catalogs for the given gettext *domains*, preferring
    the request's language and falling back to en_US/en_IN."""
    #languages = request.accept_language.best_matches()
    languages = (request.LANGUAGE_CODE, 'en_US', 'en_IN')
    if Debug:
        print "request", request
        print "dir(request)", dir(request)
        print "request.LANGUAGE_CODE", request.LANGUAGE_CODE
        print "settings.LOCALE_DIR", settings.LOCALE_DIR
        #print "request.accept_language.best_matches()", \
        #      request.accept_language.best_matches()
    question.load_translations(settings.LOCALE_DIR, languages, *domains)
def consult(rule_base, symptom=None, sign=None, investigation=None):
    r'''Consult the naimath engine.

    Fixed: the original used mutable dict literals as default arguments;
    use None sentinels and substitute fresh empty dicts (backward
    compatible -- omitted arguments still reach the engine as {}).

    Returns two lists:
      * a list of (diagnosis, cf, [(question, relevance_score)]) sorted by
        descending cf and relevance_score.
      * a list of (question, combined_relevance_score) sorted by descending
        combined_relevance_score.
    '''
    return engine.consult(rule_base,
                          symptom=symptom if symptom is not None else {},
                          sign=sign if sign is not None else {},
                          investigation=investigation if investigation is not None else {})
def step1(request):
    """Render the first page of the consultation wizard."""
    ctx = RequestContext(request)
    return render_to_response('naimath_app/step1.html', {}, context_instance=ctx)
def step2(request):
    """Render the second page of the consultation wizard."""
    ctx = RequestContext(request)
    return render_to_response('naimath_app/step2.html', {}, context_instance=ctx)
def step3(request):
    """Run the expert-system consultation over the answers POSTed from the
    earlier wizard steps and render the diagnosis page.

    POST keys are 'category:question' pairs; the csrf token is skipped.
    """
    load_translations(request, 'diagnosis',
                      'symptom',
                      # 'sign',
                      'investigation')
    answers = request.POST
    if Debug:
        pprint(("answers", answers))
    # NOTE(review): this loop only pretty-prints the grouped answers but is
    # NOT guarded by ``if Debug:`` -- it duplicates the grouping done for
    # ``facts`` below purely for tracing; consider guarding or removing.
    for cat, qs in itertools.groupby((tuple(key.split(':')) + (values,)
                                      for key, values
                                      in sorted(answers.iteritems())
                                      if key != u'csrfmiddlewaretoken'),
                                     key=operator.itemgetter(0)):
        pprint((cat, tuple(qs)))
    # Regroup the POST into {category: {question_name: answer}} for the
    # engine (groupby requires the pre-sort on the same key).
    facts = dict(
        (category, dict((q_name, answer)
                        for _, q_name, answer in questions))
        for category, questions
        in itertools.groupby((tuple(key.split(':')) + (values,)
                              for key, values
                              in sorted(answers.iteritems())
                              if key != u'csrfmiddlewaretoken'),
                             key=operator.itemgetter(0)))
    if Debug:
        pprint(("facts", facts))
    diagnosis, questions = consult(Rule_base, **facts)
    if Debug:
        pprint(("diagnosis", diagnosis))
        pprint(("questions", questions))
    # Re-join follow-up question names to 'category:name' so the template can
    # round-trip them through get_question and the next POST.
    return render_to_response('naimath_app/step3.html',
                              {'answers': answers,
                               'diagnosis': diagnosis,
                               'questions': [
                                   (':'.join((category, q_name)), relevance)
                                   for category, q_name, relevance in questions],
                              },
                              context_instance=RequestContext(request))
def step4(request):
    """Render the fourth page of the consultation wizard."""
    ctx = RequestContext(request)
    return render_to_response('naimath_app/step4.html', {}, context_instance=ctx)
def step5(request):
    """Render the fifth page of the consultation wizard."""
    ctx = RequestContext(request)
    return render_to_response('naimath_app/step5.html', {}, context_instance=ctx)
| Python |
#!/usr/bin/env python
# Standard manage.py entry point for this Django version (execute_manager).
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
if __name__ == "__main__":
    execute_manager(settings)
| Python |
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# Project URLconf: everything is delegated to the naimath_app URLconf.
urlpatterns = patterns('',
    # Example:
    # (r'^naimath/', include('naimath_project.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # (r'^doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    # (r'^admin/', include(admin.site.urls)),
    (r'', include('naimath_app.urls')),
)
| Python |
# Django settings for naimath_project project.
import os
import sys
PROJECT_DIR = os.path.realpath(os.path.dirname(__file__))
#print "settings.py: PROJECT_DIR:", PROJECT_DIR
# Walk three levels up from the project directory to the source checkout so
# the 'naimath' package is importable without installation.
SOURCE_DIR = os.path.dirname(
    os.path.dirname(
        os.path.dirname(PROJECT_DIR)))
#print "settings.py: SOURCE_DIR:", SOURCE_DIR
if SOURCE_DIR not in sys.path:
    sys.path.insert(0, SOURCE_DIR)
# Message catalogs consumed by question.load_translations() in the views.
LOCALE_DIR = os.path.join(SOURCE_DIR, 'translation', '0.1')
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': PROJECT_DIR + '/naimath.db', # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
#TIME_ZONE = 'America/Chicago'
TIME_ZONE = None
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
# NOTE(review): committed to source control; rotate for real deployments.
SECRET_KEY = '-*_zz!2pl(-87)(2(4q1^mi2^r(qs=0l*nu*h51jj7b@ez76hy'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'naimath_project.urls'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    PROJECT_DIR + '/templates',
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    # Uncomment the next line to enable the admin:
    # 'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    'naimath_app',
)
| Python |
#!/usr/bin/python
# wx_gui.py
import sys
import os
from doctest_tools import setpath
setpath.setpath(__file__, remove_first = True)
from naimath.gui import gui
def usage():
    """Print the command-line synopsis to stderr and exit with status 2."""
    message = "usage: wx_gui.py rule_base [canned_questions]\n"
    sys.stderr.write(message)
    sys.exit(2)
if __name__ == "__main__":
    #print "sys.argv", sys.argv
    # Require exactly 1 or 2 positional arguments (rule base, optional
    # canned-questions file).
    if not 2 <= len(sys.argv) <= 3: usage()
    # gui.run resolves its data files relative to the gui package directory.
    os.chdir(os.path.dirname(gui.__file__))
    gui.run(*sys.argv[1:])
| Python |
# __init__.py for scripts
# This is only here to make the doctest_tools.setpath work.
| Python |
#!/usr/bin/python
# cli.py
import sys
import os.path
from doctest_tools import setpath
ROOT_DIR = setpath.setpath(__file__, remove_first = True)[0]
#print "ROOT_DIR", ROOT_DIR
from naimath.engine import engine, question
diagnosis_limit = 6
question_limit = 6
def run(rule_base, canned_questions = None, recorded_answers = None):
    """Interactive command-line consultation loop against *rule_base*.

    *canned_questions* optionally names a file of scripted intake questions
    asked up front. NOTE(review): *recorded_answers* is accepted but never
    used. NOTE(review): once ``questions`` is empty the loop prints
    "No questions left" and iterates forever -- there is no break/exit.
    """
    engine.init()
    #question.load_translations('en_IN', 'en_US')
    question.load_translations('en_US')
    # Accumulated answers, keyed by category then question name.
    answers = {'symptom': {}, 'sign': {}, 'investigation': {}}
    if canned_questions:
        nested_questions, top_level_questions = \
            question.read_canned_questions(canned_questions)
        ask_questions(top_level_questions, nested_questions, answers)
    while True:
        diagnosis, questions = engine.consult(rule_base, **answers)
        print
        print "diagnosis:"
        # NOTE(review): the loop variable shadows the ``diagnosis`` list; it
        # is re-bound from engine.consult each pass, so this is harmless but
        # fragile.
        for i, (diagnosis, cf, _questions) \
            in enumerate(diagnosis[:diagnosis_limit]):
            print " %d: %s, CF %.1f" % \
                (i + 1, question.diagnosis(diagnosis), cf)
        print
        print "questions:"
        for i, (category, q_name, relevance_score) \
            in enumerate(questions[:question_limit]):
            print " %d: %s, weight %.1f" % \
                (i + 1, question.lookup(category, q_name).question,
                 relevance_score)
        if not questions:
            print " No questions left"
        else:
            print
            # ask() returns a 1-based menu choice; convert to list index.
            q = ask("Which question would you like to answer? ",
                    question_limit) \
                - 1
            category, q_name = questions[q][:2]
            answers[category][q_name] = question.lookup(category, q_name).ask()
def ask_questions(questions, nested_questions, answers):
    r'''Ask each canned question not yet answered, recursing into follow-ups.

    questions is a sequence of (category, question_name) pairs.
    nested_questions maps (category, question_name) to {answer: more pairs},
    so a given answer can trigger further canned questions.
    answers is mutated in place: answers[category][question_name] = answer.
    '''
    for cat, quest in questions:
        if quest not in answers[cat]:
            print
            ans = question.lookup(cat, quest).ask()
            answers[cat][quest] = ans
            # Some answers unlock a nested batch of canned questions.
            if (cat, quest) in nested_questions:
                ans_map = nested_questions[cat, quest]
                if ans in ans_map:
                    ask_questions(ans_map[ans], nested_questions, answers)
def ask(prompt, limit, special = None):
    r'''Prompt until the user enters an integer in 1..limit; return it.

    If special is given and the user types it (case-insensitive), return 0
    instead.  Re-prompts with an error message on any other input.
    '''
    while True:
        try:
            ans = raw_input(prompt)
            if special is not None and ans.lower() == special:
                return 0
            d = int(ans)
            if 0 < d <= limit:
                return d
        except ValueError:
            # Non-numeric input: fall through to the error message.
            pass
        print "Illegal answer: enter a number between 1 and", limit
def usage():
    '''Print the command-line synopsis on stderr and exit with status 2.'''
    sys.stderr.write("usage: cli.py rule_base\n")
    raise SystemExit(2)
if __name__ == "__main__":
    #print "sys.argv", sys.argv
    # Exactly one argument: the rule base name.
    if len(sys.argv) != 2: usage()
    # The canned-questions file lives next to the engine, named after the
    # rule base with a '.canned' suffix.
    run(sys.argv[1], os.path.join(ROOT_DIR, 'naimath', 'engine',
                                  sys.argv[1] + '.canned'))
| Python |
#!/usr/bin/python
# make_kmap.py
import sys
import operator
import itertools
def kmap(file = sys.stdout, **questions):
    r'''Prints a K-map for the questions to stdout.
    The K-map looks like:
                 q1
                 |q2
                 YY YN Y? NY NN N? ?Y ?N ??
              ---+--+--+--+--+--+--+--+--+--+
        q q YY   |  |  |  |  |  |  |  |  |  |
        3 4   ---+--+--+--+--+--+--+--+--+--+
            YN   |  |  |  |  |  |  |  |  |  |
              ---+--+--+--+--+--+--+--+--+--+
            Y?   |  |  |  |  |  |  |  |  |  |
              ---+--+--+--+--+--+--+--+--+--+
    '''
    # Split the question names between the top axis (at most 3) and the
    # side axis; each axis is ordered by sort_codes for gray-code layout.
    q_list = sorted(questions.keys())
    top_len = min(3, len(q_list) // 2)
    side_len = len(q_list) - top_len
    top_list = q_list[:top_len]
    side_list = q_list[top_len:]
    sort_codes(questions, top_list)
    sort_codes(questions, side_list)
    # Header: one line per top question name, then its gray-coded answers.
    top_indent = 2 * side_len + side_len + 2
    for i, q in enumerate(top_list):
        print >> file, "%*s%s%s" % (top_indent, '', '|' * i, q)
    print >> file, "%*s%s" % \
                     (top_indent, '',
                      ' '.join(gray_code(questions[q] for q in top_list)))
    # One column group per combination of top-axis answers.
    num_top_choices = reduce(operator.mul,
                             (len(questions[q]) for q in top_list))
    horz_line = '-' * (side_len + 1) + \
                  ('+' + '-' * top_len) * num_top_choices + '+'
    print >> file, "%*s%s" % (2 * side_len, '', horz_line)
    bars = ' ' + ('|' + ' ' * top_len) * num_top_choices + '|'
    def body():
        # Alternate a row of answers with a separator line.
        for ans in gray_code(questions[q] for q in side_list):
            yield ans + bars
            yield horz_line
    # NOTE(review): side_list entries are strings, so izip_longest interleaves
    # their characters as row labels beside the body rows -- confirm intended.
    for z in itertools.izip_longest(*side_list + [body()], fillvalue=' '):
        print >> file, ' '.join(z)
def sort_codes(questions, l):
    r'''Reorders the question names in l, in place, for gray-code layout.

    Names with even-length answer sets come first; among odd-length answer
    sets, longer ones come before shorter ones.  Uses a simple bubble pass
    repeated until no swap happens.
    '''
    while True:
        swapped = False
        for idx in range(len(l) - 1):
            left_len = len(questions[l[idx]])
            right_len = len(questions[l[idx + 1]])
            left_is_odd = left_len % 2 != 0
            if left_is_odd and (right_len % 2 == 0 or left_len < right_len):
                l[idx], l[idx + 1] = l[idx + 1], l[idx]
                swapped = True
        if not swapped:
            break
def gray_code(digits):
    r'''Generates a gray code for digits.
    Digits is a seq of digit sequences. All even length digit sequences must
    appear first, followed by all odd length digit sequences in descending len
    order.
    >>> tuple(gray_code(('01',)))
    ('0', '1')
    >>> tuple(gray_code(('01', '01')))
    ('00', '01', '11', '10')
    >>> tuple(gray_code(('01', '012')))
    ('00', '01', '02', '12', '11', '10')
    >>> tuple(gray_code(('012', '012')))
    ('00', '01', '02', '12', '10', '11', '21', '22', '20')
    '''
    def prepare(prior, digit):
        # Choose how this digit stream must repeat so that adjacent codes
        # differ in only one position, depending on the prior stream's parity.
        if prior is None: return digit
        if len(prior) % 2 == 0: return reverse(digit)
        reverses = (len(prior) - len(digit)) // 2
        if reverses:
            return itertools.chain(repeat(reverses, reverse(digit)),
                                   cycle(digit))
        else:
            return cycle(digit)
    # product() returns (length, iterator); only the iterator is needed here.
    return product(*((len(digit), prepare(prior, digit))
                     for prior, digit in pairs(digits)))[1]
def pairs(seq):
    r'''Generates (previous, current) pairs over seq.

    The first pair carries None as the previous element.

    >>> tuple(pairs((0, 1, 2)))
    ((None, 0), (0, 1), (1, 2))
    '''
    previous = None
    for current in iter(seq):
        yield previous, current
        previous = current
def product(*seqs):
    r'''Generates the cross product of all of the seqs.

    Each seq in seqs is (len, iterator); returns (len, iterator) for the
    combined product, recursing on the tail.

    >>> l, it = product()
    >>> l, tuple(it)
    (0, ())
    >>> l, it = product((3, '123'))
    >>> l, tuple(it)
    (3, ('1', '2', '3'))
    >>> l, it = product((3, '123'), (2, '45'), (2, '67'))
    >>> l
    12
    >>> tuple(it)
    ('146', '147', '156', '157', '246', '247', '256', '257', '346', '347', '356', '357')
    '''
    if not seqs:
        return 0, ()
    head = seqs[0]
    if len(seqs) == 1:
        return head
    rest_len, rest_iter = product(*seqs[1:])
    return head[0] * rest_len, product1(head[1], rest_len, rest_iter)
def product1(seq1, seq2_len, seq2):
    r'''Generates the cross product of seq1 and seq2, seq2 varying fastest.

    seq2 is treated as a repeating stream of length seq2_len; seq2_len
    consecutive elements are paired with each element of seq1.

    >>> tuple(product1('123', 3, '456'))
    ('14', '15', '16', '24', '25', '26', '34', '35', '36')
    '''
    stream = itertools.cycle(seq2)
    for left in seq1:
        for _ in range(seq2_len):
            yield left + next(stream)
def reverse(seq):
    r'''Yields seq forward, then the same elements backward.

    >>> tuple(reverse('012'))
    ('0', '1', '2', '2', '1', '0')
    '''
    forward = tuple(seq)
    return itertools.chain(forward, reversed(forward))
def cycle(seq):
    r'''Cycle through seq starting at each starting position.

    Starting positions begin at 0 and rotate backwards through the seq:
    each full pass of len(seq) elements begins one position earlier
    (modulo the length) than the previous pass.

    >>> tuple(cycle('012'))
    ('0', '1', '2', '2', '0', '1', '1', '2', '0')
    >>> tuple(cycle('01234'))
    ('0', '1', '2', '3', '4', '4', '0', '1', '2', '3', '3', '4', '0', '1', '2', '2', '3', '4', '0', '1', '1', '2', '3', '4', '0')
    '''
    items = tuple(seq)
    n = len(items)
    for start in range(n, 0, -1):
        for offset in range(n):
            yield items[(start + offset) % n]
def repeat(n, seq):
    r'''Yields the elements of seq, n times over.

    >>> tuple(repeat(3, '123'))
    ('1', '2', '3', '1', '2', '3', '1', '2', '3')
    '''
    items = tuple(seq)
    for _ in range(n):
        for item in items:
            yield item
def usage():
    '''Print the command-line synopsis on stderr and exit with status 2.'''
    sys.stderr.write(
        "usage: make_kmap.py [filename] question1=answers1...\n")
    raise SystemExit(2)
if __name__ == "__main__":
    # Need at least an output target/first question plus one more argument.
    if len(sys.argv) < 3: usage()
    # An optional leading filename argument is distinguished from the
    # question=answers arguments by the absence of '='.
    if '=' not in sys.argv[1]:
        filename = sys.argv[1]
        qa = sys.argv[2:]
    else:
        filename = None
        qa = sys.argv[1:]
    # At least two questions are required to build a K-map.
    if len(qa) < 2: usage()
    qa_dict = dict(arg.split('=') for arg in qa)
    if filename:
        with open(filename, 'wt') as f:
            kmap(f, **qa_dict)
    else:
        # No filename: kmap defaults to stdout.
        kmap(**qa_dict)
| Python |
#!/usr/bin/env python
import wx
import wx.lib.buttons as buttons
# Sample rule data: {disease: {symptom: answer}} -- presumably placeholder
# data for development of this knowledge-engineer tool; TODO confirm.
data={'common_cold':{'runny_nose':'yes','nasal_blockage':'yes','sneezing':'yes'},'allergic_rhinitis':{'sneezing':'yes','nasal_blockage':'yes'}}
diseaselist=data.keys()
print diseaselist
class RefactorExample(wx.Frame):
    '''Developer tool frame for adding/editing knowledge-base rules.

    Lays out a top button bar (simple/algorithm/treatment rules) over a
    scrolled "simple rules" form with three editable sections: symptoms,
    signs and investigations.  Each section row is a group of 6 widgets
    whose wx ids are consecutive (allocated from the module-global
    ``ranger``); the event handlers recover sibling widgets from a button's
    id by fixed offsets.  Panel heights are tracked in the module globals
    ``y`` (symptoms), ``z`` (signs) and ``ez`` (investigations).
    '''
    def __init__(self, parent, id):
        # Build the frame, the top button bar, and the scrolled rule form.
        wx.Frame.__init__(self, parent, id, 'DEVEL TOOL For Knowledge Engineer - Add/Edit Rules',
                size=(800, 800))
        self.panel = wx.Panel(self, -1)
        self.panel.SetBackgroundColour("White")
        vbox = wx.BoxSizer(wx.VERTICAL)
        self.panel.SetSizer(vbox)
        self.paneltop=wx.Panel(self.panel,-1,size=(800,200))
        self.paneltop.SetBackgroundColour("lightblue")
        vbox.Add(self.paneltop)
        self.panelmain=wx.Panel(self.panel,-1)
        self.panelmain.SetBackgroundColour("White")
        vbox.Add(self.panelmain,1,wx.EXPAND | wx.RIGHT)
        hbox1 = wx.BoxSizer(wx.HORIZONTAL)
        self.panelmain.SetSizer(hbox1)
        # Top bar: three mode buttons (only their handlers are stubs so far).
        b1=buttons.GenButton(self.paneltop,-1,'Enter Simple Rules',pos=(10,10),size=(210,50))
        b1.SetFont(wx.Font(9, wx.ROMAN, wx.NORMAL, wx.BOLD, False))
        b1.SetBackgroundColour("maroon")
        b1.SetForegroundColour("white")
        b1.SetToolTipString("Simple Rules in the format: disease name -> symptom name with CF.")
        self.Bind(wx.EVT_BUTTON,self.OnSimple,b1)
        b2=buttons.GenButton(self.paneltop,-1,'Enter Algorithm Rules',pos=(240,10),size=(230,50))
        b2.SetFont(wx.Font(9, wx.ROMAN, wx.NORMAL, wx.BOLD, False))
        b2.SetBackgroundColour("#3373ae")
        b2.SetForegroundColour("white")
        b2.SetToolTipString("Forward chaining rules : Algorithm wise x-->y--z")
        self.Bind(wx.EVT_BUTTON,self.OnAlgorithm,b2)
        b3=buttons.GenButton(self.paneltop,-1,'Enter Treatment Rules',pos=(500,10),size=(230,50))
        b3.SetFont(wx.Font(9, wx.ROMAN, wx.NORMAL, wx.BOLD, False))
        b3.SetBackgroundColour("#3373ae")
        b3.SetForegroundColour("white")
        b3.SetToolTipString("Treatment rules : Algorithm wise x-->y--z")
        self.Bind(wx.EVT_BUTTON,self.OnTreatment,b3)
        #----------- End of buttons.
        #------------ Simple Rules Form
        self.scroll1=wx.ScrolledWindow(self.panelmain,-1,pos=wx.DefaultPosition, size=(800,800), style=wx.HSCROLL | wx.VSCROLL)
        self.scroll1.SetScrollbars(1,1,1300,1400)
        hbox1.Add(self.scroll1, 1,flag=wx.EXPAND)
        self.scroll1.SetBackgroundColour("white")
        wx.StaticText(self.scroll1,-1,"Add/Edit Simple Rule(s) Form:", pos=(20,10)).SetFont(wx.Font(12, wx.DECORATIVE,wx.ITALIC, wx.NORMAL))
        sampleList = ['Cardiovascular', 'Pulmonary', 'Nervous System', 'GastroIntestinal', 'Genito Urinary', 'Musculoskeletal','Psychiatry', 'ENT', 'Opthalmology','Rheumatology','Genetics','Paediatrics']
        wx.StaticText(self.scroll1, -1, "Select the organ system:", (80, 40))
        self.sysname=wx.Choice(self.scroll1, -1, (255, 35), choices=sampleList)
        self.sysname.SetStringSelection("Cardiovascular")
        wx.StaticText(self.scroll1, -1, "Please enter the disease name\n(eg.allergic rhinitis) : ", (80, 80))
        self.dz_name=wx.TextCtrl(self.scroll1, -1, "",size=(255, -1),pos=(310,80))
        dz_checkbutton=wx.Button(self.scroll1,-1,"Check if disease already exists",pos=(310,110),size=(255,25))
        # Hard-coded sample disease heading -- presumably placeholder until
        # the form is wired to real data; TODO confirm.
        disease="Allergic Rhinitis"
        h1=wx.StaticText(self.scroll1, -1, disease+":" , (80, 150))
        h1.SetFont(wx.Font(13,wx.DECORATIVE,wx.ITALIC,wx.NORMAL))
        h1.SetForegroundColour("darkblue")
        #------Symptoms Form
        # y/z/ez are running panel heights shared with the Gen* methods.
        global y
        y=135
        self.panel1=wx.Panel(self.scroll1,-1,pos=(80,180),size=(800,y))
        self.panel1.SetBackgroundColour("#ECECEC")
        self.sym_box=wx.StaticBox(self.panel1,-1,"Symptoms:",size=(800,y))
        self.GenSymAddBox()
        self.GenSymptoms()
        global z
        z=115
        self.panel2=wx.Panel(self.scroll1,-1,pos=(80,220+y),size=(800,z))
        self.sign_box=wx.StaticBox(self.panel2,-1,"Signs:",size=(800,z))
        self.GenSignAddBox()
        self.GenSigns()
        t=y+z
        global ez
        ez=115
        self.panel3=wx.Panel(self.scroll1,-1,pos=(80,250+t),size=(800,ez))
        self.inv_box=wx.StaticBox(self.panel3,-1,"Investigations:",size=(800,ez))
        self.panel3.SetBackgroundColour("#ECECEC")
        self.GenInvAddBox()
        self.GenInv()
    # The three mode buttons are not implemented yet.
    def OnSimple(self,event):
        pass
    def OnAlgorithm(self,event):
        pass
    def OnTreatment(self,event):
        pass
    def GenSymAddBox(self):
        # Header row of the Symptoms section: syntax help, name/CF inputs
        # and the Add! button.
        self.syntax=wx.StaticText(self.panel1,-1,"Please enter the answer prefixed by symptom name eg. cough_gradual for TB .Use hyphens '-' for words\nwithin question eg. allergy-cold_yes. Enter their certainty ranging from -1 to 0 to 1", pos=(15,25))
        self.syntax.SetFont(wx.Font(9,wx.DECORATIVE,wx.ITALIC,wx.NORMAL))
        self.syntax.SetForegroundColour("Maroon")
        self.addsym=wx.StaticText(self.panel1,-1,"Name:",pos=(15,70))
        self.addsym.SetFont(wx.Font(10,wx.DECORATIVE,wx.ITALIC,wx.NORMAL))
        # NOTE(review): self.addsym is immediately rebound from the label to
        # the TextCtrl; the label reference is lost (same below for addsymcf).
        self.addsym=wx.TextCtrl(self.panel1, -1, "",size=(225, -1),pos=(70,65))
        self.addsymcf=wx.StaticText(self.panel1,-1,"Certainty Factor:",pos=(305,70))
        self.addsymcf.SetFont(wx.Font(10,wx.DECORATIVE,wx.ITALIC,wx.NORMAL))
        self.addsymcf=wx.TextCtrl(self.panel1, -1, "0.6",size=(45, -1),pos=(420,65))
        self.add_btn=buttons.GenButton(self.panel1,-1,'Add!',pos=(490,65),size=(50,25))
        self.add_btn.SetFont(wx.Font(10, wx.ROMAN, wx.NORMAL, wx.ITALIC, False))
        self.add_btn.SetBackgroundColour("#3373ae")
        self.add_btn.SetForegroundColour("white")
        self.Bind(wx.EVT_BUTTON,self.OnSymAdd,self.add_btn)
        wx.StaticText(self.panel1,-1,"___________________________________________________________",pos=(50,95))
    def GenSymptoms(self):
        # One row (6 widgets with consecutive ids from `ranger`) per symptom
        # in sym_dict; grows the panel height `y` by 35 per row.
        self.sym_dict={'runny_nose_yes':'0.5','sneezing_yes':'0.6','sorethroat_yes':'0.4'}
        ls = self.sym_dict.iteritems()
        ls=data.values()[0]
        print ls
        global ranger
        ranger=1
        for name,cf in self.sym_dict.iteritems():
            global y
            self.sym_name=wx.StaticText(self.panel1,ranger+0,"Name:",pos=(15,y+5))
            self.sym_name.SetFont(wx.Font(10,wx.DECORATIVE,wx.ITALIC,wx.NORMAL))
            self.symname =wx.TextCtrl(self.panel1, ranger+1, name,size=(225, -1),pos=(70,y))
            self.symname.Enable(False)
            self.cf_name=wx.StaticText(self.panel1,ranger+2,"Certainty Factor:",pos=(305,y+5))
            self.cf_name.SetFont(wx.Font(10,wx.DECORATIVE,wx.ITALIC,wx.NORMAL))
            self.cf_input=wx.TextCtrl(self.panel1, ranger+3, cf,size=(45, -1),pos=(420,y))
            self.cf_input.Enable(False)
            self.edit_btn=buttons.GenButton(self.panel1,ranger+4,'Edit!',pos=(490,y),size=(50,25))
            self.edit_btn.SetFont(wx.Font(10, wx.ROMAN, wx.NORMAL, wx.ITALIC, False))
            self.edit_btn.SetBackgroundColour("#3373ae")
            self.edit_btn.SetForegroundColour("white")
            self.Bind(wx.EVT_BUTTON,self.OnSymEdit,self.edit_btn)
            self.del_btn=buttons.GenButton(self.panel1,ranger+5,'Delete!',pos=(550,y),size=(70,25))
            self.del_btn.SetFont(wx.Font(10, wx.ROMAN, wx.NORMAL, wx.ITALIC, False))
            self.del_btn.SetBackgroundColour("#3373ae")
            self.del_btn.SetForegroundColour("white")
            self.Bind(wx.EVT_BUTTON,self.OnSymDelete,self.del_btn)
            ranger=ranger+6
            y=y+35
        self.panel1.SetSize((650,y))
        self.sym_box.SetSize((650,y))
        print y
    def GenSignAddBox(self):
        # Header row of the Signs section (mirrors GenSymAddBox).
        self.syntax=wx.StaticText(self.panel2,-1,"Please enter the answer prefixed by symptom name eg. cough_gradual for TB .Use hyphens '-' for words\nwithin question eg. allergy-cold_yes. Enter their certainty ranging from -1 to 0 to 1", pos=(15,25))
        self.syntax.SetFont(wx.Font(9,wx.DECORATIVE,wx.ITALIC,wx.NORMAL))
        self.syntax.SetForegroundColour("Maroon")
        self.addsign=wx.StaticText(self.panel2,-1,"Name:",pos=(15,70))
        self.addsign.SetFont(wx.Font(10,wx.DECORATIVE,wx.ITALIC,wx.NORMAL))
        self.addsign=wx.TextCtrl(self.panel2, -1, "",size=(225, -1),pos=(70,65))
        self.addsigncf=wx.StaticText(self.panel2,-1,"Certainty Factor:",pos=(305,70))
        self.addsigncf.SetFont(wx.Font(10,wx.DECORATIVE,wx.ITALIC,wx.NORMAL))
        self.addsigncf=wx.TextCtrl(self.panel2, -1, "0.6",size=(45, -1),pos=(420,65))
        self.add_btn=buttons.GenButton(self.panel2,-1,'Add!',pos=(490,65),size=(50,25))
        self.add_btn.SetFont(wx.Font(10, wx.ROMAN, wx.NORMAL, wx.ITALIC, False))
        self.add_btn.SetBackgroundColour("#3373ae")
        self.add_btn.SetForegroundColour("white")
        self.Bind(wx.EVT_BUTTON,self.OnSignAdd,self.add_btn)
        # NOTE(review): this separator is parented to panel1, not panel2 --
        # looks like a copy/paste slip; confirm intended parent.
        wx.StaticText(self.panel1,-1,"___________________________________________________________",pos=(50,95))
    def GenSigns(self):
        # One widget row per sign; grows panel height `z` by 35 per row.
        self.sym_dict={'runny_nose_yes':'0.5','sneezing_yes':'0.6','sorethroat_yes':'0.4'}
        ls = self.sym_dict.iteritems()
        ls=data.values()[0]
        print ls
        global ranger
        for name,cf in self.sym_dict.iteritems():
            global z
            self.sym_name=wx.StaticText(self.panel2,ranger+0,"Name:",pos=(15,z+5))
            self.sym_name.SetFont(wx.Font(10,wx.DECORATIVE,wx.ITALIC,wx.NORMAL))
            self.symname =wx.TextCtrl(self.panel2, ranger+1, name,size=(225, -1),pos=(70,z))
            self.symname.Enable(False)
            self.cf_name=wx.StaticText(self.panel2,ranger+2,"Certainty Factor:",pos=(305,z+5))
            self.cf_name.SetFont(wx.Font(10,wx.DECORATIVE,wx.ITALIC,wx.NORMAL))
            self.cf_input=wx.TextCtrl(self.panel2, ranger+3, cf,size=(45, -1),pos=(420,z))
            self.cf_input.Enable(False)
            self.edit_btn=buttons.GenButton(self.panel2,ranger+4,'Edit!',pos=(490,z),size=(50,25))
            self.edit_btn.SetFont(wx.Font(10, wx.ROMAN, wx.NORMAL, wx.ITALIC, False))
            self.edit_btn.SetBackgroundColour("#3373ae")
            self.edit_btn.SetForegroundColour("white")
            self.Bind(wx.EVT_BUTTON,self.OnSignEdit,self.edit_btn)
            self.del_btn=buttons.GenButton(self.panel2,ranger+5,'Delete!',pos=(550,z),size=(70,25))
            self.del_btn.SetFont(wx.Font(10, wx.ROMAN, wx.NORMAL, wx.ITALIC, False))
            self.del_btn.SetBackgroundColour("#3373ae")
            self.del_btn.SetForegroundColour("white")
            self.Bind(wx.EVT_BUTTON,self.OnSignDelete,self.del_btn)
            ranger=ranger+6
            z=z+35
        self.panel2.SetSize((650,z))
        self.sign_box.SetSize((650,z))
    def GenInvAddBox(self):
        # Header row of the Investigations section (mirrors GenSymAddBox).
        self.syntax=wx.StaticText(self.panel3,-1,"Please enter the answer prefixed by symptom name eg. cough_gradual for TB .Use hyphens '-' for words\nwithin question eg. allergy-cold_yes. Enter their certainty ranging from -1 to 0 to 1", pos=(15,25))
        self.syntax.SetFont(wx.Font(9,wx.DECORATIVE,wx.ITALIC,wx.NORMAL))
        self.syntax.SetForegroundColour("Maroon")
        self.addinv=wx.StaticText(self.panel3,-1,"Name:",pos=(15,70))
        self.addinv.SetFont(wx.Font(10,wx.DECORATIVE,wx.ITALIC,wx.NORMAL))
        self.addinv=wx.TextCtrl(self.panel3, -1, "",size=(225, -1),pos=(70,65))
        self.addinvcf=wx.StaticText(self.panel3,-1,"Certainty Factor:",pos=(305,70))
        self.addinvcf.SetFont(wx.Font(10,wx.DECORATIVE,wx.ITALIC,wx.NORMAL))
        self.addinvcf=wx.TextCtrl(self.panel3, -1, "0.6",size=(45, -1),pos=(420,65))
        self.add_btn=buttons.GenButton(self.panel3,-1,'Add!',pos=(490,65),size=(50,25))
        self.add_btn.SetFont(wx.Font(10, wx.ROMAN, wx.NORMAL, wx.ITALIC, False))
        self.add_btn.SetBackgroundColour("#3373ae")
        self.add_btn.SetForegroundColour("white")
        self.Bind(wx.EVT_BUTTON,self.OnInvAdd,self.add_btn)
        wx.StaticText(self.panel3,-1,"___________________________________________________________",pos=(50,95))
    def GenInv(self):
        # One widget row per investigation; grows panel height `ez` by 35.
        self.sym_dict={'runny_nose_yes':'0.5','sneezing_yes':'0.6','sorethroat_yes':'0.4'}
        ls = self.sym_dict.iteritems()
        ls=data.values()[0]
        print ls
        global ranger
        for name,cf in self.sym_dict.iteritems():
            global ez
            self.sym_name=wx.StaticText(self.panel3,ranger+0,"Name:",pos=(15,ez+5))
            self.sym_name.SetFont(wx.Font(10,wx.DECORATIVE,wx.ITALIC,wx.NORMAL))
            self.symname =wx.TextCtrl(self.panel3, ranger+1, name,size=(225, -1),pos=(70,ez))
            self.symname.Enable(False)
            self.cf_name=wx.StaticText(self.panel3,ranger+2,"Certainty Factor:",pos=(305,ez+5))
            self.cf_name.SetFont(wx.Font(10,wx.DECORATIVE,wx.ITALIC,wx.NORMAL))
            self.cf_input=wx.TextCtrl(self.panel3, ranger+3, cf,size=(45, -1),pos=(420,ez))
            self.cf_input.Enable(False)
            self.edit_btn=buttons.GenButton(self.panel3,ranger+4,'Edit!',pos=(490,ez),size=(50,25))
            self.edit_btn.SetFont(wx.Font(10, wx.ROMAN, wx.NORMAL, wx.ITALIC, False))
            self.edit_btn.SetBackgroundColour("#3373ae")
            self.edit_btn.SetForegroundColour("white")
            self.Bind(wx.EVT_BUTTON,self.OnInvEdit,self.edit_btn)
            self.del_btn=buttons.GenButton(self.panel3,ranger+5,'Delete!',pos=(550,ez),size=(70,25))
            self.del_btn.SetFont(wx.Font(10, wx.ROMAN, wx.NORMAL, wx.ITALIC, False))
            self.del_btn.SetBackgroundColour("#3373ae")
            self.del_btn.SetForegroundColour("white")
            self.Bind(wx.EVT_BUTTON,self.OnInvDelete,self.del_btn)
            ranger=ranger+6
            ez=ez+35
        self.panel3.SetSize((650,ez))
        self.inv_box.SetSize((650,ez))
    def OnSymEdit(self,event):
        # Turn the Edit! button into Save!, enable the row's name/CF fields,
        # and lock the Delete! button.  Sibling widgets are located by the
        # fixed id offsets allocated in GenSymptoms.
        wid=event.GetId()
        but=self.panel1.FindWindowById(wid)
        but.SetLabel("Save!")
        self.Bind(wx.EVT_BUTTON,self.SaveSymEdit,but)
        symid=wid-3
        sym=self.panel1.FindWindowById(symid)
        sym.Enable(True)
        cfid=wid-1
        cf=self.panel1.FindWindowById(cfid)
        cf.Enable(True)
        delid=wid+1
        delbtn=self.panel1.FindWindowById(delid)
        delbtn.Enable(False)
    def SaveSymEdit(self,event):
        # Reverse of OnSymEdit: re-disable the fields and restore Edit!.
        wid=event.GetId()
        but=self.panel1.FindWindowById(wid)
        self.Bind(wx.EVT_BUTTON,self.OnSymEdit,but)
        but.SetLabel("Edit!")
        symid=wid-3
        sym=self.panel1.FindWindowById(symid)
        sym.Enable(False)
        cfid=wid-1
        cf=self.panel1.FindWindowById(cfid)
        cf.Enable(False)
        delid=wid+1
        delbtn=self.panel1.FindWindowById(delid)
        delbtn.Enable(True)
        # print sym.GetValue(),cf.GetValue()
        # print "saved!"
        # print self.sysname.GetStringSelection()
    def OnSymDelete(self,event):
        # Destroy the whole 6-widget row, located from the Delete! button's
        # id by the fixed offsets.
        wid=event.GetId()
        but=self.panel1.FindWindowById(wid)
        symstid=wid-5
        symst=self.panel1.FindWindowById(symstid)
        symst.Destroy()
        symid=wid-4
        sym=self.panel1.FindWindowById(symid)
        value= sym.GetValue()
        sym.Destroy()
        cfstid=wid-3
        cfst=self.panel1.FindWindowById(cfstid)
        cfst.Destroy()
        cfid=wid-2
        cf=self.panel1.FindWindowById(cfid)
        cf.Destroy()
        editid=wid-1
        editbtn=self.panel1.FindWindowById(editid)
        editbtn.Destroy()
        but.Destroy()
        global y
    def OnSymAdd(self,event):
        # Record the new symptom name/CF in sym_dict and clear the inputs.
        # The displayed list is not rebuilt yet (see Refresh Panel note).
        print self.addsym.GetValue(), self.addsymcf.GetValue()
        self.sym_dict[self.addsym.GetValue()]=self.addsymcf.GetValue()
        self.addsym.SetValue("")
        self.addsymcf.SetValue("")
        # print self.sym_dict
        # Refresh Panel.
        pass
    def OnSignEdit(self,event):
        # Same mechanics as OnSymEdit but for the Signs panel (panel2).
        wid=event.GetId()
        but=self.panel2.FindWindowById(wid)
        but.SetLabel("Save!")
        self.Bind(wx.EVT_BUTTON,self.SaveSignEdit,but)
        symid=wid-3
        sym=self.panel2.FindWindowById(symid)
        sym.Enable(True)
        cfid=wid-1
        cf=self.panel2.FindWindowById(cfid)
        cf.Enable(True)
        delid=wid+1
        delbtn=self.panel2.FindWindowById(delid)
        delbtn.Enable(False)
    def SaveSignEdit(self,event):
        # Reverse of OnSignEdit.
        wid=event.GetId()
        but=self.panel2.FindWindowById(wid)
        self.Bind(wx.EVT_BUTTON,self.OnSignEdit,but)
        but.SetLabel("Edit!")
        symid=wid-3
        sym=self.panel2.FindWindowById(symid)
        sym.Enable(False)
        cfid=wid-1
        cf=self.panel2.FindWindowById(cfid)
        cf.Enable(False)
        delid=wid+1
        delbtn=self.panel2.FindWindowById(delid)
        delbtn.Enable(True)
        # print sym.GetValue(),cf.GetValue()
        # print "saved!"
        # print self.sysname.GetStringSelection()
    def OnSignDelete(self,event):
        # Destroy the whole sign row (same offsets as OnSymDelete).
        wid=event.GetId()
        but=self.panel2.FindWindowById(wid)
        symstid=wid-5
        symst=self.panel2.FindWindowById(symstid)
        symst.Destroy()
        symid=wid-4
        sym=self.panel2.FindWindowById(symid)
        value= sym.GetValue()
        sym.Destroy()
        cfstid=wid-3
        cfst=self.panel2.FindWindowById(cfstid)
        cfst.Destroy()
        cfid=wid-2
        cf=self.panel2.FindWindowById(cfid)
        cf.Destroy()
        editid=wid-1
        editbtn=self.panel2.FindWindowById(editid)
        editbtn.Destroy()
        but.Destroy()
        global y
    def OnSignAdd(self,event):
        # Record the new sign name/CF and clear the inputs.
        print self.addsign.GetValue(), self.addsigncf.GetValue()
        self.sym_dict[self.addsign.GetValue()]=self.addsigncf.GetValue()
        self.addsign.SetValue("")
        self.addsigncf.SetValue("")
        # print self.sym_dict
        # Refresh Panel.
        pass
    def OnInvEdit(self,event):
        # Same mechanics as OnSymEdit but for the Investigations panel.
        wid=event.GetId()
        but=self.panel3.FindWindowById(wid)
        but.SetLabel("Save!")
        self.Bind(wx.EVT_BUTTON,self.SaveInvEdit,but)
        symid=wid-3
        sym=self.panel3.FindWindowById(symid)
        sym.Enable(True)
        cfid=wid-1
        cf=self.panel3.FindWindowById(cfid)
        cf.Enable(True)
        delid=wid+1
        delbtn=self.panel3.FindWindowById(delid)
        delbtn.Enable(False)
    def SaveInvEdit(self,event):
        # Reverse of OnInvEdit.
        wid=event.GetId()
        but=self.panel3.FindWindowById(wid)
        self.Bind(wx.EVT_BUTTON,self.OnInvEdit,but)
        but.SetLabel("Edit!")
        symid=wid-3
        sym=self.panel3.FindWindowById(symid)
        sym.Enable(False)
        cfid=wid-1
        cf=self.panel3.FindWindowById(cfid)
        cf.Enable(False)
        delid=wid+1
        delbtn=self.panel3.FindWindowById(delid)
        delbtn.Enable(True)
        # print sym.GetValue(),cf.GetValue()
        # print "saved!"
        # print self.sysname.GetStringSelection()
    def OnInvDelete(self,event):
        # Destroy the whole investigation row (same offsets as OnSymDelete).
        wid=event.GetId()
        but=self.panel3.FindWindowById(wid)
        symstid=wid-5
        symst=self.panel3.FindWindowById(symstid)
        symst.Destroy()
        symid=wid-4
        sym=self.panel3.FindWindowById(symid)
        value= sym.GetValue()
        sym.Destroy()
        cfstid=wid-3
        cfst=self.panel3.FindWindowById(cfstid)
        cfst.Destroy()
        cfid=wid-2
        cf=self.panel3.FindWindowById(cfid)
        cf.Destroy()
        editid=wid-1
        editbtn=self.panel3.FindWindowById(editid)
        editbtn.Destroy()
        but.Destroy()
        global y
    def OnInvAdd(self,event):
        # Record the new investigation name/CF and clear the inputs.
        print self.addinv.GetValue(), self.addinvcf.GetValue()
        self.sym_dict[self.addinv.GetValue()]=self.addinvcf.GetValue()
        self.addinv.SetValue("")
        self.addinvcf.SetValue("")
        # print self.sym_dict
        # Refresh Panel.
        pass
if __name__ == '__main__':
    # Start the wx application and show the rule-editing frame.
    app = wx.PySimpleApp()
    frame = RefactorExample(parent=None, id=-1)
    frame.Show()
    app.MainLoop()
| Python |
#!/usr/bin/python
# extract_questions.py
# .pot format:
#
# <whitespace>
# #: file:line ...
# msgid "unstranslated_string"
# msgstr "translated_string"
# We need:
#
# category, q_name (with file:line references)
# choices (with file:line references)
import sys
import os
import collections
import functools
import re
Debug = True
from doctest_tools import setpath
Root_dir = setpath.setpath(__file__, remove_first=True)[0]
#print "Root_dir", Root_dir
from naimath.engine import extracted_questions
from naimath.lib import po_headers
Translation_dir = os.path.join(Root_dir, 'translation', '0.1', 'po')
#print "Translation_dir", Translation_dir
def read_file(filename, d_results, q_results):
    r'''Update d_results, q_results dict with entries from filename.
    D_results dict is {untranslated_diagnosis: [(file, line)...]}
    Q_results dict is {category: {q_name:
                                   ([(file, line)...],
                                    {choice: [(file, line)...]}
                                   ),
                                 },
                      }
    '''
    if Debug: print "reading", filename
    with open(filename) as f:
        # line_cont, when set, is a continuation function returned by a
        # pattern handler: the construct spans lines, so the next line is
        # fed to it instead of being matched against Patterns.
        line_cont = None
        for lineno, line in enumerate(f, start=1):
            line = line.strip()
            if line_cont is not None:
                line_cont = \
                  line_cont(line, filename, lineno, d_results, q_results)
            else:
                #print "checking %s(%d): %s" % (filename, lineno, line)
                # Try each (regex, handler) pair; the handler may itself
                # return a continuation for the following line.
                for pattern, fn in Patterns:
                    m = pattern.match(line)
                    if m:
                        #print "got match"
                        if Debug: print "matched", line
                        line_cont = fn(m, filename, lineno,
                                       d_results, q_results)
def make_q_results():
    r'''Returns an empty q_results mapping with auto-vivifying defaults.

    Shape: {category: {q_name: ([references], {choice: [references]})}},
    where every missing key materializes its default on first access.

    >>> d = make_q_results()
    >>> d['cat']['a'][1]['b']
    []
    '''
    def question_entry():
        # (reference list, {choice: reference list})
        return [], collections.defaultdict(list)
    def category_entry():
        return collections.defaultdict(question_entry)
    return collections.defaultdict(category_entry)
def q_a(ans_fn, m, filename, lineno, d_results, q_results):
    r'''Handles a matched score.got(/score.not_( line.

    ans_fn is the answer-parsing continuation (ans_tuple or single_ans).
    If text follows the open paren on the same line it is parsed now via
    get_question; otherwise a continuation is returned so the question is
    parsed from the next line.
    '''
    rest = m.group(1).strip()
    if rest:
        return get_question(ans_fn, rest, filename, lineno,
                            d_results, q_results)
    # Bug fix: previously returned functools.partial(get_questions, ans_fn),
    # but no get_questions exists in this module -- an empty rest raised
    # NameError.  The intended continuation is get_question.
    return functools.partial(get_question, ans_fn)
# Matches "category", "q_name", <rest> at the start of a question spec.
Question_re = re.compile(
    r'''\s* ['"] ( [^"']+ ) ["'] \s* ,
        \s* ['"] ( [^"']+ ) ['"] \s* ,
        (.*)''',
    re.VERBOSE)

def get_question(get_ans_fn, line, filename, lineno, d_results, q_results):
    r'''Parses a quoted (category, q_name) pair and records its reference.

    Returns the result of get_ans_fn when answer text follows on the same
    line, or a continuation partial when the answers are on a later line.
    '''
    match = Question_re.match(line)
    assert match, "%s(%d): invalid question: %s" % (filename, lineno, line)
    category = match.group(1).strip()
    question = match.group(2).strip()
    rest = match.group(3).strip()
    q_results[category][question][0].append((filename, lineno))
    if not rest:
        return functools.partial(get_ans_fn, category, question)
    return get_ans_fn(category, question, rest, filename, lineno,
                      d_results, q_results)
# Matches one quoted answer choice followed by a comma.
Single_ans_re = re.compile(
    r'''\s* ['"] ( [^'"]+ ) ['"] \s* ,''',
    re.VERBOSE)

def single_ans(category, question, ans, filename, lineno, d_results, q_results):
    r'''Records a single quoted answer choice for (category, question).

    Appends (filename, lineno) to the choice's reference list.  Returns
    None: a score.not_ call names exactly one choice.
    '''
    match = Single_ans_re.match(ans)
    assert match, "%s(%d): invalid answer: %s" % (filename, lineno, ans)
    choice = match.group(1).strip()
    choice_refs = q_results[category][question][1]
    choice_refs[choice].append((filename, lineno))
# Matches "choice = <rest>" in a score.got answer tuple.
Ans_tuple_re = re.compile(
    r'''\s* ( [^= ]+ ) \s* = \s* (.*)''',
    re.VERBOSE)

def ans_tuple(category, question, ans, filename, lineno, d_results, q_results):
    r'''Records one "choice = score" answer for (category, question).

    Appends the (filename, lineno) reference for the choice, then hands the
    remainder to skip_score; returns a continuation partial when the score
    is on a later line.
    '''
    match = Ans_tuple_re.match(ans)
    assert match, "%s(%d): invalid answer: %s" % (filename, lineno, ans)
    choice = match.group(1).strip()
    q_results[category][question][1][choice].append((filename, lineno))
    remainder = match.group(2).strip()
    if not remainder:
        return functools.partial(skip_score, category, question)
    return skip_score(category, question, remainder, filename, lineno,
                      d_results, q_results)
# Matches a numeric score (digits, sign, dot) followed by the rest of line.
Score_re = re.compile(r'''[-+\d.]+ (.*)''', re.VERBOSE)

def skip_score(category, question, score, filename, lineno, d_results,
               q_results):
    r'''Consumes a numeric score and routes what follows.

    A ')' ends the answer list (returns None to stop the continuation);
    otherwise the text after the separator is treated as the next
    "choice = score" pair via ans_tuple (mutual recursion), or a
    continuation partial is returned when the line ends first.
    '''
    m = Score_re.match(score)
    assert m, "%s(%d): invalid score: %s" % (filename, lineno, score)
    rest = m.group(1).strip()
    # NOTE(review): if the line ends exactly after the score (rest == ''),
    # rest[0] raises IndexError -- confirm whether that input can occur.
    if rest[0] == ')': return None
    # Skip the separator character (presumably ',') before the next pair.
    rest = rest[1:].lstrip()
    if rest:
        return ans_tuple(category, question, rest, filename, lineno,
                         d_results, q_results)
    return functools.partial(ans_tuple, category, question)
def diagnosis(m, filename, lineno, d_results, q_results):
    r'''Records one "use diagnosis(name," reference in d_results.'''
    diagnosis_name = m.group(1)
    d_results[diagnosis_name].append((filename, lineno))
# Dispatch table used by read_file: (compiled regex, handler) pairs tried in
# order against each stripped line.  A handler may return a continuation
# function to consume the next line of a multi-line construct.
Patterns = (
    (re.compile(r'use\s+diagnosis\((\w+),'), diagnosis),
    (re.compile(r'score\.got\((.*)'), functools.partial(q_a, ans_tuple)),
    (re.compile(r'score\.not_\((.*)'), functools.partial(q_a, single_ans)),
    )
def dump_d(d_results):
    r'''Debug dump: one line per diagnosis with its file:line references.'''
    for d, refs in d_results.iteritems():
        print "%s: %s" % (d, ' '.join("%s:%d" % (filename, lineno)
                                      for filename, lineno in refs))
def dump_q(q_results):
    r'''Debug dump of q_results: categories, questions, then choices,
    each with their file:line references.
    '''
    for category, questions in q_results.iteritems():
        print "%s:" % (category,)
        for q, (refs, choices) in questions.iteritems():
            print "%s: %s" % (q, ' '.join("%s:%d" % (filename, lineno)
                                          for filename, lineno in refs))
            for choice, refs in choices.iteritems():
                print "  %s: %s" % (choice,
                                    ' '.join("%s:%d" % (filename, lineno)
                                             for filename, lineno in refs))
def write_d_pot(d_results, filename):
    r'''Writes the diagnoses as a gettext .pot file under Translation_dir.

    Each entry carries its sorted file:line references as a #: comment;
    msgstr defaults to the untranslated name.
    '''
    with open(os.path.join(Translation_dir, filename), 'w') as f:
        po_headers.write_header(f)
        for d in sorted(d_results.iterkeys()):
            print >> f
            refs = d_results[d]
            refs.sort()
            print >> f, "#:", ' '.join("%s:%d" %
                                         (os.path.basename(file), lineno)
                                       for file, lineno in refs)
            print >> f, 'msgid "%s"' % (d,)
            print >> f, 'msgstr "%s"' % (d,)
def write_extracted_questions(q_results):
    r'''Merges q_results into extracted_questions.Questions and rewrites it.

    For each (category, q_name): ensures a Questions entry exists, refreshes
    each choice's reference string, blanks references of choices no longer
    seen, and finally deletes entries whose question disappeared entirely.
    '''
    if Debug: print "write_extracted_questions"
    all_q_names = set()
    for category, questions in q_results.iteritems():
        for q_name, (_, choices) in questions.iteritems():
            q_info = find_q_name(q_name, category, guess_q_type(choices.keys()))
            all_choices = set()
            for choice, references in choices.iteritems():
                choice_info = find_choice(q_info, choice)
                refs = ' '.join("%s:%d" % (os.path.basename(filename), lineno)
                                for filename, lineno in sorted(references))
                # choice_info is [choice] or [choice, refs]; slot 1 holds
                # the reference string.
                if len(choice_info) < 2: choice_info.append(refs)
                else: choice_info[1] = refs
                all_choices.add(choice)
            # Blank the references of choices that no longer appear.
            for c in q_info[3:]:
                if c[0] not in all_choices and len(c) > 1:
                    c[1] = ''
            all_q_names.add((q_name, category))
    # Drop Questions entries not seen in this run (delete back-to-front so
    # the remaining indexes stay valid).
    to_delete = []
    for i, q in enumerate(extracted_questions.Questions):
        if (q[0], q[1]) not in all_q_names: to_delete.append(i)
    for i in to_delete[::-1]:
        del extracted_questions.Questions[i]
    rewrite_extracted_questions()
def guess_q_type(choices):
    r'''Guesses a question type from its answer choices.

    A choice set contained in {yes, no} is "yes_no", one contained in
    {positive, negative} is "positive_negative"; anything else is
    "single_choice".  (An empty choice set is vacuously a subset, so it
    falls into the yes_no bucket.)
    '''
    choice_set = frozenset(choices)
    for candidates, q_type in ((('yes', 'no'), "yes_no"),
                               (('positive', 'negative'),
                                "positive_negative")):
        if choice_set.issubset(candidates):
            return q_type
    return "single_choice"
def find_q_name(q_name, category, q_type):
    r'''Finds or creates the Questions entry for (q_name, category).

    An existing entry is returned untouched (its recorded q_type wins);
    otherwise a new [q_name, category, q_type] entry is appended and
    returned.
    '''
    for entry in extracted_questions.Questions:
        if (entry[0], entry[1]) == (q_name, category):
            return entry
    entry = [q_name, category, q_type]
    extracted_questions.Questions.append(entry)
    return entry
def find_choice(q_info, choice):
    r'''Finds or creates the choice entry within a Questions record.

    Choice entries start at index 3 of q_info; a missing choice is appended
    as a new one-element list [choice] and returned.
    '''
    for entry in q_info[3:]:
        if entry[0] == choice:
            return entry
    entry = [choice]
    q_info.append(entry)
    return entry
def rewrite_extracted_questions():
    r'''Regenerates the extracted_questions.py source from Questions.

    Overwrites the module's own source file with a sorted, repr-formatted
    Questions list so the extraction results persist across runs.
    '''
    if Debug: print "rewrite_extracted_questions"
    with open(extracted_questions.__file__, 'w') as f:
        print >> f, "# extracted_questions.py"
        print >> f
        print >> f, "Questions = ["
        for q_info in sorted(extracted_questions.Questions):
            # First three slots are q_name, category, q_type; the rest are
            # per-choice lists.
            print >> f, "    [%r, %r, %r," % tuple(q_info[:3])
            for choice in q_info[3:]:
                print >> f, "        %r," % (choice,)
            print >> f, "    ],"
        print >> f, "]"
def usage():
    '''Print the command-line synopsis on stderr and exit with status 2.'''
    sys.stderr.write("usage: extract_questions.py krb_file...\n")
    raise SystemExit(2)
if __name__ == "__main__":
    # At least one .krb rule file must be named on the command line.
    if len(sys.argv) < 2: usage()
    d_results = collections.defaultdict(list)
    q_results = make_q_results()
    # Accumulate diagnosis and question references across all input files.
    for filename in sys.argv[1:]:
        read_file(filename, d_results, q_results)
    #dump_d(d_results)
    #dump_q(q_results)
    write_d_pot(d_results, "diagnosis.pot")
    write_extracted_questions(q_results)
| Python |
#!/usr/bin/python
# makepot.py
# .pot format:
#
# <whitespace>
# #. extracted comments (choices)
# #: file:line
# msgid "unstranslated_string"
# msgstr "translated_string"
import sys
import os
import itertools
import operator
from doctest_tools import setpath
Root_dir = setpath.setpath(__file__, remove_first=True)[0]
#print "Root_dir", Root_dir
from naimath.engine import extracted_questions, question
from naimath.lib import po_headers
Translation_dir = os.path.join(Root_dir, 'translation', '0.1', 'po')
#print "Translation_dir", Translation_dir
def write_q_pots():
    r'''Writes one .pot file per question category.

    Questions are grouped on their category field (index 1); groupby
    requires the input sorted by the same key.
    '''
    by_category = operator.itemgetter(1)
    ordered = sorted(extracted_questions.Questions, key=by_category)
    for category, questions in itertools.groupby(ordered, key=by_category):
        write_q_pot(questions, category)
def write_q_pot(questions, category):
    r'''Writes the questions of one category as <category>.pot.

    Question types whose class translates choices get one msgid per
    question plus one per "q_name:choice"; other types get a single msgid
    per question with all choice references merged into one #: comment.
    '''
    with open(os.path.join(Translation_dir, category + '.pot'), 'w') as f:
        po_headers.write_header(f)
        for q_info in sorted(questions):
            print >> f
            print >> f, "#. type:", q_info[2]
            q_name = q_info[0]
            # Entries after index 2 are per-choice [name, refs, comments...].
            choices = tuple(q_info[3:])
            print >> f, "#. choices:", ' '.join(c_info[0] for c_info in choices)
            if question.Classes[q_info[2]].translate_choices:
                print >> f, 'msgid "%s"' % (q_name,)
                print >> f, 'msgstr "%s"' % (q_name,)
                for c_info in choices:
                    print >> f
                    for extracted_comment in c_info[2:]:
                        print >> f, "#.", extracted_comment
                    if len(c_info) > 1 and c_info[1]:
                        print >> f, "#:", c_info[1]
                    print >> f, 'msgid "%s:%s"' % (q_name, c_info[0])
                    print >> f, 'msgstr "%s"' % (c_info[0],)
            else:
                for c_info in choices:
                    for extracted_comment in c_info[2:]:
                        print >> f, "#.", c_info[0] + ":", extracted_comment
                # Merge every choice's reference string into one #: line.
                refs = itertools.chain.from_iterable(c_info[1].split()
                                                     for c_info in choices
                                                      if len(c_info) > 1)
                print >> f, "#:", ' '.join(sorted(refs, key=ref_key))
                print >> f, 'msgid "%s"' % (q_name,)
                print >> f, 'msgstr "%s"' % (q_name,)
def ref_key(ref):
    r'''Sort key for a "file:lineno" reference: (file, int(lineno)).

    Splits on the LAST colon (rsplit) so a file name that itself contains
    a colon still parses; for plain "name.py:12" references the result is
    unchanged.  Also avoids shadowing the builtin `file`.
    '''
    filename, lineno = ref.rsplit(':', 1)
    return filename, int(lineno)
def usage():
    '''Print the command-line synopsis on stderr and exit with status 2.'''
    sys.stderr.write("usage: makepot.py\n")
    raise SystemExit(2)
if __name__ == "__main__":
    # makepot.py takes no arguments.
    if len(sys.argv) != 1: usage()
    write_q_pots()
| Python |
# web.py
# Possibly interesting values:
# CONTENT_LENGTH:
# CONTENT_TYPE: application/x-www-form-urlencoded
# PATH_INFO: /hello/mom/and/dad.html
# QUERY_STRING: this=value&that=too
# REMOTE_ADDR: 127.0.0.1
# REQUEST_METHOD: GET
# SCRIPT_NAME:
# wsgi.errors: <file>
# wsgi.file_wrapper: <file>
# wsgi.input: <file to read request body>
# wsgi.multiprocess: False
# wsgi.multithread: True
# wsgi.run_once: False
import webob
import wsgiref.util
Debug = 0  # nonzero => trace module imports on stderr

#Web_framework_dir = os.path.join(os.path.dirname(__file__), "media")
#
#Content_types = {
#    'html': 'text/html',
#    'js': 'text/javascript',
#    'css': 'text/css',
#    'gif': 'image/gif',
#    'png': 'image/png',
#    'jpeg': 'image/jpeg',
#    'jpg': 'image/jpeg',
#}

# Cache of already-imported page modules: {modulepath: module object}.
Module_cache = {}
def import_(modulename):
    r''' modulepath does not include .py

    Import dotted 'modulename' and return the leaf module.  __import__
    returns the top-level package, so walk down the remaining dotted
    components with getattr.
    '''
    if Debug: print >> sys.stderr, "import_:", modulename
    mod = __import__(modulename)
    for comp in modulename.split('.')[1:]:
        mod = getattr(mod, comp)
    return mod
def wsgi_app(environ, start_response):
    r'''WSGI entry point: routes /<module>/<function> to naimath.web.<module>.

    The named function is called with a webob.Request and must return a
    WSGI application (e.g. a webob.Response), which is then invoked to
    produce the actual response.
    '''
    global Module_cache
    # NOTE(review): assert is stripped under "python -O"; consider raising a
    # proper 4xx for malformed URLs instead.
    modulepath = wsgiref.util.shift_path_info(environ)
    assert modulepath, "Missing modulepath in URL"
    fn_name = wsgiref.util.shift_path_info(environ)
    assert fn_name, "Missing fn_name in URL"
    if modulepath not in Module_cache:
        Module_cache[modulepath] = import_('naimath.web.' + modulepath)
    fn = getattr(Module_cache[modulepath], fn_name)
    req = webob.Request(environ)
    return fn(req)(environ, start_response)
| Python |
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS

# Short aliases for the Cheetah NameMapper lookup helpers.
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.2.1'
__CHEETAH_versionTuple__ = (2, 4, 2, 'final', 1)
__CHEETAH_genTime__ = 1274495961.816674
__CHEETAH_genTimestamp__ = 'Fri May 21 22:39:21 2010'
__CHEETAH_src__ = 'symptoms.tmpl'
__CHEETAH_srcLastModified__ = 'Fri May 21 22:39:17 2010'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'

# Refuse to run under an older, incompatible Cheetah runtime.
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
      'This template was compiled with Cheetah version'
      ' %s. Templates compiled before version %s must be recompiled.'%(
          __CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class symptoms(Template):
    # NOTE(review): autogenerated by Cheetah from 'symptoms.tmpl' -- change
    # the template and regenerate rather than editing this class by hand.

    ##################################################
    ## CHEETAH GENERATED METHODS

    def __init__(self, *args, **KWs):
        super(symptoms, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)

    def respond(self, trans=None):
        ## CHEETAH: main method generated for this template
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter

        ########################################
        ## START - generated method body

        self._handleCheetahInclude(VFFSL(SL,"filesdir",True) + "/templates/base_start.txt", trans=trans, includeFrom="file", raw=False)
        write(u'''
''')
        #
        # title = title
        # phase = symptoms | ...
        # answers = [(q_name, answer)]
        # diagnosis_list = [(diagnosis, cf, [(question obj, weight)])]
        #
        write(u'''
<h2>Diagnosis:</h2>
''')
        for diagnosis in VFFSL(SL,"diagnosis_list",True): # generated from line 12, col 1
            write(u'''
  <h3>''')
            _v = VFFSL(SL,"diagnosis",True)[0] # u'${diagnosis[0]}' on line 14, col 7
            if _v is not None: write(_filter(_v, rawExpr=u'${diagnosis[0]}')) # from line 14, col 7.
            write(u'''(''')
            _v = VFN(VFFSL(SL,"operator",True),"mod",False)('%.2f', diagnosis[1]) # u"${operator.mod('%.2f', diagnosis[1])}" on line 14, col 23
            if _v is not None: write(_filter(_v, rawExpr=u"${operator.mod('%.2f', diagnosis[1])}")) # from line 14, col 23.
            write(u''')</h3>
''')
            if VFFSL(SL,"diagnosis",True)[2]: # generated from line 16, col 1
                write(u'''<form method="post" action="symptoms">
''')
                for question in VFFSL(SL,"diagnosis",True)[2]: # generated from line 19, col 3
                    write(u'''    ''')
                    _v = VFN(VFFSL(SL,"question",True)[0],"question",True) # u'${question[0].question}' on line 20, col 5
                    if _v is not None: write(_filter(_v, rawExpr=u'${question[0].question}')) # from line 20, col 5.
                    write(u'''(''')
                    _v = VFN(VFFSL(SL,"operator",True),"mod",False)('%.2f', VFFSL(SL,"question",True)[1]) # u"${operator.mod('%.2f', $question[1])}" on line 20, col 29
                    if _v is not None: write(_filter(_v, rawExpr=u"${operator.mod('%.2f', $question[1])}")) # from line 20, col 29.
                    write(u''')
''')
                    for choice in VFN(VFFSL(SL,"question",True)[0],"get_input_elements",True): # generated from line 21, col 5
                        write(u'''      ''')
                        _v = VFFSL(SL,"choice",True) # u'$choice' on line 22, col 7
                        if _v is not None: write(_filter(_v, rawExpr=u'$choice')) # from line 22, col 7.
                        write(u'''
''')
                    write(u'''    <br />
''')
                    write(u'''
''')
                for item in VFFSL(SL,"answers",True): # generated from line 27, col 3
                    write(u'''    <input type="hidden" name="''')
                    _v = VFFSL(SL,"item",True)[0] # u'$item[0]' on line 28, col 32
                    if _v is not None: write(_filter(_v, rawExpr=u'$item[0]')) # from line 28, col 32.
                    write(u'''" value="''')
                    _v = VFFSL(SL,"item",True)[1] # u'$item[1]' on line 28, col 49
                    if _v is not None: write(_filter(_v, rawExpr=u'$item[1]')) # from line 28, col 49.
                    write(u'''" />
''')
                write(u'''<input type="submit" />
</form>
''')
            write(u'''
''')
        write(u'''
''')
        self._handleCheetahInclude(VFFSL(SL,"filesdir",True) + "/templates/base_end.txt", trans=trans, includeFrom="file", raw=False)

        ########################################
        ## END - generated method body

        return _dummyTrans and trans.response().getvalue() or ""

    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES

    _CHEETAH__instanceInitialized = False
    _CHEETAH_version = __CHEETAH_version__
    _CHEETAH_versionTuple = __CHEETAH_versionTuple__
    _CHEETAH_genTime = __CHEETAH_genTime__
    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
    _CHEETAH_src = __CHEETAH_src__
    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
    _mainCheetahMethod_for_symptoms= 'respond'
## END CLASS DEFINITION

if not hasattr(symptoms, '_initCheetahAttributes'):
    # Let the installed Cheetah runtime graft its plumbing methods onto the
    # generated class.
    templateAPIClass = getattr(symptoms, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(symptoms)

# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/

##################################################
## if run from command line:
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=symptoms()).run()
| Python |
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS

# Short aliases for the Cheetah NameMapper lookup helpers.
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.2.1'
__CHEETAH_versionTuple__ = (2, 4, 2, 'final', 1)
__CHEETAH_genTime__ = 1274495961.6691689
__CHEETAH_genTimestamp__ = 'Fri May 21 22:39:21 2010'
__CHEETAH_src__ = 'canned.tmpl'
__CHEETAH_srcLastModified__ = 'Fri May 21 22:39:08 2010'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'

# Refuse to run under an older, incompatible Cheetah runtime.
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
      'This template was compiled with Cheetah version'
      ' %s. Templates compiled before version %s must be recompiled.'%(
          __CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class canned(Template):
    # NOTE(review): autogenerated by Cheetah from 'canned.tmpl' -- change
    # the template and regenerate rather than editing this class by hand.

    ##################################################
    ## CHEETAH GENERATED METHODS

    def __init__(self, *args, **KWs):
        super(canned, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)

    def respond(self, trans=None):
        ## CHEETAH: main method generated for this template
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter

        ########################################
        ## START - generated method body

        self._handleCheetahInclude(VFFSL(SL,"filesdir",True) + "/templates/base_start.txt", trans=trans, includeFrom="file", raw=False)
        write(u'''
''')
        #
        # title = title
        # questions = [question object]
        #
        write(u'''
<p>Please answer the following questions.</p>
<form method="post" action="../symptoms">
''')
        for question in VFFSL(SL,"questions",True): # generated from line 11, col 3
            write(u'''    ''')
            _v = VFFSL(SL,"question.question",True) # u'$question.question' on line 12, col 5
            if _v is not None: write(_filter(_v, rawExpr=u'$question.question')) # from line 12, col 5.
            write(u'''
''')
            for choice in VFFSL(SL,"question.get_input_elements",True): # generated from line 13, col 5
                write(u'''      ''')
                _v = VFFSL(SL,"choice",True) # u'$choice' on line 14, col 7
                if _v is not None: write(_filter(_v, rawExpr=u'$choice')) # from line 14, col 7.
                write(u'''
''')
            write(u'''    <br />
''')
        write(u'''<input type="submit" />
</form>
''')
        self._handleCheetahInclude(VFFSL(SL,"filesdir",True) + "/templates/base_end.txt", trans=trans, includeFrom="file", raw=False)

        ########################################
        ## END - generated method body

        return _dummyTrans and trans.response().getvalue() or ""

    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES

    _CHEETAH__instanceInitialized = False
    _CHEETAH_version = __CHEETAH_version__
    _CHEETAH_versionTuple = __CHEETAH_versionTuple__
    _CHEETAH_genTime = __CHEETAH_genTime__
    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
    _CHEETAH_src = __CHEETAH_src__
    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
    _mainCheetahMethod_for_canned= 'respond'
## END CLASS DEFINITION

if not hasattr(canned, '_initCheetahAttributes'):
    # Let the installed Cheetah runtime graft its plumbing methods onto the
    # generated class.
    templateAPIClass = getattr(canned, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(canned)

# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/

##################################################
## if run from command line:
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=canned()).run()
| Python |
# static.py
# Possibly interesting values:
# CONTENT_LENGTH:
# CONTENT_TYPE: application/x-www-form-urlencoded
# PATH_INFO: /hello/mom/and/dad.html
# QUERY_STRING: this=value&that=too
# REMOTE_ADDR: 127.0.0.1
# REQUEST_METHOD: GET
# SCRIPT_NAME:
# wsgi.errors: <file>
# wsgi.file_wrapper: <file>
# wsgi.input: <file to read request body>
# wsgi.multiprocess: False
# wsgi.multithread: True
# wsgi.run_once: False
import os
import webob
import webob.exc
# Project root: three directory levels up from this module.
Root_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
# All static files are served from under this directory.
Web_framework_dir = os.path.join(Root_dir, "files", "static")

# File suffix -> Content-Type header value.
Content_types = {
    'html': 'text/html',
    'js': 'text/javascript',
    'css': 'text/css',
    'gif': 'image/gif',
    'png': 'image/png',
    'jpeg': 'image/jpeg',
    'jpg': 'image/jpeg',
}
def serve(req):
    r'''Serve a static file from under Web_framework_dir.

    'req' is a webob.Request whose remaining path names the file (empty
    path means index.html).  Returns a webob.Response on success, or
    webob.exc.HTTPNotFound when the file is missing or the path tries to
    escape the static root.
    '''
    # Parse the path:
    path = req.path_info.lstrip('/')
    if not path:
        path = 'index.html'
    # Security: the path comes from the client; normalize it and refuse
    # anything (e.g. "..") that resolves outside the static root.
    root = os.path.normpath(Web_framework_dir)
    full_path = os.path.normpath(os.path.join(root, path))
    if not full_path.startswith(root + os.sep):
        return webob.exc.HTTPNotFound(path)
    # A suffix-less or unknown-suffix file used to die with IndexError /
    # KeyError; fall back to a generic binary content type instead.
    suffix = path.rsplit('.', 1)[-1] if '.' in path else ''
    content_type = Content_types.get(suffix, 'application/octet-stream')
    try:
        with open(full_path, 'rb') as f:
            data = f.read()
    except IOError:
        # Report the requested path, not the server's filesystem path.
        return webob.exc.HTTPNotFound(path)
    return webob.Response(request=req, status="200 OK",
                          content_type=content_type,
                          body=data)
| Python |
# stages.py
# Possibly interesting values:
# CONTENT_LENGTH:
# CONTENT_TYPE: application/x-www-form-urlencoded
# PATH_INFO: /hello/mom/and/dad.html
# QUERY_STRING: this=value&that=too
# REMOTE_ADDR: 127.0.0.1
# REQUEST_METHOD: GET
# SCRIPT_NAME:
# HTTP_ACCEPT_LANGUAGE: en-us,en;q=0.5
# wsgi.errors: <file>
# wsgi.file_wrapper: <file>
# wsgi.input: <file to read request body>
# wsgi.multiprocess: False
# wsgi.multithread: True
# wsgi.run_once: False
import os
import webob
import operator
# Naimath engine
from naimath.engine import engine, question
Debug = False  # set True to trace request handling on stdout
def init(path, supers):
global Rootdir, Localedir, Filesdir
path = os.path.abspath(path)
if not os.path.isdir(path): path = os.path.dirname(path)
for i in range(supers): path = os.path.dirname(path)
Rootdir = path
Localedir = os.path.join(path, 'translation', '0.1')
Filesdir = os.path.join(path, 'files')
if Debug: print "Rootdir", Rootdir
if Debug: print "Localedir", Localedir
if Debug: print "Filesdir", Filesdir
# Set default Localedir:
# (module lives two levels below the project root)
init(__file__, 2)
def load_translations(req, *domains):
    r'''Install translations matching the request's Accept-Language header.'''
    languages = req.accept_language.best_matches()
    if Debug: print "languages", languages
    # NOTE(review): the engine's question.load_translations is declared
    # elsewhere as load_translations(*languages) -- confirm this
    # (localedir, languages, *domains) call matches the deployed API.
    question.load_translations(Localedir, languages, *domains)
def reader(file):
    r'''Yield the significant content of each line in 'file'.

    Everything from the first '#' to the end of a line is discarded, the
    remainder is stripped of surrounding whitespace, and lines that end up
    empty are skipped.
    '''
    for raw in file:
        content = raw.split('#', 1)[0].strip()
        if content:
            yield content
def index(req):
    r'''Serve the static index page.'''
    index_path = os.path.join(Filesdir, 'static', 'index.html')
    with open(index_path, 'rt') as f:
        body = f.read()
    return webob.Response(request=req, status="200 OK",
                          content_type="text/html", body=body)
def canned_questions(req):
    r'''Render the canned-questions page named by the rest of the URL path.

    The remaining path names a file under files/canned_questions listing
    question names, one per line ('#' starts a comment -- see reader()).
    '''
    if Debug: print "PATH_INFO", req.path_info
    if Debug: print "SCRIPT_NAME", req.script_name
    load_translations(req, 'question')
    # Imported here, after load_translations -- presumably so the template
    # sees the freshly loaded catalogs; confirm before moving to the top.
    from naimath.web.templates import canned
    t = canned.canned()
    t.title = 'Canned Questions'
    t.operator = operator
    t.filesdir = Filesdir
    with open(os.path.join(Filesdir, 'canned_questions',
                           req.path_info.lstrip('/')),
              'rt') \
      as f:
        # NOTE(review): question.lookup is declared elsewhere as
        # lookup(category, q_name) but called here with one argument --
        # confirm which API version is deployed.
        t.questions = [question.lookup(q) for q in reader(f)]
    return webob.Response(request=req, status="200 OK",
                          content_type="text/html", body=str(t))
def symptoms(req):
    r'''Render the symptoms/diagnosis page.'''
    load_translations(req, 'diagnosis', 'question')
    from naimath.web.templates import symptoms
    page = symptoms.symptoms()
    page.title = 'Symptoms'
    page.phase = 'symptoms'
    populate(page, req)
    return webob.Response(request=req, status="200 OK",
                          content_type="text/html", body=str(page))
def get_answers(req):
ans = req.params
if Debug: print "answers", ans
return ans
def populate(template, req):
    r'''Fill in the template attributes shared by the diagnosis pages.

    Runs the engine consultation against the submitted answers and stores,
    sorted by certainty factor (highest first):

        template.diagnosis_list = [(diagnosis, cf,
                                    [(question object, weight)...])...]

    along with template.answers, template.operator and template.filesdir.
    '''
    template.operator = operator
    template.filesdir = Filesdir
    answers = get_answers(req)
    if Debug: print "answers", answers
    if Debug: print "dict(answers)", dict(answers)
    if Debug: print "answers.items()", answers.items()
    template.answers = answers.items()
    # Each consult result is (d, cf, q_list); translate d into a display
    # name and each q into a question object, sorting the still-needed
    # questions within a diagnosis by weight, highest first.
    template.diagnosis_list = \
      sorted(((question.Map['diagnosis'](d), cf,
               sorted(((question.lookup(q), w) for q, w in q_list),
                      key = lambda t: t[1], # weight
                      reverse = True))
              for d, cf, q_list
              in engine.consult('pulmonary', question=answers)),
             key = lambda x: x[1],          # certainty factor
             reverse = True)
| Python |
#!/usr/bin/python

# wsgi.py
#
# Development entry point: serve the naimath web app with the stdlib
# reference WSGI server on port 8000 (runs until interrupted).

import wsgiref.simple_server

from doctest_tools import setpath
# Presumably puts the project root on sys.path so the naimath package
# imports below resolve -- see doctest_tools.setpath.
setpath.setpath(__file__, remove_first = True)

from naimath.web import web
#web.init(__file__, 2)

httpd = wsgiref.simple_server.make_server('', 8000, web.wsgi_app)
print "Serving HTTP on port 8000..."
httpd.serve_forever()
# score.py
r'''These all return a three-tuple:
- the score for this answer
- the max score
- the (category, question) that still needs to be asked, or None (if the
question has already been asked).
'''
from naimath.engine import engine
def got(category, question, **answers):
    r'''Score a "got this answer" test against the knowledge base.

    'answers' maps answer value -> score contribution.  Proves
    category.question($answer) and sums the contribution of every answer
    bound by the engine.

    Returns (score, max_score, still_needed) where still_needed is None if
    the question has already been answered, else (category, question).
    '''
    with engine.Engine.prove_goal("%s.%s($answer)" % (category, question)) \
      as gen:
        score = 0.0
        found = False
        for vars, plan in gen:
            score += answers.get(vars['answer'], 0.0)
            found = True
        # NOTE(review): max() raises ValueError if 'answers' is empty --
        # presumably callers always pass at least one answer; confirm.
        max_score = max(answers.itervalues())
        return score, max_score, (None if found else (category, question))
def not_(category, question, answer, score):
    r'''Score the absence of a particular answer.

    Returns (0, score, None) when 'answer' was given, (score, score, None)
    when the question was answered with something else, and
    (0, score, (category, question)) when it has not been asked yet.
    '''
    goal = "%s.%s($answer)" % (category, question)
    with engine.Engine.prove_goal(goal) as gen:
        answered = False
        for bindings, _plan in gen:
            if bindings['answer'] == answer:
                return 0, score, None
            answered = True
        if answered:
            return score, score, None
        return 0, score, (category, question)
| Python |
# question.py
import os
import sys
import gettext
# Project root: three directory levels above this module.
Root_dir = os.path.dirname(
             os.path.dirname(
               os.path.dirname(os.path.abspath(__file__))))
#print "Root_dir", Root_dir
# Compiled gettext catalogs live here.
Locale_dir = os.path.join(Root_dir, 'translation', '0.1')
#print "Locale_dir", Locale_dir
from naimath.engine import extracted_questions

Map = {}        # {domain: lookup_fn} -- set by caller (web.py)
Questions = {}  # {(category, q_name): question}
def load_translations(*languages):
    r'''Install gettext catalogs for 'languages' and build all questions.

    Fills Map with one gettext lookup per translation domain and Questions
    with one question object per (category, q_name) in the extracted
    question data.

    Raises AssertionError on a duplicate question and ValueError on an
    unknown question type.
    '''
    # One translation domain per question category, plus the fixed ones.
    domains = set(q_info[1] for q_info in extracted_questions.Questions)
    domains.add('diagnosis')
    domains.add('standard')
    for domain in domains:
        tr = gettext.translation(domain, Locale_dir, languages,
                                 codeset = 'UTF-8')
        Map[domain] = tr.gettext
    # create all question objects:
    for q_info in extracted_questions.Questions:
        q_name, category, q_type = q_info[:3]
        if (category, q_name) in Questions:
            raise AssertionError("duplicate question: %s.%s" % (category,
                                                                q_name))
        try:
            q_class = Classes[q_type]
        except KeyError:
            raise ValueError("illegal question type, %s, for %s.%s question" %
                               (q_type, category, q_name))
        Questions[category, q_name] = \
          q_class(category, q_name, tuple(c[0] for c in q_info[3:]))
def diagnosis(name):
    r'''Translate a diagnosis name through the 'diagnosis' gettext domain.'''
    translate = Map['diagnosis']
    return translate(name)
def lookup(category, q_name):
    r'''Return the question object for (category, q_name).

    Raises KeyError if load_translations() has not created it yet.
    '''
    return Questions[(category, q_name)]
def read_canned_questions(filename):
    r'''Parse a canned-questions file.

    Returns (questions, top_list) -- see read_categories for the shapes.
    '''
    questions = {}
    with open(filename) as f:
        significant_lines = iter(ignore_comments(f))
        top_list = read_categories(questions, filename, significant_lines)[0]
        return questions, top_list
def read_categories(questions, filename, lines, indent = -1):
    r'''Returns the top list of questions, updates 'questions'.

    The returned questions are (category, question_name), which are also the
    keys in questions.

    It also returns the lineno, indent_level, stripped_line of the next line
    after the categories at this indent level.  Returns (None, None, None) at
    EOF.

    'questions' is {(category, question_name):
                        {answer: [(category, question_name)...]}}

        >>> questions = {}
        >>> read_categories(questions, 'some_file',
        ...                 iter(ignore_comments(('top',))), 0)
        ([], 1, 0, 'top')
        >>> questions
        {}
    '''
    my_indent = None
    top_list = []
    lineno, new_indent, line = next(lines, (None, None, None))
    while lineno is not None:
        if my_indent is None:
            # First line fixes the expected indent for this level; a line
            # at or above the caller's indent belongs to the caller.
            if new_indent <= indent: break
            my_indent = new_indent
        elif new_indent < my_indent:
            # Dedent: this level is finished; hand the line back up.
            break
        elif new_indent != my_indent:
            raise SyntaxError("Improper indent level for category",
                              (filename, lineno, new_indent, line))
        # 'line' names a category; read the questions nested under it.
        q_list, lineno, new_indent, line = \
          read_questions(line, questions, filename, lines, my_indent)
        top_list.extend(q_list)
    return top_list, lineno, new_indent, line
def read_questions(category, questions, filename, lines, indent):
    r'''Reads questions under a category.

    Returns question_list, lineno, indent, stripped_line.  Question_list is a
    list of (category, question_name).  The lineno, indent and stripped line
    are for the next line after the questions.  This will be all None values
    at EOF.

        >>> questions = {}
        >>> read_questions('cat', questions, 'some_file',
        ...                iter(ignore_comments((
        ...                    '  q1',
        ...                    '  q2',
        ...                    'top',
        ...                ))), 0)
        ([('cat', 'q1'), ('cat', 'q2')], 3, 0, 'top')
        >>> questions
        {}
    '''
    my_indent = None
    top_list = []
    lineno, new_indent, line = next(lines, (None, None, None))
    while lineno is not None:
        if my_indent is None:
            # A category with nothing nested under it is an error.
            if new_indent <= indent:
                raise SyntaxError("Missing Questions",
                                  (filename, lineno, new_indent, line))
            my_indent = new_indent
        elif new_indent < my_indent:
            # Dedent: no more questions at this level.
            break
        elif new_indent != my_indent:
            raise SyntaxError("Improper indent level for question",
                              (filename, lineno, new_indent, line))
        q = category, line
        top_list.append(q)
        # Read this question's answers (and their nested questions).
        lineno, new_indent, line = \
          read_answers(q, questions, filename, lines, my_indent)
    return top_list, lineno, new_indent, line
def read_answers(q, questions, filename, lines, indent):
    r'''Reads answers and subquestions for question 'q'.

    Returns the lineno, indent, stripped_line of the next line after the
    answers.  This will be all None values at EOF.

        >>> questions = {}
        >>> read_answers(('cat', 'q1'), questions, 'some_file',
        ...              iter(ignore_comments((
        ...                  '    a1',
        ...                  '      cat2',
        ...                  '        q3',
        ...                  '    a2',
        ...                  '      cat4',
        ...                  '        q5',
        ...                  '        q6',
        ...              ))), 2)
        (None, None, None)
        >>> for q, answers in sorted(questions.iteritems()):
        ...     print q
        ...     for a, q_list in sorted(answers.iteritems()):
        ...         print " ", a, q_list
        ('cat', 'q1')
           a1 [('cat2', 'q3')]
           a2 [('cat4', 'q5'), ('cat4', 'q6')]
    '''
    my_indent = None
    lineno, new_indent, line = next(lines, (None, None, None))
    while lineno is not None:
        if my_indent is None:
            # A question with no answers is legal; only create its entry
            # in 'questions' once the first answer is seen.
            if new_indent <= indent: break
            answers = {}
            questions[q] = answers
            my_indent = new_indent
        elif new_indent < my_indent:
            # Dedent: no more answers for this question.
            break
        elif new_indent != my_indent:
            raise SyntaxError("Improper indent level for answer",
                              (filename, lineno, new_indent, line))
        answer = line
        # The categories nested under an answer are its follow-up questions.
        nested_questions, lineno, new_indent, line = \
          read_categories(questions, filename, lines, my_indent)
        answers[answer] = nested_questions
    return lineno, new_indent, line
def ignore_comments(f):
    r'''Generate lines from f, ignoring blank lines and comments.

    Yields lineno, indent_level, stripped_line:

        >>> tuple(ignore_comments(('  \n', '  # comment', '    hi\n')))
        ((3, 4, 'hi'),)
    '''
    for lineno, raw in enumerate(f, 1):
        indent, text = indent_level(raw)
        if not text:
            continue                    # blank line
        if text.startswith('#'):
            continue                    # comment line
        yield lineno, indent, text
def indent_level(line):
    r'''Returns indent_level and stripped line.

        >>> indent_level('    hello  \n')
        (4, 'hello')
    '''
    without_trailing = line.rstrip()
    content = without_trailing.lstrip()
    return len(without_trailing) - len(content), content
class question(object):
r'''Base class for all questions.
All of the methods here, except __init__, are obsolete.
'''
translate_choices = True
def __init__(self, category, q_name, choices):
self.category = category
self.q_name = q_name
self.question = self.lookup_question()
self.choices = tuple((choice, self.lookup_choice(choice))
for choice in choices)
def lookup_question(self):
try:
return Map[self.category](self.q_name)
except KeyError:
raise ValueError("illegal category: " + self.category)
def lookup_choice(self, choice):
try:
return Map[self.category]("%s:%s" % (self.q_name, choice))
except KeyError:
raise ValueError("illegal category: " + self.category)
def ask(self):
while True:
ans_text = self.get_answer()
try:
ans = self.translate_answer(ans_text)
print 'answer for %s.%s is %r' % \
(self.category, self.q_name, ans)
return ans
except ValueError, e:
print >> sys.stderr, e
self.print_retry()
def print_retry(self):
print "Illegal answer: try again"
def radio_input(self, value, tr):
return '<input type="radio" name="%s" value="%s" /> %s' % \
(self.q_name, value, tr)
def get_input_elements(self):
r'''Returns [input element].
'''
return [self.radio_input(v, tr) for v, tr in self.choices.iteritems()]
class single_choice(question):
    r'''Single answer from a list of choices.'''
    prompt = 'Select one answer: '
    def get_answer(self):
        # Obsolete console interface: show the translated choices numbered
        # from 1 and read the user's pick.
        print self.question
        for i, (_, answer) in enumerate(self.choices, start=1):
            print i, answer
        return raw_input(self.prompt)
    def translate_answer(self, ans):
        # Map the 1-based numeric reply back to the choice value.
        i = int(ans.strip())
        if i < 1 or i > len(self.choices):
            raise ValueError("choice out of range, select 1-%d" %
                               len(self.choices))
        return self.choices[i-1][0]
class multiple_choices(single_choice):
    r'''Multiple answers from a list of choices.'''
    prompt = 'Select as many as apply, separated by commas: '
    def translate_answer(self, ans):
        # An empty reply means "none of them".
        if not ans:
            return ()
        translate_one = super(multiple_choices, self).translate_answer
        return tuple(translate_one(part) for part in ans.split(','))
class yes_no(single_choice):
    r'''Simple yes/no question.'''
    choices = ('yes', 'no')          # fixed; the caller's choices are ignored
    translate_choices = False
    def __init__(self, category, q_name, choices):
        # Always use the fixed yes/no pair, whatever was extracted.
        super(yes_no, self).__init__(category, q_name, self.choices)
    def lookup_choice(self, choice):
        # yes/no come from the shared 'standard' translation domain, not
        # the per-category one.
        try:
            return Map['standard'](choice)
        except KeyError:
            raise ValueError("missing translation: standard")
class positive_negative(yes_no):
    r'''Looking for positive/negative answer (e.g., for results of lab tests).

    Identical to yes_no apart from the fixed choice values.
    '''
    choices = ('positive', 'negative')
class textinput(question):
    r'''Answer is simply text that the user enters.'''
    def get_answer(self):
        # Obsolete console interface.
        return raw_input(self.question + ' ')
    def translate_answer(self, ans):
        # Free text: any answer is legal.
        return ans
# Question-type name (as it appears in extracted_questions) -> the class
# implementing it; used by load_translations() to build question objects.
Classes = {
    'single_choice': single_choice,
    'multiple_choices': multiple_choices,
    'textinput': textinput,
    'yes_no': yes_no,
    'positive_negative': positive_negative,
}
#class question(object):
# def __init__(self, name, question, f):
# self.name = name
# self.question = question
# self.init2(f)
#
# def init2(self, f):
# pass
#
# @classmethod
# def from_file(cls, filename, ans = None):
# if ans is None: ans = {}
# with read_lines(open(filename)) as f:
# for line, lineno in f:
# print "from_file got:", line, lineno
# assert not line[0].isspace(), \
# "%s(%d): indenting not allowed" % (filename, lineno)
# name, question = line.split(None, 1)
# ans[name] = cls(name, question, f)
# return ans
#
# def ask(self):
# while True:
# ans = raw_input(self.question)
# try:
# return self.convert(ans)
# except ValueError:
# pass
# self.print_retry()
#
#class yn(question):
# def convert(self, ans):
# ans = ans.lower()
# if ans in ('t', 'true', 'y', 'yes'): return True
# if ans in ('f', 'false', 'n', 'no'): return False
# raise ValueError
#
# def print_retry(self):
# print "Illegal answer: enter 'yes' or 'no' (or 'y' or 'n')"
#
#class fill_in_the_blank(question):
# def convert(self, ans):
# try:
# return int(ans)
# except ValueError:
# try:
# return float(ans)
# except ValueError:
# return ans
#
#class choice(question):
# def init2(self, f):
# self.choices = {}
# for line, lineno in f:
# if not line[0].isspace():
# f.push(line)
# print "choice", self.name, "terminated with", line
# return
# name, description = line.split(None, 1)
# self.choices[name] = description
#
# def ask(self):
# print self.question
# choices = sorted(self.choices.iteritems(),
# key=lambda (label, desc): desc)
# for i, (label, description) in enumerate(choices):
# print " %d: %s" % (i + 1, description)
# while True:
# ans = raw_input("? ")
# try:
# return choices[int(ans) - 1][0]
# except ValueError:
# pass
# self.print_retry()
#
# def print_retry(self):
# print "Illegal entry: enter a number between 1 and", len(self.choices)
class read_lines(object):
    r'''Iterator over the significant lines of 'file'.

    Yields (line, lineno) for each line that still has content after
    stripping '#' comments and trailing whitespace (leading whitespace is
    preserved so callers can see indentation).  Blank and comment lines
    are skipped but still counted, so 'lineno' is the line's physical
    position in the file.

    Supports one line of push-back via push(), and closes the underlying
    file when used as a context manager.
    '''
    def __init__(self, file):
        self.file = file
        self.fileiter = iter(file)
        self.line = None    # pushed-back line, if any
        self.lineno = 0     # number of the last line read from the file
    def close(self):
        self.file.close()
    def __enter__(self):
        return self
    def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
        self.close()
    def __iter__(self):
        return self
    def next(self):
        if self.line is not None:
            # Re-deliver the pushed-back line with its original number.
            line = self.line
            self.line = None
            return line, self.lineno
        for line in self.fileiter:
            # Bug fix: count every physical line, not just the skipped
            # ones, so lineno really is the line's position in the file
            # (previously it only advanced on blank/comment lines).
            self.lineno += 1
            # Bug fix: strip the comment before trimming trailing
            # whitespace, so "x  # c" yields "x", not "x  ".
            pound = line.find('#')
            if pound >= 0: line = line[:pound]
            line = line.rstrip()
            if line:
                return line, self.lineno
        raise StopIteration
    __next__ = next             # Python 3 iterator protocol
    def push(self, line):
        self.line = line
| Python |
# helpers.py

# Minimum certainty factor for a result to be considered.
#cf_threshold = 0.2
cf_threshold = 0.0
def sum_cf(*scores):
    r'''Sums the CF from each score and divides by the sum of the max
    scores.

    Also calculates the weight for each non-None question as the max score
    in the score containing the question divided by the sum of the max
    scores.

    Each score is: (cf, max_score, question (or None))
    Result is: (cf, ((question, weight), ...))
    '''
    basis = sum(max_score for _cf, max_score, _q in scores)
    total_cf = sum(cf for cf, _ms, _q in scores)
    still_needed = tuple((question, max_score / basis)
                         for _cf, max_score, question in scores
                         if question is not None)
    return total_cf / basis, still_needed
def mult_cf(*sums):
    r'''Multiply several summed CFs into one.

    Each argument has the form (cf, ((question, weight), ...)), as returned
    by sum_cf.  The result CF is the product of all the input CFs.  Every
    question's weight is rescaled by the product of all the *other* CFs
    (computed as weight * product / cf, falling back to the unscaled weight
    when cf is zero), and all questions are merged into a single tuple.

    The answer has the same form as each input: (cf, ((question, weight), ...))
    '''
    product = 1.0
    for cf, _ in sums:
        product *= cf
    rescaled = []
    for cf, questions in sums:
        for question, weight in questions:
            if cf:
                rescaled.append((question, weight * product / cf))
            else:
                rescaled.append((question, weight))
    return product, tuple(rescaled)
| Python |
# extracted_questions.py
# Master list of askable questions, extracted from the .krb rule files.
# Each entry has the shape:
#   [name, category, question_type,
#    [answer, 'krbfile:line references supported by this answer', optional label],
#    ...]
# where category is 'symptom' | 'sign' | 'investigation' and question_type
# is 'yes_no' | 'single_choice' | 'multiple_choices' | 'positive_negative'.
# An empty/missing reference string means the answer matches no rule.  The
# same name may appear twice under different categories (e.g. 'cyanosis',
# 'wheeze') -- those are distinct questions.
# NOTE(review): 'cough_duration' lists both '1yr_to_2yrs' (no refs) and
# 'one_yr_to_2yrs', and 'ppd' lists both '10mm_to_20mm' (no refs) and
# 'ten_mm_to_20mm'.  These look like leftover renames -- confirm which
# spelling the rules actually expect.
Questions = [
    ['RAST', 'investigation', 'positive_negative',
        ['positive', 'pulmonary.krb:71 pulmonary.krb:182'],
    ],
    ['abg_pco2', 'investigation', 'multiple_choices',
        ['less_than_40'],
        ['more_than_40', 'pulmonary.krb:95'],
    ],
    ['accessorymuscles_respiration', 'sign', 'yes_no',
        ['yes', 'pulmonary.krb:88'],
    ],
    ['altered_consciousness', 'sign', 'yes_no',
        ['yes', 'pulmonary.krb:90'],
    ],
    ['altered_sensorium', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:84'],
    ],
    ['asterixis', 'sign', 'yes_no',
        ['yes', 'pulmonary.krb:408'],
    ],
    ['breath_sounds', 'sign', 'multiple_choices',
        ['decreased', 'pulmonary.krb:92'],
    ],
    ['breathing_pattern', 'sign', 'multiple_choices',
        ['prolonged_expiration', 'pulmonary.krb:53 pulmonary.krb:164'],
    ],
    ['chest_shape', 'sign', 'multiple_choices',
        ['barrel_shaped', 'pulmonary.krb:52 pulmonary.krb:163 pulmonary.krb:304 pulmonary.krb:346 pulmonary.krb:382'],
    ],
    ['chestpain', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:43 pulmonary.krb:154 pulmonary.krb:299 pulmonary.krb:341 pulmonary.krb:377 pulmonary.krb:432'],
    ],
    ['cold_duration', 'symptom', 'single_choice',
        ['less_than_2weeks'],
        ['more_than_2weeks', 'pulmonary.krb:228 pulmonary.krb:238'],
    ],
    ['cough', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:20 pulmonary.krb:141 pulmonary.krb:281 pulmonary.krb:323 pulmonary.krb:365 pulmonary.krb:418'],
    ],
    ['cough_associated', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:35 pulmonary.krb:146'],
    ],
    ['cough_duration', 'symptom', 'single_choice',
        ['less_than_15days', 'pulmonary.krb:372 pulmonary.krb:423'],
        ['more_than_15days', 'pulmonary.krb:424'],
        ['less_than_1yr', 'pulmonary.krb:286 pulmonary.krb:328'],
        ['1yr_to_2yrs', ''],
        ['more_than_2yr', 'pulmonary.krb:288 pulmonary.krb:330'],
        ['one_yr_to_2yrs', 'pulmonary.krb:287 pulmonary.krb:329'],
    ],
    ['cough_episodes', 'symptom', 'single_choice',
        ['less_than_3', 'pulmonary.krb:289 pulmonary.krb:331 pulmonary.krb:371'],
        ['more_than_3', 'pulmonary.krb:290 pulmonary.krb:332 pulmonary.krb:370'],
    ],
    ['cough_episodic', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:26 pulmonary.krb:131'],
    ],
    ['cough_paroxysmal', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:34 pulmonary.krb:145'],
    ],
    ['cough_timing', 'symptom', 'single_choice',
        ['night', 'pulmonary.krb:28 pulmonary.krb:133'],
        ['earlymorning', 'pulmonary.krb:27 pulmonary.krb:132'],
    ],
    ['cough_type', 'symptom', 'single_choice',
        ['dry', 'pulmonary.krb:41 pulmonary.krb:152 pulmonary.krb:283 pulmonary.krb:325 pulmonary.krb:367 pulmonary.krb:420'],
        ['productive', 'pulmonary.krb:40 pulmonary.krb:151 pulmonary.krb:282 pulmonary.krb:324 pulmonary.krb:366 pulmonary.krb:419'],
    ],
    ['crackles', 'sign', 'yes_no',
        ['yes', 'pulmonary.krb:437'],
    ],
    ['cxr', 'investigation', 'single_choice',
        ['hyperinflation', 'pulmonary.krb:59 pulmonary.krb:170 pulmonary.krb:313 pulmonary.krb:355 pulmonary.krb:388'],
        ['lowerlobe_infiltrate', 'pulmonary.krb:439'],
        ['upperlobe_inflitrate', 'pulmonary.krb:438'],
        ['increased_bronchial_marking', 'pulmonary.krb:314 pulmonary.krb:356 pulmonary.krb:389'],
        ['pleural_effusion', 'pulmonary.krb:440'],
    ],
    ['cyanosis', 'sign', 'yes_no',
        ['yes', 'pulmonary.krb:407'],
    ],
    ['cyanosis', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:85'],
    ],
    ['dizziness', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:127'],
    ],
    ['dysnpea_grade', 'symptom', 'single_choice',
        ['one', 'pulmonary.krb:294 pulmonary.krb:336'],
        ['two', 'pulmonary.krb:295 pulmonary.krb:337'],
        ['three', 'pulmonary.krb:296 pulmonary.krb:338'],
        ['four', 'pulmonary.krb:297 pulmonary.krb:339'],
    ],
    ['dysnpea_onset', 'symptom', 'single_choice',
        ['gradual', 'pulmonary.krb:293 pulmonary.krb:335 pulmonary.krb:375 pulmonary.krb:430'],
        ['sudden', 'pulmonary.krb:292 pulmonary.krb:334 pulmonary.krb:374 pulmonary.krb:429'],
    ],
    ['dyspnea', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:291 pulmonary.krb:333 pulmonary.krb:373 pulmonary.krb:428'],
    ],
    ['eosinophils_in_secretions', 'investigation', 'positive_negative',
        ['positive', 'pulmonary.krb:216'],
        ['negative', 'pulmonary.krb:250'],
    ],
    ['esr', 'investigation', 'single_choice',
        ['raised', 'pulmonary.krb:448'],
        ['normal', 'pulmonary.krb:449'],
    ],
    ['exercise_test', 'investigation', 'positive_negative',
        ['positive', 'pulmonary.krb:69 pulmonary.krb:180'],
    ],
    ['fever', 'symptom', 'single_choice',
        ['low_grade', 'pulmonary.krb:198', '<100 degF'],
        ['high_grade', '', '>100 degF'],
        ['yes', 'pulmonary.krb:426'],
        ['none'],
    ],
    ['fever_eveningrise', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:427'],
    ],
    ['fho_copd', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:357'],
    ],
    ['fnac', 'investigation', 'single_choice',
        ['caseating_granuloma', 'pulmonary.krb:446'],
        ['no_granuloma', 'pulmonary.krb:447'],
    ],
    ['heart_rate', 'sign', 'multiple_choices',
        ['tachycardia', 'pulmonary.krb:89'],
    ],
    ['hemoptysis', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:300 pulmonary.krb:342 pulmonary.krb:378 pulmonary.krb:433'],
    ],
    ['hives', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:126'],
    ],
    ['hoovers_sign', 'sign', 'yes_no',
        ['yes', 'pulmonary.krb:406'],
    ],
    ['itching', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:139'],
    ],
    ['malaise', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:199 pulmonary.krb:450'],
    ],
    ['methacholine', 'investigation', 'positive_negative',
        ['positive', 'pulmonary.krb:68 pulmonary.krb:179'],
    ],
    ['nasal_blockage', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:195 pulmonary.krb:210 pulmonary.krb:226'],
    ],
    ['nasal_secretions', 'symptom', 'single_choice',
        ['thin_watery', 'pulmonary.krb:212'],
        ['thick_purulent', 'pulmonary.krb:197'],
        ['none'],
    ],
    ['nightsweats', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:425'],
    ],
    ['occupational_exposure_smoke', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:303 pulmonary.krb:345 pulmonary.krb:381'],
    ],
    ['on_exposure', 'symptom', 'multiple_choices',
        ['dust', 'pulmonary.krb:213'],
    ],
    ['pft', 'investigation', 'single_choice',
        ['obstructive_reversibility', 'pulmonary.krb:62 pulmonary.krb:173 pulmonary.krb:307 pulmonary.krb:349 pulmonary.krb:385'],
        ['obstructive_nonreversibility', 'pulmonary.krb:63 pulmonary.krb:174 pulmonary.krb:306 pulmonary.krb:348 pulmonary.krb:384'],
        ['obstructive_yes', 'pulmonary.krb:58 pulmonary.krb:169 pulmonary.krb:308 pulmonary.krb:350 pulmonary.krb:386'],
        ['restrictive_yes', 'pulmonary.krb:64 pulmonary.krb:175 pulmonary.krb:309 pulmonary.krb:351 pulmonary.krb:387'],
    ],
    ['pft_dlco', 'investigation', 'single_choice',
        ['dlco_increased', 'pulmonary.krb:311 pulmonary.krb:353'],
        ['dlco_normal', 'pulmonary.krb:310 pulmonary.krb:352'],
        ['dlco_decreased', 'pulmonary.krb:312 pulmonary.krb:354'],
    ],
    ['pft_values', 'investigation', 'single_choice',
        ['raised_ratio', 'pulmonary.krb:65 pulmonary.krb:176'],
    ],
    ['pleural_rub', 'sign', 'yes_no',
        ['yes', 'pulmonary.krb:435'],
    ],
    ['ppd', 'investigation', 'single_choice',
        ['less_than_10mm', 'pulmonary.krb:445'],
        ['10mm_to_20mm', ''],
        ['more_than_20mm', 'pulmonary.krb:443'],
        ['ten_mm_to_20mm', 'pulmonary.krb:444'],
    ],
    ['preceding_urti', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:301 pulmonary.krb:343 pulmonary.krb:379'],
    ],
    ['pulse_type', 'sign', 'multiple_choices',
        ['pulsus_paradoxus', 'pulmonary.krb:93'],
    ],
    ['pursed_lip_breathing', 'sign', 'yes_no',
        ['yes', 'pulmonary.krb:405'],
    ],
    ['respiratory_fatigue', 'sign', 'yes_no',
        ['yes', 'pulmonary.krb:91'],
    ],
    ['respiratory_rate', 'sign', 'single_choice',
        ['increased', 'pulmonary.krb:49 pulmonary.krb:160 pulmonary.krb:305 pulmonary.krb:347 pulmonary.krb:383'],
        ['normal'],
        ['decreased'],
    ],
    ['runny_nose', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:194 pulmonary.krb:209 pulmonary.krb:225'],
    ],
    ['serum_IgE', 'investigation', 'single_choice',
        ['raised', 'pulmonary.krb:70 pulmonary.krb:181'],
    ],
    ['skin_redness', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:140 pulmonary.krb:142'],
    ],
    ['skin_test_for_allergen', 'investigation', 'positive_negative',
        ['positive', 'pulmonary.krb:215'],
        ['negative', 'pulmonary.krb:248 pulmonary.krb:260'],
    ],
    ['smoker', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:302 pulmonary.krb:344 pulmonary.krb:380 pulmonary.krb:434'],
    ],
    ['sneezing', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:196 pulmonary.krb:211 pulmonary.krb:227'],
    ],
    ['sob', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:22 pulmonary.krb:124'],
    ],
    ['sob_episodic', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:29 pulmonary.krb:134'],
    ],
    ['sob_nocturnal', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:30 pulmonary.krb:135'],
    ],
    ['sob_speechless', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:86'],
    ],
    ['sob_triggers', 'symptom', 'single_choice',
        ['dust', 'pulmonary.krb:31 pulmonary.krb:136'],
    ],
    ['sputum_afb', 'investigation', 'positive_negative',
        ['positive', 'pulmonary.krb:441'],
        ['negative', 'pulmonary.krb:442'],
    ],
    ['sputum_appearance', 'symptom', 'single_choice',
        ['rings', 'pulmonary.krb:37 pulmonary.krb:148'],
    ],
    ['sputum_color', 'symptom', 'single_choice',
        ['white', 'pulmonary.krb:36 pulmonary.krb:147'],
    ],
    ['sputum_quantity', 'symptom', 'single_choice',
        ['low-moderate', 'pulmonary.krb:42 pulmonary.krb:153'],
    ],
    ['sputum_type', 'symptom', 'single_choice',
        ['mucoid', 'pulmonary.krb:284 pulmonary.krb:326 pulmonary.krb:368 pulmonary.krb:421'],
        ['purulent', 'pulmonary.krb:285 pulmonary.krb:327 pulmonary.krb:369 pulmonary.krb:422'],
    ],
    ['trigger', 'symptom', 'multiple_choices',
        ['smoke', 'pulmonary.krb:214 pulmonary.krb:249 pulmonary.krb:261'],
    ],
    ['weightloss', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:436'],
    ],
    ['wheeze', 'sign', 'single_choice',
        ['diffuse', 'pulmonary.krb:48 pulmonary.krb:159'],
        ['none_of_the_above'],
    ],
    ['wheeze', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:21 pulmonary.krb:125 pulmonary.krb:298 pulmonary.krb:340 pulmonary.krb:376 pulmonary.krb:431'],
    ],
    ['wheeze_episodic', 'symptom', 'yes_no',
        ['yes', 'pulmonary.krb:25 pulmonary.krb:130'],
    ],
]
| Python |
# engine.py
import sys
import operator
import itertools
from pyke import knowledge_engine, krb_traceback
def init():
    # Create the module-global Pyke inference engine.  Passing __file__
    # tells pyke to locate the compiled rule bases relative to this module.
    global Engine
    Engine = knowledge_engine.engine(__file__)
# When True, consult() prints a krb traceback and exits on rule errors
# instead of re-raising the exception (see consult below).
Debug = False
def calc_relevance(cf, relevance):
    r'''Return the weighted relevance of a question.

    A question's raw relevance is scaled by the certainty factor of the
    diagnosis that would ask it, so questions belonging to more likely
    diagnoses rank higher.
    '''
    weighted = relevance * cf
    return weighted
def combine_relevances(relevances):
    r'''Collapse the weighted relevances for one question into one value.

    A question is taken to be as relevant as the single most relevant
    diagnosis that asks it, i.e. the maximum over the iterable.
    '''
    return max(value for value in relevances)
def consult(rule_base, **fact_bases):
    # Run one diagnostic consultation against the global Engine.
    #
    # rule_base:  name of the compiled rule base to activate
    #             (e.g. 'headache' or 'pulmonary').
    # fact_bases: keyword args mapping each fact-base name to a
    #             {fact_name: value} dict of known answers.
    #
    # Returns (diseases, questions):
    #   diseases  -- [(disease, cf, [((category, q_name), relevance), ...])]
    #                sorted by descending cf, each disease's question list
    #                sorted by descending relevance;
    #   questions -- [(category, q_name, combined_relevance)] sorted by
    #                descending combined relevance, where the relevance of a
    #                question is the max over the diseases asking it,
    #                weighted by each disease's cf.
    #
    # NOTE: Python 2 code (dict.iteritems); init() must be called first.
    Engine.reset()
    for fb, facts in fact_bases.iteritems():
        # kludge to force the creation of fb in case no facts are present...
        Engine.assert_(fb, "dummy_fact", ())
        for fact_name, value in facts.iteritems():
            Engine.assert_(fb, fact_name, (value,))
    try:
        Engine.activate(rule_base)
        # Enumerate every provable diagnosis; each solution binds $disease,
        # $cf and $questions (a sequence of ((category, q_name), relevance)
        # pairs produced by the rules' sum_cf/mult_cf helpers).
        with Engine.prove_goal(
            '%s.diagnosis($disease, ($cf, $questions))' % (rule_base,)) \
            as gen:
            # 'vars' is pyke's bindings dict here (it shadows the builtin).
            diseases = sorted(((vars['disease'], vars['cf'],
                                sorted(vars['questions'],
                                       key=operator.itemgetter(1),
                                       reverse=True))
                               for vars, _ in gen),
                              key=operator.itemgetter(1),
                              reverse=True)
        # Flatten to (category, q_name, cf-weighted relevance) triples,
        # sorted so groupby below can group equal (category, q_name) keys.
        individual_relevances = \
            sorted(((category, q_name, calc_relevance(cf, relevance))
                    for _, cf, questions in diseases
                    for (category, q_name), relevance in questions),
                   key=operator.itemgetter(0, 1))
        #print "individual_relevances:", individual_relevances
        # Combine the per-disease relevances of each question (max), then
        # order the questions most-relevant first.
        questions = sorted(((category, q_name,
                             combine_relevances(q_and_r[2]
                                                  for q_and_r
                                                  in q_and_relevances))
                            for (category, q_name), q_and_relevances
                            in itertools.groupby(individual_relevances,
                                                 key=operator.itemgetter(0, 1))),
                           key=operator.itemgetter(2),
                           reverse=True)
        #print "questions:", questions
        return diseases, questions
    except Exception:
        # Not sure this is really right long term, but for now ...
        if Debug:
            krb_traceback.print_exc()
            sys.exit(1)
        else:
            raise
| Python |
# headache_bc.py
from __future__ import with_statement
import itertools
from pyke import contexts, pattern, bc_rule
# Version stamps written by the pyke krb compiler -- presumably compared at
# load time to decide whether this generated module is stale (see
# compiled_pyke_files.py / pyke's target_pkg).
pyke_version = '1.1.1'
compiler_version = 1
def migraine(rule, arg_patterns, arg_context):
  # Machine-generated by the pyke krb compiler from headache.krb (rule
  # 'migraine'); regenerate from the .krb source rather than hand-editing.
  # Backward-chaining 'diagnosis' rule: proves five 'score' subgoals
  # (site/type/character/episode/duration answers -- see populate() for the
  # literal patterns) and matches the goal's output pattern against
  # sum_cf(ans1..ans5).  sum_cf is imported at the bottom of this module.
  # NOTE(review): indentation restored to pyke's canonical nesting; the
  # extracted copy appeared whitespace-flattened.
  engine = rule.rule_base.engine
  patterns = rule.goal_arg_patterns()
  if len(arg_patterns) == len(patterns):
    context = contexts.bc_context(rule)
    try:
      if all(itertools.imap(lambda pat, arg:
                              pat.match_pattern(context, context,
                                                arg, arg_context),
                            patterns,
                            arg_patterns)):
        rule.rule_base.num_bc_rules_matched += 1
        with engine.prove(rule.rule_base.root_name, 'score', context,
                          (rule.pattern(0),
                           rule.pattern(1),
                           rule.pattern(2),
                           rule.pattern(3),)) \
          as gen_1:
          for x_1 in gen_1:
            assert x_1 is None, \
              "headache.migraine: got unexpected plan from when clause 1"
            with engine.prove(rule.rule_base.root_name, 'score', context,
                              (rule.pattern(4),
                               rule.pattern(5),
                               rule.pattern(6),
                               rule.pattern(7),)) \
              as gen_2:
              for x_2 in gen_2:
                assert x_2 is None, \
                  "headache.migraine: got unexpected plan from when clause 2"
                with engine.prove(rule.rule_base.root_name, 'score', context,
                                  (rule.pattern(8),
                                   rule.pattern(9),
                                   rule.pattern(2),
                                   rule.pattern(10),)) \
                  as gen_3:
                  for x_3 in gen_3:
                    assert x_3 is None, \
                      "headache.migraine: got unexpected plan from when clause 3"
                    with engine.prove(rule.rule_base.root_name, 'score', context,
                                      (rule.pattern(11),
                                       rule.pattern(12),
                                       rule.pattern(6),
                                       rule.pattern(13),)) \
                      as gen_4:
                      for x_4 in gen_4:
                        assert x_4 is None, \
                          "headache.migraine: got unexpected plan from when clause 4"
                        with engine.prove(rule.rule_base.root_name, 'score', context,
                                          (rule.pattern(14),
                                           rule.pattern(15),
                                           rule.pattern(6),
                                           rule.pattern(16),)) \
                          as gen_5:
                          for x_5 in gen_5:
                            assert x_5 is None, \
                              "headache.migraine: got unexpected plan from when clause 5"
                            mark6 = context.mark(True)
                            if rule.pattern(17).match_data(context, context,
                                    sum_cf(context.lookup_data('ans1'), context.lookup_data('ans2'), context.lookup_data('ans3'), context.lookup_data('ans4'), context.lookup_data('ans5'))):
                              context.end_save_all_undo()
                              rule.rule_base.num_bc_rule_successes += 1
                              yield
                            else: context.end_save_all_undo()
                            context.undo_to_mark(mark6)
        rule.rule_base.num_bc_rule_failures += 1
    finally:
      context.done()
def classical_migraine(rule, arg_patterns, arg_context):
  # Machine-generated by the pyke krb compiler from headache.krb; do not
  # hand-edit.  Proves the plain 'migraine' diagnosis first, then the
  # 'hd_symptom_aura' question (see populate() for the literal patterns).
  # NOTE(review): indentation restored to pyke's canonical nesting.
  engine = rule.rule_base.engine
  patterns = rule.goal_arg_patterns()
  if len(arg_patterns) == len(patterns):
    context = contexts.bc_context(rule)
    try:
      if all(itertools.imap(lambda pat, arg:
                              pat.match_pattern(context, context,
                                                arg, arg_context),
                            patterns,
                            arg_patterns)):
        rule.rule_base.num_bc_rules_matched += 1
        with engine.prove(rule.rule_base.root_name, 'diagnosis', context,
                          (rule.pattern(0),
                           rule.pattern(1),)) \
          as gen_1:
          for x_1 in gen_1:
            assert x_1 is None, \
              "headache.classical_migraine: got unexpected plan from when clause 1"
            with engine.prove('question', 'hd_symptom_aura', context,
                              (rule.pattern(2),)) \
              as gen_2:
              for x_2 in gen_2:
                assert x_2 is None, \
                  "headache.classical_migraine: got unexpected plan from when clause 2"
                rule.rule_base.num_bc_rule_successes += 1
                yield
        rule.rule_base.num_bc_rule_failures += 1
    finally:
      context.done()
def opthalmoplegic_migraine(rule, arg_patterns, arg_context):
  # Machine-generated by the pyke krb compiler from headache.krb; do not
  # hand-edit.  Proves the 'migraine' diagnosis, then the 'hd_sign_cns'
  # question (bound to 'sixth_nerve' in populate()).
  # NOTE(review): indentation restored to pyke's canonical nesting.
  engine = rule.rule_base.engine
  patterns = rule.goal_arg_patterns()
  if len(arg_patterns) == len(patterns):
    context = contexts.bc_context(rule)
    try:
      if all(itertools.imap(lambda pat, arg:
                              pat.match_pattern(context, context,
                                                arg, arg_context),
                            patterns,
                            arg_patterns)):
        rule.rule_base.num_bc_rules_matched += 1
        with engine.prove(rule.rule_base.root_name, 'diagnosis', context,
                          (rule.pattern(0),
                           rule.pattern(1),)) \
          as gen_1:
          for x_1 in gen_1:
            assert x_1 is None, \
              "headache.opthalmoplegic_migraine: got unexpected plan from when clause 1"
            with engine.prove('question', 'hd_sign_cns', context,
                              (rule.pattern(2),)) \
              as gen_2:
              for x_2 in gen_2:
                assert x_2 is None, \
                  "headache.opthalmoplegic_migraine: got unexpected plan from when clause 2"
                rule.rule_base.num_bc_rule_successes += 1
                yield
        rule.rule_base.num_bc_rule_failures += 1
    finally:
      context.done()
def hemiplegic_migraine(rule, arg_patterns, arg_context):
  # Machine-generated by the pyke krb compiler from headache.krb; do not
  # hand-edit.  Proves the 'migraine' diagnosis, then the 'hd_sign_cns'
  # question (bound to 'hemiparesis' in populate()).
  # NOTE(review): indentation restored to pyke's canonical nesting.
  engine = rule.rule_base.engine
  patterns = rule.goal_arg_patterns()
  if len(arg_patterns) == len(patterns):
    context = contexts.bc_context(rule)
    try:
      if all(itertools.imap(lambda pat, arg:
                              pat.match_pattern(context, context,
                                                arg, arg_context),
                            patterns,
                            arg_patterns)):
        rule.rule_base.num_bc_rules_matched += 1
        with engine.prove(rule.rule_base.root_name, 'diagnosis', context,
                          (rule.pattern(0),
                           rule.pattern(1),)) \
          as gen_1:
          for x_1 in gen_1:
            assert x_1 is None, \
              "headache.hemiplegic_migraine: got unexpected plan from when clause 1"
            with engine.prove('question', 'hd_sign_cns', context,
                              (rule.pattern(2),)) \
              as gen_2:
              for x_2 in gen_2:
                assert x_2 is None, \
                  "headache.hemiplegic_migraine: got unexpected plan from when clause 2"
                rule.rule_base.num_bc_rule_successes += 1
                yield
        rule.rule_base.num_bc_rule_failures += 1
    finally:
      context.done()
def basilar_migraine_1(rule, arg_patterns, arg_context):
  # Machine-generated by the pyke krb compiler from headache.krb; do not
  # hand-edit.  Proves the 'migraine' diagnosis, then the 'vertigo'
  # question (see populate()).
  # NOTE(review): indentation restored to pyke's canonical nesting.
  engine = rule.rule_base.engine
  patterns = rule.goal_arg_patterns()
  if len(arg_patterns) == len(patterns):
    context = contexts.bc_context(rule)
    try:
      if all(itertools.imap(lambda pat, arg:
                              pat.match_pattern(context, context,
                                                arg, arg_context),
                            patterns,
                            arg_patterns)):
        rule.rule_base.num_bc_rules_matched += 1
        with engine.prove(rule.rule_base.root_name, 'diagnosis', context,
                          (rule.pattern(0),
                           rule.pattern(1),)) \
          as gen_1:
          for x_1 in gen_1:
            assert x_1 is None, \
              "headache.basilar_migraine_1: got unexpected plan from when clause 1"
            with engine.prove('question', 'vertigo', context,
                              (rule.pattern(2),)) \
              as gen_2:
              for x_2 in gen_2:
                assert x_2 is None, \
                  "headache.basilar_migraine_1: got unexpected plan from when clause 2"
                rule.rule_base.num_bc_rule_successes += 1
                yield
        rule.rule_base.num_bc_rule_failures += 1
    finally:
      context.done()
def basilar_migraine_2(rule, arg_patterns, arg_context):
  # Machine-generated by the pyke krb compiler from headache.krb; do not
  # hand-edit.  Alternative proof of 'basilar_migraine': proves 'migraine',
  # then the 'ataxia' question (see populate()).
  # NOTE(review): indentation restored to pyke's canonical nesting.
  engine = rule.rule_base.engine
  patterns = rule.goal_arg_patterns()
  if len(arg_patterns) == len(patterns):
    context = contexts.bc_context(rule)
    try:
      if all(itertools.imap(lambda pat, arg:
                              pat.match_pattern(context, context,
                                                arg, arg_context),
                            patterns,
                            arg_patterns)):
        rule.rule_base.num_bc_rules_matched += 1
        with engine.prove(rule.rule_base.root_name, 'diagnosis', context,
                          (rule.pattern(0),
                           rule.pattern(1),)) \
          as gen_1:
          for x_1 in gen_1:
            assert x_1 is None, \
              "headache.basilar_migraine_2: got unexpected plan from when clause 1"
            with engine.prove('question', 'ataxia', context,
                              (rule.pattern(2),)) \
              as gen_2:
              for x_2 in gen_2:
                assert x_2 is None, \
                  "headache.basilar_migraine_2: got unexpected plan from when clause 2"
                rule.rule_base.num_bc_rule_successes += 1
                yield
        rule.rule_base.num_bc_rule_failures += 1
    finally:
      context.done()
def cluster_headache_1(rule, arg_patterns, arg_context):
  # Machine-generated by the pyke krb compiler from headache.krb; do not
  # hand-edit.  Proves five 'score' subgoals (see populate() for the
  # literal patterns) and matches the goal's output against
  # sum_cf(ans1..ans5), like migraine() above.
  # NOTE(review): indentation restored to pyke's canonical nesting.
  engine = rule.rule_base.engine
  patterns = rule.goal_arg_patterns()
  if len(arg_patterns) == len(patterns):
    context = contexts.bc_context(rule)
    try:
      if all(itertools.imap(lambda pat, arg:
                              pat.match_pattern(context, context,
                                                arg, arg_context),
                            patterns,
                            arg_patterns)):
        rule.rule_base.num_bc_rules_matched += 1
        with engine.prove(rule.rule_base.root_name, 'score', context,
                          (rule.pattern(0),
                           rule.pattern(1),
                           rule.pattern(2),
                           rule.pattern(3),)) \
          as gen_1:
          for x_1 in gen_1:
            assert x_1 is None, \
              "headache.cluster_headache_1: got unexpected plan from when clause 1"
            with engine.prove(rule.rule_base.root_name, 'score', context,
                              (rule.pattern(4),
                               rule.pattern(5),
                               rule.pattern(2),
                               rule.pattern(6),)) \
              as gen_2:
              for x_2 in gen_2:
                assert x_2 is None, \
                  "headache.cluster_headache_1: got unexpected plan from when clause 2"
                with engine.prove(rule.rule_base.root_name, 'score', context,
                                  (rule.pattern(7),
                                   rule.pattern(8),
                                   rule.pattern(2),
                                   rule.pattern(9),)) \
                  as gen_3:
                  for x_3 in gen_3:
                    assert x_3 is None, \
                      "headache.cluster_headache_1: got unexpected plan from when clause 3"
                    with engine.prove(rule.rule_base.root_name, 'score', context,
                                      (rule.pattern(10),
                                       rule.pattern(11),
                                       rule.pattern(12),
                                       rule.pattern(13),)) \
                      as gen_4:
                      for x_4 in gen_4:
                        assert x_4 is None, \
                          "headache.cluster_headache_1: got unexpected plan from when clause 4"
                        with engine.prove(rule.rule_base.root_name, 'score', context,
                                          (rule.pattern(14),
                                           rule.pattern(15),
                                           rule.pattern(16),
                                           rule.pattern(17),)) \
                          as gen_5:
                          for x_5 in gen_5:
                            assert x_5 is None, \
                              "headache.cluster_headache_1: got unexpected plan from when clause 5"
                            mark6 = context.mark(True)
                            if rule.pattern(18).match_data(context, context,
                                    sum_cf(context.lookup_data('ans1'), context.lookup_data('ans2'), context.lookup_data('ans3'), context.lookup_data('ans4'), context.lookup_data('ans5'))):
                              context.end_save_all_undo()
                              rule.rule_base.num_bc_rule_successes += 1
                              yield
                            else: context.end_save_all_undo()
                            context.undo_to_mark(mark6)
        rule.rule_base.num_bc_rule_failures += 1
    finally:
      context.done()
def tension_headache(rule, arg_patterns, arg_context):
  # Machine-generated by the pyke krb compiler from headache.krb; do not
  # hand-edit.  Proves two 'score' subgoals (character/diurnal, see
  # populate()) and matches the goal's output against sum_cf(ans1, ans2).
  # NOTE(review): indentation restored to pyke's canonical nesting.
  engine = rule.rule_base.engine
  patterns = rule.goal_arg_patterns()
  if len(arg_patterns) == len(patterns):
    context = contexts.bc_context(rule)
    try:
      if all(itertools.imap(lambda pat, arg:
                              pat.match_pattern(context, context,
                                                arg, arg_context),
                            patterns,
                            arg_patterns)):
        rule.rule_base.num_bc_rules_matched += 1
        with engine.prove(rule.rule_base.root_name, 'score', context,
                          (rule.pattern(0),
                           rule.pattern(1),
                           rule.pattern(2),
                           rule.pattern(3),)) \
          as gen_1:
          for x_1 in gen_1:
            assert x_1 is None, \
              "headache.tension_headache: got unexpected plan from when clause 1"
            with engine.prove(rule.rule_base.root_name, 'score', context,
                              (rule.pattern(4),
                               rule.pattern(5),
                               rule.pattern(6),
                               rule.pattern(7),)) \
              as gen_2:
              for x_2 in gen_2:
                assert x_2 is None, \
                  "headache.tension_headache: got unexpected plan from when clause 2"
                mark3 = context.mark(True)
                if rule.pattern(8).match_data(context, context,
                        sum_cf(context.lookup_data('ans1'), context.lookup_data('ans2'))):
                  context.end_save_all_undo()
                  rule.rule_base.num_bc_rule_successes += 1
                  yield
                else: context.end_save_all_undo()
                context.undo_to_mark(mark3)
        rule.rule_base.num_bc_rule_failures += 1
    finally:
      context.done()
def depression_headache(rule, arg_patterns, arg_context):
  # Machine-generated by the pyke krb compiler from headache.krb; do not
  # hand-edit.  Proves two questions directly: 'hd_symptom_diurnal'
  # (matched against 'morning') and 'psych_symptom' ('anhedonia') --
  # see populate().
  # NOTE(review): indentation restored to pyke's canonical nesting.
  engine = rule.rule_base.engine
  patterns = rule.goal_arg_patterns()
  if len(arg_patterns) == len(patterns):
    context = contexts.bc_context(rule)
    try:
      if all(itertools.imap(lambda pat, arg:
                              pat.match_pattern(context, context,
                                                arg, arg_context),
                            patterns,
                            arg_patterns)):
        rule.rule_base.num_bc_rules_matched += 1
        with engine.prove('question', 'hd_symptom_diurnal', context,
                          (rule.pattern(0),)) \
          as gen_1:
          for x_1 in gen_1:
            assert x_1 is None, \
              "headache.depression_headache: got unexpected plan from when clause 1"
            with engine.prove('question', 'psych_symptom', context,
                              (rule.pattern(1),)) \
              as gen_2:
              for x_2 in gen_2:
                assert x_2 is None, \
                  "headache.depression_headache: got unexpected plan from when clause 2"
                rule.rule_base.num_bc_rule_successes += 1
                yield
        rule.rule_base.num_bc_rule_failures += 1
    finally:
      context.done()
def temporal_arteritis(rule, arg_patterns, arg_context):
  # Machine-generated by the pyke krb compiler from headache.krb; do not
  # hand-edit.  Proves 'hd_symptom_aggravated' ('chewing') and 'labfinding'
  # ('raised_esr') questions -- see populate().
  # NOTE(review): indentation restored to pyke's canonical nesting.
  engine = rule.rule_base.engine
  patterns = rule.goal_arg_patterns()
  if len(arg_patterns) == len(patterns):
    context = contexts.bc_context(rule)
    try:
      if all(itertools.imap(lambda pat, arg:
                              pat.match_pattern(context, context,
                                                arg, arg_context),
                            patterns,
                            arg_patterns)):
        rule.rule_base.num_bc_rules_matched += 1
        with engine.prove('question', 'hd_symptom_aggravated', context,
                          (rule.pattern(0),)) \
          as gen_1:
          for x_1 in gen_1:
            assert x_1 is None, \
              "headache.temporal_arteritis: got unexpected plan from when clause 1"
            with engine.prove('question', 'labfinding', context,
                              (rule.pattern(1),)) \
              as gen_2:
              for x_2 in gen_2:
                assert x_2 is None, \
                  "headache.temporal_arteritis: got unexpected plan from when clause 2"
                rule.rule_base.num_bc_rule_successes += 1
                yield
        rule.rule_base.num_bc_rule_failures += 1
    finally:
      context.done()
def favorable_answer(rule, arg_patterns, arg_context):
  # Machine-generated by the pyke krb compiler from headache.krb; do not
  # hand-edit.  'score' rule: asks the question bound to $question and, if
  # the answer matches, claims the goal (cut) so lower-priority score rules
  # are not tried.  Yields the full max_score -- see populate().
  # NOTE(review): indentation restored to pyke's canonical nesting.
  engine = rule.rule_base.engine
  patterns = rule.goal_arg_patterns()
  if len(arg_patterns) == len(patterns):
    context = contexts.bc_context(rule)
    try:
      if all(itertools.imap(lambda pat, arg:
                              pat.match_pattern(context, context,
                                                arg, arg_context),
                            patterns,
                            arg_patterns)):
        rule.rule_base.num_bc_rules_matched += 1
        with engine.prove('question', context.lookup_data('question'), context,
                          (rule.pattern(0),)) \
          as gen_1:
          for x_1 in gen_1:
            assert x_1 is None, \
              "headache.favorable_answer: got unexpected plan from when clause 1"
            with engine.prove('special', 'claim_goal', context,
                              ()) \
              as gen_2:
              for x_2 in gen_2:
                assert x_2 is None, \
                  "headache.favorable_answer: got unexpected plan from when clause 2"
                rule.rule_base.num_bc_rule_successes += 1
                yield
        rule.rule_base.num_bc_rule_failures += 1
    finally:
      context.done()
def unfavorable_answer(rule, arg_patterns, arg_context):
  # Machine-generated by the pyke krb compiler from headache.krb; do not
  # hand-edit.  'score' rule: the question was answered but with some other
  # value, so it scores 0 (see populate()) and claims the goal.
  # NOTE(review): indentation restored to pyke's canonical nesting.
  engine = rule.rule_base.engine
  patterns = rule.goal_arg_patterns()
  if len(arg_patterns) == len(patterns):
    context = contexts.bc_context(rule)
    try:
      if all(itertools.imap(lambda pat, arg:
                              pat.match_pattern(context, context,
                                                arg, arg_context),
                            patterns,
                            arg_patterns)):
        rule.rule_base.num_bc_rules_matched += 1
        with engine.prove('question', context.lookup_data('question'), context,
                          (rule.pattern(0),)) \
          as gen_1:
          for x_1 in gen_1:
            assert x_1 is None, \
              "headache.unfavorable_answer: got unexpected plan from when clause 1"
            with engine.prove('special', 'claim_goal', context,
                              ()) \
              as gen_2:
              for x_2 in gen_2:
                assert x_2 is None, \
                  "headache.unfavorable_answer: got unexpected plan from when clause 2"
                rule.rule_base.num_bc_rule_successes += 1
                yield
        rule.rule_base.num_bc_rule_failures += 1
    finally:
      context.done()
def unanswered(rule, arg_patterns, arg_context):
  # Machine-generated by the pyke krb compiler from headache.krb; do not
  # hand-edit.  Fallback 'score' rule with no when-clauses: the question
  # could not be answered, so it scores 0 and reports the question itself
  # back to the caller (see the pattern_tuple in populate()).
  engine = rule.rule_base.engine
  patterns = rule.goal_arg_patterns()
  if len(arg_patterns) == len(patterns):
    context = contexts.bc_context(rule)
    try:
      if all(itertools.imap(lambda pat, arg:
                              pat.match_pattern(context, context,
                                                arg, arg_context),
                            patterns,
                            arg_patterns)):
        rule.rule_base.num_bc_rules_matched += 1
        rule.rule_base.num_bc_rule_successes += 1
        yield
        rule.rule_base.num_bc_rule_failures += 1
    finally:
      context.done()
def populate(engine):
  # Machine-generated by the pyke krb compiler: registers every
  # backward-chaining rule of headache.krb with the engine.  Each
  # bc_rule.bc_rule call pairs a rule name and goal name with the generator
  # function above and the literal/variable patterns it indexes via
  # rule.pattern(n) -- presumably (name, rule_base, goal, fn, plan_fn,
  # goal-arg patterns, plan vars, when-clause patterns); confirm against
  # the pyke bc_rule API before relying on the exact argument meanings.
  # NOTE(review): 'cluster_headache_1'/'tension_headache' use score weights
  # on a 0-10 scale (8.0, 4.0, 6.0, 9.0) while 'migraine' uses 0-1 (0.8,
  # 0.4).  sum_cf normalizes by the sum of max scores, so each rule is
  # internally consistent, but confirm the mixed scales are intentional.
  This_rule_base = engine.get_create('headache')
  bc_rule.bc_rule('migraine', This_rule_base, 'diagnosis',
    migraine, None,
    (pattern.pattern_literal('migraine'),
     contexts.variable('ans'),),
    (),
    (pattern.pattern_literal('hd_symptom_site'),
     pattern.pattern_literal('unilateral'),
     pattern.pattern_literal(0.8),
     contexts.variable('ans1'),
     pattern.pattern_literal('hd_symptom_type'),
     pattern.pattern_literal('episodic'),
     pattern.pattern_literal(0.4),
     contexts.variable('ans2'),
     pattern.pattern_literal('hd_symptom_character'),
     pattern.pattern_literal('throbbing'),
     contexts.variable('ans3'),
     pattern.pattern_literal('hd_symptom_episode'),
     pattern.pattern_literal('greater_than_2_hours'),
     contexts.variable('ans4'),
     pattern.pattern_literal('hd_symptom_lasts'),
     pattern.pattern_literal('weeks'),
     contexts.variable('ans5'),
     contexts.variable('ans'),))
  bc_rule.bc_rule('classical_migraine', This_rule_base, 'diagnosis',
    classical_migraine, None,
    (pattern.pattern_literal('classical_migraine'),
     pattern.pattern_tuple((contexts.variable('cf'), pattern.pattern_tuple((pattern.pattern_literal(('hd_symptom_aura', 1.0,)),), contexts.variable('questions')),), None),),
    (),
    (pattern.pattern_literal('migraine'),
     pattern.pattern_tuple((contexts.variable('cf'), contexts.variable('questions'),), None),
     pattern.pattern_literal(True),))
  bc_rule.bc_rule('opthalmoplegic_migraine', This_rule_base, 'diagnosis',
    opthalmoplegic_migraine, None,
    (pattern.pattern_literal('ophthalmoplegic_migraine'),
     pattern.pattern_tuple((contexts.variable('cf'), pattern.pattern_tuple((pattern.pattern_literal(('hd_sign_cns', 1.0,)),), contexts.variable('questions')),), None),),
    (),
    (pattern.pattern_literal('migraine'),
     pattern.pattern_tuple((contexts.variable('cf'), contexts.variable('questions'),), None),
     pattern.pattern_literal('sixth_nerve'),))
  bc_rule.bc_rule('hemiplegic_migraine', This_rule_base, 'diagnosis',
    hemiplegic_migraine, None,
    (pattern.pattern_literal('hemiplegic_migraine'),
     pattern.pattern_tuple((contexts.variable('cf'), pattern.pattern_tuple((pattern.pattern_literal(('hd_sign_cns', 1.0,)),), contexts.variable('questions')),), None),),
    (),
    (pattern.pattern_literal('migraine'),
     pattern.pattern_tuple((contexts.variable('cf'), contexts.variable('questions'),), None),
     pattern.pattern_literal('hemiparesis'),))
  bc_rule.bc_rule('basilar_migraine_1', This_rule_base, 'diagnosis',
    basilar_migraine_1, None,
    (pattern.pattern_literal('basilar_migraine'),
     pattern.pattern_tuple((contexts.variable('cf'), pattern.pattern_tuple((pattern.pattern_literal(('vertigo', 1.0,)),), contexts.variable('questions')),), None),),
    (),
    (pattern.pattern_literal('migraine'),
     pattern.pattern_tuple((contexts.variable('cf'), contexts.variable('questions'),), None),
     pattern.pattern_literal(True),))
  bc_rule.bc_rule('basilar_migraine_2', This_rule_base, 'diagnosis',
    basilar_migraine_2, None,
    (pattern.pattern_literal('basilar_migraine'),
     pattern.pattern_tuple((contexts.variable('cf'), pattern.pattern_tuple((pattern.pattern_literal(('ataxia', 1.0,)),), contexts.variable('questions')),), None),),
    (),
    (pattern.pattern_literal('migraine'),
     pattern.pattern_tuple((contexts.variable('cf'), contexts.variable('questions'),), None),
     pattern.pattern_literal(True),))
  bc_rule.bc_rule('cluster_headache_1', This_rule_base, 'diagnosis',
    cluster_headache_1, None,
    (pattern.pattern_literal('cluster_headache'),
     contexts.variable('ans'),),
    (),
    (pattern.pattern_literal('hd_symptom_site'),
     pattern.pattern_literal('bilateral'),
     pattern.pattern_literal(8.0),
     contexts.variable('ans1'),
     pattern.pattern_literal('hd_symptom_type'),
     pattern.pattern_literal('episodic'),
     contexts.variable('ans2'),
     pattern.pattern_literal('hd_symptom_location'),
     pattern.pattern_literal('periorbital'),
     contexts.variable('ans3'),
     pattern.pattern_literal('hd_symptom_episode'),
     pattern.pattern_literal('less_than_2_hours'),
     pattern.pattern_literal(4.0),
     contexts.variable('ans4'),
     pattern.pattern_literal('hd_symptom_lasts'),
     pattern.pattern_literal('less_than_4_days'),
     pattern.pattern_literal(6.0),
     contexts.variable('ans5'),
     contexts.variable('ans'),))
  bc_rule.bc_rule('tension_headache', This_rule_base, 'diagnosis',
    tension_headache, None,
    (pattern.pattern_literal('tension_headache'),
     contexts.variable('ans'),),
    (),
    (pattern.pattern_literal('hd_symptom_character'),
     pattern.pattern_literal('bandlike'),
     pattern.pattern_literal(9.0),
     contexts.variable('ans1'),
     pattern.pattern_literal('hd_symptom_diurnal'),
     pattern.pattern_literal('evening'),
     pattern.pattern_literal(6.0),
     contexts.variable('ans2'),
     contexts.variable('ans'),))
  bc_rule.bc_rule('depression_headache', This_rule_base, 'diagnosis',
    depression_headache, None,
    (pattern.pattern_literal('depression_headache'),
     contexts.variable('ans'),),
    (),
    (pattern.pattern_literal('morning'),
     pattern.pattern_literal('anhedonia'),))
  bc_rule.bc_rule('temporal_arteritis', This_rule_base, 'diagnosis',
    temporal_arteritis, None,
    (pattern.pattern_literal('temporal_arteritis'),
     contexts.variable('ans'),),
    (),
    (pattern.pattern_literal('chewing'),
     pattern.pattern_literal('raised_esr'),))
  bc_rule.bc_rule('favorable_answer', This_rule_base, 'score',
    favorable_answer, None,
    (contexts.variable('question'),
     contexts.variable('ans'),
     contexts.variable('max_score'),
     pattern.pattern_tuple((contexts.variable('max_score'), contexts.variable('max_score'), pattern.pattern_literal(None),), None),),
    (),
    (contexts.variable('ans'),))
  bc_rule.bc_rule('unfavorable_answer', This_rule_base, 'score',
    unfavorable_answer, None,
    (contexts.variable('question'),
     contexts.anonymous('_ans'),
     contexts.variable('max_score'),
     pattern.pattern_tuple((pattern.pattern_literal(0), contexts.variable('max_score'), pattern.pattern_literal(None),), None),),
    (),
    (contexts.anonymous('_bad_ans'),))
  bc_rule.bc_rule('unanswered', This_rule_base, 'score',
    unanswered, None,
    (contexts.variable('question'),
     contexts.anonymous('_ans'),
     contexts.variable('max_score'),
     pattern.pattern_tuple((pattern.pattern_literal(0), contexts.variable('max_score'), contexts.variable('question'),), None),),
    (),
    ())
# CF arithmetic helpers and the acceptance threshold used by the compiled
# rule functions above; imported at module scope by the generated code.
from naimath.engine.helpers import cf_threshold, sum_cf, mult_cf
# Path of the .krb source this module was compiled from.
Krb_filename = '../headache.krb'
# Generated mapping of ((first, last) line range in this compiled file) ->
# ((first, last) line range in Krb_filename); presumably used by pyke to
# report errors against the original .krb source -- do not edit by hand.
Krb_lineno_map = (
    ((16, 20), (4, 4)),
    ((22, 30), (6, 6)),
    ((31, 39), (7, 7)),
    ((40, 48), (8, 8)),
    ((49, 57), (9, 9)),
    ((58, 66), (10, 10)),
    ((69, 69), (11, 11)),
    ((85, 89), (15, 16)),
    ((91, 97), (18, 18)),
    ((98, 103), (19, 19)),
    ((116, 120), (22, 23)),
    ((122, 128), (25, 25)),
    ((129, 134), (26, 26)),
    ((147, 151), (29, 29)),
    ((153, 159), (31, 31)),
    ((160, 165), (32, 32)),
    ((178, 182), (35, 35)),
    ((184, 190), (37, 37)),
    ((191, 196), (38, 38)),
    ((209, 213), (41, 41)),
    ((215, 221), (43, 43)),
    ((222, 227), (44, 44)),
    ((240, 244), (47, 47)),
    ((246, 254), (49, 49)),
    ((255, 263), (50, 50)),
    ((264, 272), (51, 51)),
    ((273, 281), (52, 52)),
    ((282, 290), (53, 53)),
    ((293, 293), (54, 54)),
    ((309, 313), (57, 57)),
    ((315, 323), (59, 59)),
    ((324, 332), (60, 60)),
    ((335, 335), (61, 61)),
    ((351, 355), (64, 64)),
    ((357, 362), (66, 66)),
    ((363, 368), (67, 67)),
    ((381, 385), (70, 70)),
    ((387, 392), (72, 72)),
    ((393, 398), (73, 73)),
    ((411, 415), (76, 76)),
    ((417, 422), (78, 78)),
    ((423, 428), (79, 79)),
    ((441, 445), (82, 82)),
    ((447, 452), (84, 84)),
    ((453, 458), (85, 85)),
    ((471, 475), (88, 88)),
)
| Python |
# compiled_pyke_files.py
# Machine-generated index written by the pyke knowledge-base compiler;
# it records which compiled rule modules were produced from which .krb files.
from pyke import target_pkg
# Versions captured at compile time so pyke can detect stale compilations.
pyke_version = '1.1.1'
compiler_version = 1
target_pkg_version = 1
# Capture the import-hook loader for this module when one exists;
# __loader__ is not defined under a plain filesystem import, hence the fallback.
try:
    loader = __loader__
except NameError:
    loader = None
def get_target_pkg():
    """Build the pyke ``target_pkg`` describing this compiled package.

    Each key identifies a source .krb file; each value pairs the compile
    timestamp with the generated module written for that source.
    """
    compiled_sources = {
        ('', '', 'pulmonary.krb'): [1286859051.33671, 'pulmonary_bc.py'],
        ('', '', 'headache.krb'): [1286859051.439604, 'headache_bc.py'],
    }
    return target_pkg.target_pkg(__name__, __file__, pyke_version, loader,
                                 compiled_sources, compiler_version)
| Python |
# pulmonary_bc.py
# Machine-generated by the pyke compiler from pulmonary.krb (see Krb_filename
# at the bottom of this module) -- backward-chaining rule functions.
from __future__ import with_statement
import itertools
from pyke import contexts, pattern, bc_rule
# Versions captured at compile time so pyke can detect stale compilations.
pyke_version = '1.1.1'
compiler_version = 1
def common_cold(rule, arg_patterns, arg_context):
  """Backward-chaining generator for rule 'common_cold' (goal 'diagnosis').

  Generated by the pyke compiler from pulmonary.krb -- do not edit.
  After matching the goal argument patterns, proves six 'score' subgoals,
  combines their certainty factors with sum_cf, and yields (proving the
  goal) only when the combined CF reaches cf_threshold.
  """
  engine = rule.rule_base.engine
  patterns = rule.goal_arg_patterns()
  if len(arg_patterns) == len(patterns):
    context = contexts.bc_context(rule)
    try:
      if all(itertools.imap(lambda pat, arg:
                              pat.match_pattern(context, context,
                                                arg, arg_context),
                            patterns,
                            arg_patterns)):
        rule.rule_base.num_bc_rules_matched += 1
        with engine.prove(rule.rule_base.root_name, 'score', context,
                          (rule.pattern(0),
                           rule.pattern(1),
                           rule.pattern(2),)) \
          as gen_1:
          for x_1 in gen_1:
            assert x_1 is None, \
              "pulmonary.common_cold: got unexpected plan from when clause 1"
            with engine.prove(rule.rule_base.root_name, 'score', context,
                              (rule.pattern(3),
                               rule.pattern(1),
                               rule.pattern(4),)) \
              as gen_2:
              for x_2 in gen_2:
                assert x_2 is None, \
                  "pulmonary.common_cold: got unexpected plan from when clause 2"
                with engine.prove(rule.rule_base.root_name, 'score', context,
                                  (rule.pattern(5),
                                   rule.pattern(1),
                                   rule.pattern(6),)) \
                  as gen_3:
                  for x_3 in gen_3:
                    assert x_3 is None, \
                      "pulmonary.common_cold: got unexpected plan from when clause 3"
                    with engine.prove(rule.rule_base.root_name, 'score', context,
                                      (rule.pattern(7),
                                       rule.pattern(8),
                                       rule.pattern(9),)) \
                      as gen_4:
                      for x_4 in gen_4:
                        assert x_4 is None, \
                          "pulmonary.common_cold: got unexpected plan from when clause 4"
                        with engine.prove(rule.rule_base.root_name, 'score', context,
                                          (rule.pattern(10),
                                           rule.pattern(11),
                                           rule.pattern(12),)) \
                          as gen_5:
                          for x_5 in gen_5:
                            assert x_5 is None, \
                              "pulmonary.common_cold: got unexpected plan from when clause 5"
                            with engine.prove(rule.rule_base.root_name, 'score', context,
                                              (rule.pattern(13),
                                               rule.pattern(1),
                                               rule.pattern(14),)) \
                              as gen_6:
                              for x_6 in gen_6:
                                assert x_6 is None, \
                                  "pulmonary.common_cold: got unexpected plan from when clause 6"
                                # Bind the summed CF of all six answers, then succeed
                                # only if it clears the acceptance threshold.
                                mark7 = context.mark(True)
                                if rule.pattern(15).match_data(context, context,
                                        sum_cf(context.lookup_data('ans1'), context.lookup_data('ans2'), context.lookup_data('ans3'), context.lookup_data('ans4'), context.lookup_data('ans5'), context.lookup_data('ans6'))):
                                  context.end_save_all_undo()
                                  if context.lookup_data('ans')[0] >= cf_threshold :
                                    rule.rule_base.num_bc_rule_successes += 1
                                    yield
                                else: context.end_save_all_undo()
                                context.undo_to_mark(mark7)
        rule.rule_base.num_bc_rule_failures += 1
    finally:
      context.done()
def allergic_rhinitis_1(rule, arg_patterns, arg_context):
  """Backward-chaining generator for rule 'allergic_rhinitis_1' (goal 'diagnosis').

  Generated by the pyke compiler from pulmonary.krb -- do not edit.
  Proves six 'score' subgoals, sums their certainty factors with sum_cf,
  and yields only when the combined CF reaches cf_threshold.
  """
  engine = rule.rule_base.engine
  patterns = rule.goal_arg_patterns()
  if len(arg_patterns) == len(patterns):
    context = contexts.bc_context(rule)
    try:
      if all(itertools.imap(lambda pat, arg:
                              pat.match_pattern(context, context,
                                                arg, arg_context),
                            patterns,
                            arg_patterns)):
        rule.rule_base.num_bc_rules_matched += 1
        with engine.prove(rule.rule_base.root_name, 'score', context,
                          (rule.pattern(0),
                           rule.pattern(1),
                           rule.pattern(2),)) \
          as gen_1:
          for x_1 in gen_1:
            assert x_1 is None, \
              "pulmonary.allergic_rhinitis_1: got unexpected plan from when clause 1"
            with engine.prove(rule.rule_base.root_name, 'score', context,
                              (rule.pattern(3),
                               rule.pattern(1),
                               rule.pattern(4),)) \
              as gen_2:
              for x_2 in gen_2:
                assert x_2 is None, \
                  "pulmonary.allergic_rhinitis_1: got unexpected plan from when clause 2"
                with engine.prove(rule.rule_base.root_name, 'score', context,
                                  (rule.pattern(5),
                                   rule.pattern(1),
                                   rule.pattern(6),)) \
                  as gen_3:
                  for x_3 in gen_3:
                    assert x_3 is None, \
                      "pulmonary.allergic_rhinitis_1: got unexpected plan from when clause 3"
                    with engine.prove(rule.rule_base.root_name, 'score', context,
                                      (rule.pattern(7),
                                       rule.pattern(8),
                                       rule.pattern(9),)) \
                      as gen_4:
                      for x_4 in gen_4:
                        assert x_4 is None, \
                          "pulmonary.allergic_rhinitis_1: got unexpected plan from when clause 4"
                        with engine.prove(rule.rule_base.root_name, 'score', context,
                                          (rule.pattern(10),
                                           rule.pattern(11),
                                           rule.pattern(12),)) \
                          as gen_5:
                          for x_5 in gen_5:
                            assert x_5 is None, \
                              "pulmonary.allergic_rhinitis_1: got unexpected plan from when clause 5"
                            with engine.prove(rule.rule_base.root_name, 'score', context,
                                              (rule.pattern(13),
                                               rule.pattern(14),
                                               rule.pattern(15),)) \
                              as gen_6:
                              for x_6 in gen_6:
                                assert x_6 is None, \
                                  "pulmonary.allergic_rhinitis_1: got unexpected plan from when clause 6"
                                # Sum the six answer CFs, then apply the threshold gate.
                                mark7 = context.mark(True)
                                if rule.pattern(16).match_data(context, context,
                                        sum_cf(context.lookup_data('ans1'), context.lookup_data('ans2'), context.lookup_data('ans3'), context.lookup_data('ans4'), context.lookup_data('ans5'), context.lookup_data('ans6'))):
                                  context.end_save_all_undo()
                                  if context.lookup_data('ans')[0] >= cf_threshold :
                                    rule.rule_base.num_bc_rule_successes += 1
                                    yield
                                else: context.end_save_all_undo()
                                context.undo_to_mark(mark7)
        rule.rule_base.num_bc_rule_failures += 1
    finally:
      context.done()
def allergic_rhinitis_2(rule, arg_patterns, arg_context):
  """Backward-chaining generator for rule 'allergic_rhinitis_2' (goal 'diagnosis').

  Generated by the pyke compiler from pulmonary.krb -- do not edit.
  Proves two 'score' subgoals, sums their certainty factors with sum_cf,
  and yields only when the combined CF reaches cf_threshold.
  """
  engine = rule.rule_base.engine
  patterns = rule.goal_arg_patterns()
  if len(arg_patterns) == len(patterns):
    context = contexts.bc_context(rule)
    try:
      if all(itertools.imap(lambda pat, arg:
                              pat.match_pattern(context, context,
                                                arg, arg_context),
                            patterns,
                            arg_patterns)):
        rule.rule_base.num_bc_rules_matched += 1
        with engine.prove(rule.rule_base.root_name, 'score', context,
                          (rule.pattern(0),
                           rule.pattern(1),
                           rule.pattern(2),)) \
          as gen_1:
          for x_1 in gen_1:
            assert x_1 is None, \
              "pulmonary.allergic_rhinitis_2: got unexpected plan from when clause 1"
            with engine.prove(rule.rule_base.root_name, 'score', context,
                              (rule.pattern(3),
                               rule.pattern(1),
                               rule.pattern(4),)) \
              as gen_2:
              for x_2 in gen_2:
                assert x_2 is None, \
                  "pulmonary.allergic_rhinitis_2: got unexpected plan from when clause 2"
                # Sum the two answer CFs, then apply the threshold gate.
                mark3 = context.mark(True)
                if rule.pattern(5).match_data(context, context,
                        sum_cf(context.lookup_data('ans1'), context.lookup_data('ans2'))):
                  context.end_save_all_undo()
                  if context.lookup_data('ans')[0] >= cf_threshold :
                    rule.rule_base.num_bc_rule_successes += 1
                    yield
                else: context.end_save_all_undo()
                context.undo_to_mark(mark3)
        rule.rule_base.num_bc_rule_failures += 1
    finally:
      context.done()
def perrenial_rhinitis(rule, arg_patterns, arg_context):
  """Backward-chaining generator for rule 'perrenial_rhinitis' (goal 'diagnosis').

  Generated by the pyke compiler from pulmonary.krb -- do not edit.
  Proves four 'score' subgoals, sums their certainty factors with sum_cf,
  and yields only when the combined CF reaches cf_threshold.
  """
  engine = rule.rule_base.engine
  patterns = rule.goal_arg_patterns()
  if len(arg_patterns) == len(patterns):
    context = contexts.bc_context(rule)
    try:
      if all(itertools.imap(lambda pat, arg:
                              pat.match_pattern(context, context,
                                                arg, arg_context),
                            patterns,
                            arg_patterns)):
        rule.rule_base.num_bc_rules_matched += 1
        with engine.prove(rule.rule_base.root_name, 'score', context,
                          (rule.pattern(0),
                           rule.pattern(1),
                           rule.pattern(2),)) \
          as gen_1:
          for x_1 in gen_1:
            assert x_1 is None, \
              "pulmonary.perrenial_rhinitis: got unexpected plan from when clause 1"
            with engine.prove(rule.rule_base.root_name, 'score', context,
                              (rule.pattern(3),
                               rule.pattern(1),
                               rule.pattern(4),)) \
              as gen_2:
              for x_2 in gen_2:
                assert x_2 is None, \
                  "pulmonary.perrenial_rhinitis: got unexpected plan from when clause 2"
                with engine.prove(rule.rule_base.root_name, 'score', context,
                                  (rule.pattern(5),
                                   rule.pattern(1),
                                   rule.pattern(6),)) \
                  as gen_3:
                  for x_3 in gen_3:
                    assert x_3 is None, \
                      "pulmonary.perrenial_rhinitis: got unexpected plan from when clause 3"
                    with engine.prove(rule.rule_base.root_name, 'score', context,
                                      (rule.pattern(7),
                                       rule.pattern(8),
                                       rule.pattern(9),)) \
                      as gen_4:
                      for x_4 in gen_4:
                        assert x_4 is None, \
                          "pulmonary.perrenial_rhinitis: got unexpected plan from when clause 4"
                        # Sum the four answer CFs, then apply the threshold gate.
                        mark5 = context.mark(True)
                        if rule.pattern(10).match_data(context, context,
                                sum_cf(context.lookup_data('ans1'), context.lookup_data('ans2'), context.lookup_data('ans3'), context.lookup_data('ans4'))):
                          context.end_save_all_undo()
                          if context.lookup_data('ans')[0] >= cf_threshold :
                            rule.rule_base.num_bc_rule_successes += 1
                            yield
                        else: context.end_save_all_undo()
                        context.undo_to_mark(mark5)
        rule.rule_base.num_bc_rule_failures += 1
    finally:
      context.done()
def allergic_perrenial_rhintis(rule, arg_patterns, arg_context):
  """Backward-chaining generator for rule 'allergic_perrenial_rhintis' (goal 'diagnosis').

  Generated by the pyke compiler from pulmonary.krb -- do not edit.
  Proves a nested 'diagnosis' subgoal and a 'score' subgoal, then binds
  sum_cf(ans1) and mult_cf(ans1, sum).  Note: unlike the simpler diagnosis
  rules above, this one yields on every successful binding -- there is no
  cf_threshold gate here.
  """
  engine = rule.rule_base.engine
  patterns = rule.goal_arg_patterns()
  if len(arg_patterns) == len(patterns):
    context = contexts.bc_context(rule)
    try:
      if all(itertools.imap(lambda pat, arg:
                              pat.match_pattern(context, context,
                                                arg, arg_context),
                            patterns,
                            arg_patterns)):
        rule.rule_base.num_bc_rules_matched += 1
        with engine.prove(rule.rule_base.root_name, 'diagnosis', context,
                          (rule.pattern(0),
                           rule.pattern(1),)) \
          as gen_1:
          for x_1 in gen_1:
            assert x_1 is None, \
              "pulmonary.allergic_perrenial_rhintis: got unexpected plan from when clause 1"
            with engine.prove(rule.rule_base.root_name, 'score', context,
                              (rule.pattern(2),
                               rule.pattern(3),
                               rule.pattern(1),)) \
              as gen_2:
              for x_2 in gen_2:
                assert x_2 is None, \
                  "pulmonary.allergic_perrenial_rhintis: got unexpected plan from when clause 2"
                mark3 = context.mark(True)
                if rule.pattern(4).match_data(context, context,
                        sum_cf(context.lookup_data('ans1'))):
                  context.end_save_all_undo()
                  mark4 = context.mark(True)
                  if rule.pattern(5).match_data(context, context,
                          mult_cf(context.lookup_data('ans1'), context.lookup_data('sum'))):
                    context.end_save_all_undo()
                    rule.rule_base.num_bc_rule_successes += 1
                    yield
                  else: context.end_save_all_undo()
                  context.undo_to_mark(mark4)
                else: context.end_save_all_undo()
                context.undo_to_mark(mark3)
        rule.rule_base.num_bc_rule_failures += 1
    finally:
      context.done()
def non_allergic_perrenial_rhinitis(rule, arg_patterns, arg_context):
  """Backward-chaining generator for rule 'non_allergic_perrenial_rhinitis' (goal 'diagnosis').

  Generated by the pyke compiler from pulmonary.krb -- do not edit.
  Proves a nested 'diagnosis' subgoal plus three 'score' subgoals, sums the
  three answer CFs with sum_cf, scales by the nested diagnosis CF via
  mult_cf, and yields on every successful binding (no cf_threshold gate).
  """
  engine = rule.rule_base.engine
  patterns = rule.goal_arg_patterns()
  if len(arg_patterns) == len(patterns):
    context = contexts.bc_context(rule)
    try:
      if all(itertools.imap(lambda pat, arg:
                              pat.match_pattern(context, context,
                                                arg, arg_context),
                            patterns,
                            arg_patterns)):
        rule.rule_base.num_bc_rules_matched += 1
        with engine.prove(rule.rule_base.root_name, 'diagnosis', context,
                          (rule.pattern(0),
                           rule.pattern(1),)) \
          as gen_1:
          for x_1 in gen_1:
            assert x_1 is None, \
              "pulmonary.non_allergic_perrenial_rhinitis: got unexpected plan from when clause 1"
            with engine.prove(rule.rule_base.root_name, 'score', context,
                              (rule.pattern(2),
                               rule.pattern(3),
                               rule.pattern(1),)) \
              as gen_2:
              for x_2 in gen_2:
                assert x_2 is None, \
                  "pulmonary.non_allergic_perrenial_rhinitis: got unexpected plan from when clause 2"
                with engine.prove(rule.rule_base.root_name, 'score', context,
                                  (rule.pattern(4),
                                   rule.pattern(5),
                                   rule.pattern(6),)) \
                  as gen_3:
                  for x_3 in gen_3:
                    assert x_3 is None, \
                      "pulmonary.non_allergic_perrenial_rhinitis: got unexpected plan from when clause 3"
                    with engine.prove(rule.rule_base.root_name, 'score', context,
                                      (rule.pattern(7),
                                       rule.pattern(3),
                                       rule.pattern(8),)) \
                      as gen_4:
                      for x_4 in gen_4:
                        assert x_4 is None, \
                          "pulmonary.non_allergic_perrenial_rhinitis: got unexpected plan from when clause 4"
                        mark5 = context.mark(True)
                        if rule.pattern(9).match_data(context, context,
                                sum_cf(context.lookup_data('ans1'), context.lookup_data('ans2'), context.lookup_data('ans3'))):
                          context.end_save_all_undo()
                          mark6 = context.mark(True)
                          if rule.pattern(10).match_data(context, context,
                                  mult_cf(context.lookup_data('ans1'), context.lookup_data('sum'))):
                            context.end_save_all_undo()
                            rule.rule_base.num_bc_rule_successes += 1
                            yield
                          else: context.end_save_all_undo()
                          context.undo_to_mark(mark6)
                        else: context.end_save_all_undo()
                        context.undo_to_mark(mark5)
        rule.rule_base.num_bc_rule_failures += 1
    finally:
      context.done()
def vasomotor_rhinitis(rule, arg_patterns, arg_context):
  """Backward-chaining generator for rule 'vasomotor_rhinitis' (goal 'diagnosis').

  Generated by the pyke compiler from pulmonary.krb -- do not edit.
  Proves a nested 'diagnosis' subgoal plus two 'score' subgoals, sums the
  two answer CFs with sum_cf, scales by the nested diagnosis CF via
  mult_cf, and yields on every successful binding (no cf_threshold gate).
  """
  engine = rule.rule_base.engine
  patterns = rule.goal_arg_patterns()
  if len(arg_patterns) == len(patterns):
    context = contexts.bc_context(rule)
    try:
      if all(itertools.imap(lambda pat, arg:
                              pat.match_pattern(context, context,
                                                arg, arg_context),
                            patterns,
                            arg_patterns)):
        rule.rule_base.num_bc_rules_matched += 1
        with engine.prove(rule.rule_base.root_name, 'diagnosis', context,
                          (rule.pattern(0),
                           rule.pattern(1),)) \
          as gen_1:
          for x_1 in gen_1:
            assert x_1 is None, \
              "pulmonary.vasomotor_rhinitis: got unexpected plan from when clause 1"
            with engine.prove(rule.rule_base.root_name, 'score', context,
                              (rule.pattern(2),
                               rule.pattern(3),
                               rule.pattern(1),)) \
              as gen_2:
              for x_2 in gen_2:
                assert x_2 is None, \
                  "pulmonary.vasomotor_rhinitis: got unexpected plan from when clause 2"
                with engine.prove(rule.rule_base.root_name, 'score', context,
                                  (rule.pattern(4),
                                   rule.pattern(5),
                                   rule.pattern(6),)) \
                  as gen_3:
                  for x_3 in gen_3:
                    assert x_3 is None, \
                      "pulmonary.vasomotor_rhinitis: got unexpected plan from when clause 3"
                    mark4 = context.mark(True)
                    if rule.pattern(7).match_data(context, context,
                            sum_cf(context.lookup_data('ans1'), context.lookup_data('ans2'))):
                      context.end_save_all_undo()
                      mark5 = context.mark(True)
                      if rule.pattern(8).match_data(context, context,
                              mult_cf(context.lookup_data('ans1'), context.lookup_data('sum'))):
                        context.end_save_all_undo()
                        rule.rule_base.num_bc_rule_successes += 1
                        yield
                      else: context.end_save_all_undo()
                      context.undo_to_mark(mark5)
                    else: context.end_save_all_undo()
                    context.undo_to_mark(mark4)
        rule.rule_base.num_bc_rule_failures += 1
    finally:
      context.done()
def score_answered(rule, arg_patterns, arg_context):
  """Backward-chaining generator for rule 'score_answered' (goal 'score').

  Generated by the pyke compiler from pulmonary.krb -- do not edit.
  Proves the 'question' fact for the bound question, proves
  special.claim_goal, then scans the (answer, answer_score) pairs to bind
  the maximum possible score and the score of the answer actually given.
  """
  engine = rule.rule_base.engine
  patterns = rule.goal_arg_patterns()
  if len(arg_patterns) == len(patterns):
    context = contexts.bc_context(rule)
    try:
      if all(itertools.imap(lambda pat, arg:
                              pat.match_pattern(context, context,
                                                arg, arg_context),
                            patterns,
                            arg_patterns)):
        rule.rule_base.num_bc_rules_matched += 1
        with engine.prove('question', context.lookup_data('question'), context,
                          (rule.pattern(0),)) \
          as gen_1:
          for x_1 in gen_1:
            assert x_1 is None, \
              "pulmonary.score_answered: got unexpected plan from when clause 1"
            with engine.prove('special', 'claim_goal', context,
                              ()) \
              as gen_2:
              for x_2 in gen_2:
                assert x_2 is None, \
                  "pulmonary.score_answered: got unexpected plan from when clause 2"
                # Scan the answer table: track the best possible score and the
                # score earned by the answer the user gave.
                max_score = 0.0
                score = 0.0
                for ans, ans_score in context.lookup_data('answers'):
                  if ans_score > max_score: max_score = ans_score
                  if ans == context.lookup_data('ans'): score = ans_score
                mark4 = context.mark(True)
                if rule.pattern(1).match_data(context, context,
                        max_score):
                  context.end_save_all_undo()
                  mark5 = context.mark(True)
                  if rule.pattern(2).match_data(context, context,
                          score):
                    context.end_save_all_undo()
                    rule.rule_base.num_bc_rule_successes += 1
                    yield
                  else: context.end_save_all_undo()
                  context.undo_to_mark(mark5)
                else: context.end_save_all_undo()
                context.undo_to_mark(mark4)
        rule.rule_base.num_bc_rule_failures += 1
    finally:
      context.done()
def score_unanswered(rule, arg_patterns, arg_context):
  """Backward-chaining generator for rule 'score_unanswered' (goal 'score').

  Generated by the pyke compiler from pulmonary.krb -- do not edit.
  Fallback scorer: binds the maximum answer score found in the bound
  'answers' list and succeeds once per goal match.
  """
  engine = rule.rule_base.engine
  patterns = rule.goal_arg_patterns()
  if len(arg_patterns) == len(patterns):
    context = contexts.bc_context(rule)
    try:
      if all(itertools.imap(lambda pat, arg:
                              pat.match_pattern(context, context,
                                                arg, arg_context),
                            patterns,
                            arg_patterns)):
        rule.rule_base.num_bc_rules_matched += 1
        mark1 = context.mark(True)
        if rule.pattern(0).match_data(context, context,
                max(ans_score for ans, ans_score in context.lookup_data('answers'))):
          context.end_save_all_undo()
          rule.rule_base.num_bc_rule_successes += 1
          yield
        else: context.end_save_all_undo()
        context.undo_to_mark(mark1)
        rule.rule_base.num_bc_rule_failures += 1
    finally:
      context.done()
def score_not_match(rule, arg_patterns, arg_context):
  """Backward-chaining generator for rule 'score_not_match' (goal 'score_not').

  Generated by the pyke compiler from pulmonary.krb -- do not edit.
  Proves the 'question' fact for the bound question and special.claim_goal,
  then succeeds once.
  """
  engine = rule.rule_base.engine
  patterns = rule.goal_arg_patterns()
  if len(arg_patterns) == len(patterns):
    context = contexts.bc_context(rule)
    try:
      if all(itertools.imap(lambda pat, arg:
                              pat.match_pattern(context, context,
                                                arg, arg_context),
                            patterns,
                            arg_patterns)):
        rule.rule_base.num_bc_rules_matched += 1
        with engine.prove('question', context.lookup_data('question'), context,
                          (rule.pattern(0),)) \
          as gen_1:
          for x_1 in gen_1:
            assert x_1 is None, \
              "pulmonary.score_not_match: got unexpected plan from when clause 1"
            with engine.prove('special', 'claim_goal', context,
                              ()) \
              as gen_2:
              for x_2 in gen_2:
                assert x_2 is None, \
                  "pulmonary.score_not_match: got unexpected plan from when clause 2"
                rule.rule_base.num_bc_rule_successes += 1
                yield
        rule.rule_base.num_bc_rule_failures += 1
    finally:
      context.done()
def score_not_no_match(rule, arg_patterns, arg_context):
  """Backward-chaining generator for rule 'score_not_no_match' (goal 'score_not').

  Generated by the pyke compiler from pulmonary.krb -- do not edit.
  Proves the 'question' fact for the bound question and special.claim_goal,
  then succeeds once.
  """
  engine = rule.rule_base.engine
  patterns = rule.goal_arg_patterns()
  if len(arg_patterns) == len(patterns):
    context = contexts.bc_context(rule)
    try:
      if all(itertools.imap(lambda pat, arg:
                              pat.match_pattern(context, context,
                                                arg, arg_context),
                            patterns,
                            arg_patterns)):
        rule.rule_base.num_bc_rules_matched += 1
        with engine.prove('question', context.lookup_data('question'), context,
                          (rule.pattern(0),)) \
          as gen_1:
          for x_1 in gen_1:
            assert x_1 is None, \
              "pulmonary.score_not_no_match: got unexpected plan from when clause 1"
            with engine.prove('special', 'claim_goal', context,
                              ()) \
              as gen_2:
              for x_2 in gen_2:
                assert x_2 is None, \
                  "pulmonary.score_not_no_match: got unexpected plan from when clause 2"
                rule.rule_base.num_bc_rule_successes += 1
                yield
        rule.rule_base.num_bc_rule_failures += 1
    finally:
      context.done()
def score_not_unanswered(rule, arg_patterns, arg_context):
  """Backward-chaining generator for rule 'score_not_unanswered' (goal 'score_not').

  Generated by the pyke compiler from pulmonary.krb -- do not edit.
  Catch-all: succeeds exactly once whenever the goal argument patterns match.
  """
  engine = rule.rule_base.engine
  patterns = rule.goal_arg_patterns()
  if len(arg_patterns) == len(patterns):
    context = contexts.bc_context(rule)
    try:
      if all(itertools.imap(lambda pat, arg:
                              pat.match_pattern(context, context,
                                                arg, arg_context),
                            patterns,
                            arg_patterns)):
        rule.rule_base.num_bc_rules_matched += 1
        rule.rule_base.num_bc_rule_successes += 1
        yield
        rule.rule_base.num_bc_rule_failures += 1
    finally:
      context.done()
def populate(engine):
  """Register every compiled rule of the 'pulmonary' rule base with engine.

  Generated by the pyke compiler from pulmonary.krb -- do not edit.
  Each bc_rule.bc_rule call wires one of the generator functions above to
  its goal name, goal argument patterns, plan patterns (always empty here),
  and the patterns referenced by rule.pattern(i) inside that function.
  """
  This_rule_base = engine.get_create('pulmonary')
  bc_rule.bc_rule('common_cold', This_rule_base, 'diagnosis',
                  common_cold, None,
                  (pattern.pattern_literal('common_cold'),
                   contexts.variable('ans'),),
                  (),
                  (pattern.pattern_literal('runny_nose'),
                   pattern.pattern_literal((('yes', 0.8,),)),
                   contexts.variable('ans1'),
                   pattern.pattern_literal('nasal_blockage'),
                   contexts.variable('ans2'),
                   pattern.pattern_literal('sneezing'),
                   contexts.variable('ans3'),
                   pattern.pattern_literal('nasal_secretions'),
                   pattern.pattern_literal((('thick_purulent', 0.8,),)),
                   contexts.variable('ans4'),
                   pattern.pattern_literal('fever'),
                   pattern.pattern_literal((('low_grade', 0.6,),)),
                   contexts.variable('ans5'),
                   pattern.pattern_literal('malaise'),
                   contexts.variable('ans6'),
                   contexts.variable('ans'),))
  bc_rule.bc_rule('allergic_rhinitis_1', This_rule_base, 'diagnosis',
                  allergic_rhinitis_1, None,
                  (pattern.pattern_literal('allergic_rhinitis'),
                   contexts.variable('ans'),),
                  (),
                  (pattern.pattern_literal('runny_nose'),
                   pattern.pattern_literal((('yes', 0.8,),)),
                   contexts.variable('ans1'),
                   pattern.pattern_literal('nasal_blockage'),
                   contexts.variable('ans2'),
                   pattern.pattern_literal('sneezing'),
                   contexts.variable('ans3'),
                   pattern.pattern_literal('nasal_secretions'),
                   pattern.pattern_literal((('thin_watery', 0.6,),)),
                   contexts.variable('ans4'),
                   pattern.pattern_literal('on_exposure'),
                   pattern.pattern_literal((('dust', 0.9,),)),
                   contexts.variable('ans5'),
                   pattern.pattern_literal('trigger'),
                   pattern.pattern_literal((('smoke', 0.6,),)),
                   contexts.variable('ans6'),
                   contexts.variable('ans'),))
  bc_rule.bc_rule('allergic_rhinitis_2', This_rule_base, 'diagnosis',
                  allergic_rhinitis_2, None,
                  (pattern.pattern_literal('allergic_rhinitis'),
                   contexts.variable('ans'),),
                  (),
                  (pattern.pattern_literal('skin_test_for_allergen'),
                   pattern.pattern_literal((('positive', 0.9,),)),
                   contexts.variable('ans1'),
                   pattern.pattern_literal('eosinophils_in_secretions'),
                   contexts.variable('ans2'),
                   contexts.variable('ans'),))
  bc_rule.bc_rule('perrenial_rhinitis', This_rule_base, 'diagnosis',
                  perrenial_rhinitis, None,
                  (pattern.pattern_literal('perrenial_rhinitis'),
                   contexts.variable('ans'),),
                  (),
                  (pattern.pattern_literal('runny_nose'),
                   pattern.pattern_literal((('yes', 0.8,),)),
                   contexts.variable('ans1'),
                   pattern.pattern_literal('nasal_blockage'),
                   contexts.variable('ans2'),
                   pattern.pattern_literal('sneezing'),
                   contexts.variable('ans3'),
                   pattern.pattern_literal('cold_duration'),
                   pattern.pattern_literal((('more_than_2weeks', 0.9,),)),
                   contexts.variable('ans4'),
                   contexts.variable('ans'),))
  bc_rule.bc_rule('allergic_perrenial_rhintis', This_rule_base, 'diagnosis',
                  allergic_perrenial_rhintis, None,
                  (pattern.pattern_literal('allergic_perrenial_rhinitis'),
                   contexts.variable('ans'),),
                  (),
                  (pattern.pattern_literal('allergic_rhinitis'),
                   contexts.variable('ans1'),
                   pattern.pattern_literal('cold_duration'),
                   pattern.pattern_literal((('more_than_2weeks', 0.9,),)),
                   contexts.variable('sum'),
                   contexts.variable('ans'),))
  bc_rule.bc_rule('non_allergic_perrenial_rhinitis', This_rule_base, 'diagnosis',
                  non_allergic_perrenial_rhinitis, None,
                  (pattern.pattern_literal('non_allergic_perrenial_rhinitis'),
                   contexts.variable('ans'),),
                  (),
                  (pattern.pattern_literal('perrenial_rhinitis'),
                   contexts.variable('ans1'),
                   pattern.pattern_literal('skin_test_for_allergen'),
                   pattern.pattern_literal((('negative', 0.9,),)),
                   pattern.pattern_literal('trigger'),
                   pattern.pattern_literal((('smoke', 0.5,),)),
                   contexts.variable('ans2'),
                   pattern.pattern_literal('eosinophils_in_secretions'),
                   contexts.variable('ans3'),
                   contexts.variable('sum'),
                   contexts.variable('ans'),))
  bc_rule.bc_rule('vasomotor_rhinitis', This_rule_base, 'diagnosis',
                  vasomotor_rhinitis, None,
                  (pattern.pattern_literal('vasomotor'),
                   contexts.variable('ans'),),
                  (),
                  (pattern.pattern_literal('perrenial_rhinitis'),
                   contexts.variable('ans1'),
                   pattern.pattern_literal('skin_test_for_allergen'),
                   pattern.pattern_literal((('negative', 0.9,),)),
                   pattern.pattern_literal('trigger'),
                   pattern.pattern_literal((('smoke', 0.5,),)),
                   contexts.variable('ans2'),
                   contexts.variable('sum'),
                   contexts.variable('ans'),))
  bc_rule.bc_rule('score_answered', This_rule_base, 'score',
                  score_answered, None,
                  (contexts.variable('question'),
                   contexts.variable('answers'),
                   pattern.pattern_tuple((contexts.variable('score'), contexts.variable('max_score'), pattern.pattern_literal(None),), None),),
                  (),
                  (contexts.variable('ans'),
                   contexts.variable('max_score'),
                   contexts.variable('score'),))
  bc_rule.bc_rule('score_unanswered', This_rule_base, 'score',
                  score_unanswered, None,
                  (contexts.variable('question'),
                   contexts.variable('answers'),
                   pattern.pattern_tuple((pattern.pattern_literal(0), contexts.variable('max_score'), contexts.variable('question'),), None),),
                  (),
                  (contexts.variable('max_score'),))
  bc_rule.bc_rule('score_not_match', This_rule_base, 'score_not',
                  score_not_match, None,
                  (contexts.anonymous('_question'),
                   contexts.variable('ans'),
                   contexts.variable('score'),
                   pattern.pattern_tuple((pattern.pattern_literal(0), contexts.variable('score'), pattern.pattern_literal(None),), None),),
                  (),
                  (contexts.variable('ans'),))
  bc_rule.bc_rule('score_not_no_match', This_rule_base, 'score_not',
                  score_not_no_match, None,
                  (contexts.anonymous('_question'),
                   contexts.anonymous('_ans'),
                   contexts.variable('score'),
                   pattern.pattern_tuple((contexts.variable('score'), contexts.variable('score'), pattern.pattern_literal(None),), None),),
                  (),
                  (contexts.anonymous('_different_ans'),))
  bc_rule.bc_rule('score_not_unanswered', This_rule_base, 'score_not',
                  score_not_unanswered, None,
                  (contexts.variable('question'),
                   contexts.anonymous('_ans'),
                   contexts.variable('score'),
                   pattern.pattern_tuple((pattern.pattern_literal(0), contexts.variable('score'), contexts.variable('question'),), None),),
                  (),
                  ())
# CF arithmetic helpers and the acceptance threshold used by the compiled
# rule functions above; imported at module scope by the generated code.
from naimath.engine.helpers import cf_threshold, sum_cf, mult_cf
# Path of the .krb source this module was compiled from.
Krb_filename = '../pulmonary.krb'
# Generated mapping of ((first, last) line range in this compiled file) ->
# ((first, last) line range in Krb_filename); presumably used by pyke to
# report errors against the original .krb source -- do not edit by hand.
Krb_lineno_map = (
    ((16, 20), (4, 4)),
    ((22, 29), (6, 6)),
    ((30, 37), (7, 7)),
    ((38, 45), (8, 8)),
    ((46, 53), (9, 9)),
    ((54, 61), (10, 10)),
    ((62, 69), (11, 11)),
    ((72, 72), (12, 12)),
    ((74, 74), (13, 13)),
    ((89, 93), (18, 18)),
    ((95, 102), (20, 20)),
    ((103, 110), (21, 21)),
    ((111, 118), (22, 22)),
    ((119, 126), (23, 23)),
    ((127, 134), (24, 24)),
    ((135, 142), (25, 25)),
    ((145, 145), (26, 26)),
    ((147, 147), (27, 27)),
    ((162, 166), (30, 30)),
    ((168, 175), (32, 32)),
    ((176, 183), (33, 33)),
    ((186, 186), (34, 34)),
    ((188, 188), (35, 35)),
    ((203, 207), (38, 38)),
    ((209, 216), (40, 40)),
    ((217, 224), (41, 41)),
    ((225, 232), (42, 42)),
    ((233, 240), (43, 43)),
    ((243, 243), (44, 44)),
    ((245, 245), (45, 45)),
    ((260, 264), (49, 49)),
    ((266, 272), (51, 51)),
    ((273, 280), (52, 52)),
    ((283, 283), (53, 53)),
    ((287, 287), (54, 54)),
    ((305, 309), (58, 58)),
    ((311, 317), (60, 60)),
    ((318, 325), (61, 61)),
    ((326, 333), (62, 62)),
    ((334, 341), (63, 63)),
    ((344, 344), (65, 65)),
    ((348, 348), (66, 66)),
    ((366, 370), (69, 69)),
    ((372, 378), (71, 71)),
    ((379, 386), (72, 72)),
    ((387, 394), (73, 73)),
    ((397, 397), (75, 75)),
    ((401, 401), (76, 76)),
    ((419, 423), (82, 82)),
    ((425, 430), (84, 84)),
    ((431, 436), (85, 85)),
    ((437, 441), (86, 91)),
    ((444, 444), (92, 92)),
    ((448, 448), (93, 93)),
    ((466, 470), (96, 96)),
    ((474, 474), (98, 98)),
    ((490, 494), (101, 101)),
    ((496, 501), (103, 103)),
    ((502, 507), (104, 104)),
    ((520, 524), (107, 107)),
    ((526, 531), (109, 109)),
    ((532, 537), (110, 110)),
    ((550, 554), (113, 113)),
)
| Python |
import wx
def hd_onset(self,pos,cf):
x=pos[0]
y=pos[1]
wx.StaticText(self.scroll21,-1," >>> Is the headache sudden in onset or gradual?"+ "\tCF: " +cf,pos=(x,y))
y=y+25
options = ['Unanswered','Gradual','Sudden over gradual','Sudden']
box=wx.RadioBox(self.scroll21,-1, "Select only one:", (x+55,y), wx.DefaultSize, options, 1, wx.RA_SPECIFY_ROWS)
def OnValidate(event):
print str.lower(str(box.GetStringSelection()))
pass
self.Bind(wx.EVT_RADIOBOX,OnValidate,box)
y=y+60
return x,y
def hd_location(self, pos, cf):
    """Draw the 'headache location' question as a two-column radio box.

    pos -- (x, y) top-left anchor; cf -- certainty factor shown in the label.
    Returns the (x, y) position at which the next widget should be drawn.
    """
    x, y = pos[0], pos[1]
    label = " >>> Where Exactly do you feel the headache?" + "\tCF: " + str(cf)
    wx.StaticText(self.scroll21, -1, label, pos=(x, y))
    choices = ['Front of the head (Frontal)', 'Temples (Temporal)',
               'Back of the head (Occipital)', 'Sides of the head (Parietal)',
               'Behind the eyes (RetroOrbital)', 'All over the head (Complete)',
               'Multiple Areas (Temporo-parietal)', 'Unanswered']
    y += 25
    wx.RadioBox(self.scroll21, -1, "Select only one:", (x + 55, y),
                wx.DefaultSize, choices, 2, wx.RA_SPECIFY_COLS)
    y += 135
    return x, y
# Static Text requires space of 25. Single Check box row of 40. A Single radio box row of 60. A four row radiobox of 130. A textctrl with 25y button-40
def hd_duration(self,pos,cf):
x=pos[0]
y=pos[1]
wx.StaticText(self.scroll21,-1," >>> Since how long do you have headache?"+ "\tCF: " +str(cf),pos=(x,y))
y=y+25
duration=wx.TextCtrl(self.scroll21, -1, "0hrs 0days 0months 0years",size=(225, -1),pos=(x+55,y))
valbutton=wx.Button(self.scroll21,-1,"Validate",pos=(x+295,y),size=(100,25))
y=y+40
def OnValidate(event):
print duration.GetValue()
pass
self.Bind(wx.EVT_BUTTON,OnValidate,valbutton)
return x,y
def hd_episodic(self, pos, cf):
    """Draw the yes/no 'is the headache episodic?' radio box.

    pos -- (x, y) top-left anchor; cf -- certainty factor shown in the label.
    Returns the (x, y) position at which the next widget should be drawn.
    """
    x, y = pos[0], pos[1]
    wx.StaticText(self.scroll21, -1,
                  " >>> Is the Headache Episodic?" + "\tCF: " + str(cf),
                  pos=(x, y))
    y += 25
    wx.RadioBox(self.scroll21, -1, "Select only one:", (x + 50, y),
                wx.DefaultSize, ['Yes', 'No', 'Unanswered'], 1,
                wx.RA_SPECIFY_ROWS)
    y += 60
    return x, y
| Python |
#!/usr/bin/python
# gotoclass.py
import wx
class GoToClass(wx.Frame):
    """Prototype main window: menu bar, bitmap toolbar, and a nested-splitter
    layout (left: chat pane over a specialty notebook; right: free-text area).

    The entire UI is built inline in __init__; there are no event handlers yet.
    """
    def __init__(self, parent, id, title):
        """Create and show the frame. ``title`` becomes the window caption."""
        wx.Frame.__init__(self, parent, id, title, size=(990, 850))
        panel = wx.Panel(self, -1)
        # Status bar
        self.CreateStatusBar()
        self.SetStatusText("This is the statusbar")
        # --- Menus ---
        # FILE menu items
        filemenu = wx.Menu()
        filemenu.Append( -1, "About",
                        "More information about this program")
        filemenu.AppendSeparator()
        filemenu.Append(wx.ID_EXIT, "Exit", "Terminate the program")
        # SETTINGS menu items
        # NOTE(review): wx.ID_ABOUT is reused for unrelated items in this menu
        # and the help menu, and literal ids 7/8 are hard-coded; duplicate ids
        # will confuse menu event routing once handlers are bound — verify.
        settingsmenu = wx.Menu()
        settingsmenu.Append(wx.ID_ABOUT,"Settings","Software settings")
        settingsmenu.AppendSeparator()
        settingsmenu.Append(8,"&Options","Options")
        # HELP menu items
        helpmenu = wx.Menu()
        helpmenu.Append(wx.ID_ABOUT,"Help tutorial","Get started with tutorial")
        helpmenu.AppendSeparator()
        helpmenu.Append(7,"About","About this software")
        # Menu bar
        menuBar = wx.MenuBar()
        menuBar.Append(filemenu, "&File");
        menuBar.Append(settingsmenu, "&Settings")
        menuBar.Append(helpmenu, "&Help")
        self.SetMenuBar(menuBar)
        # First vertical box - stacking two rows: the toolbar and the panels.
        vbox = wx.BoxSizer(wx.VERTICAL)
        # HBOX which will contain the toolbar and its buttons
        hbox1 = wx.BoxSizer(wx.HORIZONTAL)
        # NOTE(review): toolbar bitmaps are loaded from the current working
        # directory; a missing .jpg will fail at startup — TODO confirm paths.
        imageFileadd = "Add.jpg"
        image1 = wx.Image(imageFileadd, wx.BITMAP_TYPE_ANY).ConvertToBitmap()
        imageFileback = "Back.jpg"
        image2 = wx.Image(imageFileback, wx.BITMAP_TYPE_ANY).ConvertToBitmap()
        imageFileforward = "Forward.jpg"
        image3 = wx.Image(imageFileforward, wx.BITMAP_TYPE_ANY).ConvertToBitmap()
        imageFiledelete = "Exit.jpg"
        image4 = wx.Image(imageFiledelete, wx.BITMAP_TYPE_ANY).ConvertToBitmap()
        imageFilecheck = "Check.jpg"
        image5 = wx.Image(imageFilecheck, wx.BITMAP_TYPE_ANY).ConvertToBitmap()
        imageFileprinter = "Printer.jpg"
        image6 = wx.Image(imageFileprinter, wx.BITMAP_TYPE_ANY).ConvertToBitmap()
        imageFilepadlock = "Lock.jpg"
        image7 = wx.Image(imageFilepadlock, wx.BITMAP_TYPE_ANY).ConvertToBitmap()
        imageFilesave = "Help.jpg"
        image8 = wx.Image(imageFilesave, wx.BITMAP_TYPE_ANY).ConvertToBitmap()
        # All buttons are sized from image1, not from their own bitmap.
        btnnew = wx.BitmapButton(panel, -1, bitmap = image1, size=(image1.GetWidth()+5, image1.GetHeight()+5))
        btnback = wx.BitmapButton(panel, -1, bitmap = image2, size=(image1.GetWidth()+5, image1.GetHeight()+5))
        btnforward= wx.BitmapButton(panel, -1, bitmap = image3, size=(image1.GetWidth()+5, image1.GetHeight()+5))
        btndelete = wx.BitmapButton(panel, -1, bitmap = image4, size=(image1.GetWidth()+5, image1.GetHeight()+5))
        btncheck = wx.BitmapButton(panel, -1, bitmap = image5, size=(image1.GetWidth()+5, image1.GetHeight()+5))
        btnprinter = wx.BitmapButton(panel, -1, bitmap = image6, size=(image1.GetWidth()+5, image1.GetHeight()+5))
        btnpadlock = wx.BitmapButton(panel, -1, bitmap = image7, size=(image1.GetWidth()+5, image1.GetHeight()+5))
        btnsave = wx.BitmapButton(panel, -1, bitmap = image8, size=(image1.GetWidth()+5, image1.GetHeight()+5))
        hbox1.Add(btnnew, 0, wx.RIGHT, 8)
        hbox1.Add(btnsave, 0, wx.RIGHT, 8)
        hbox1.Add(btnback, 0, wx.RIGHT, 8)
        hbox1.Add(btnforward, 0, wx.RIGHT, 8)
        hbox1.Add(btndelete, 0, wx.RIGHT, 8)
        hbox1.Add(btnpadlock, 0, wx.RIGHT, 8)
        hbox1.Add(btncheck, 0, wx.RIGHT, 8)
        hbox1.Add(btnprinter, 0, wx.RIGHT, 8)
        # Add hbox1 (the toolbar) to the vertical box.
        vbox.Add(hbox1, 0, wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, 10)
        vbox.Add((-1, 10))
        # Create the main splitter window and add it to the vertical box.
        splitter = wx.SplitterWindow(panel, -1)
        vbox.Add(splitter, 1, wx.EXPAND | wx.ALL )
        # Create two panels within the splitter window.
        leftpanel= wx.Panel(splitter, -1, size = (800,400))
        rightpanel = wx.Panel(splitter, -1)
        leftpanel.SetBackgroundColour('blue')
        rightpanel.SetBackgroundColour('red')
        splitter.SplitVertically(leftpanel, rightpanel,600)
        splitter.SetSashGravity(0.7)
        splitter.SetSashSize(4)
        # Left panel: split into two panes stacked vertically (lefttop / leftbottom).
        vboxleft = wx.BoxSizer(wx.VERTICAL)
        leftsplitter = wx.SplitterWindow(leftpanel, -1)
        vboxleft.Add(leftsplitter, 1, wx.EXPAND | wx.ALL)
        lefttop = wx.Panel(leftsplitter, -1)
        leftbottom = wx.Panel(leftsplitter)
        lefttop.SetBackgroundColour('green')
        leftbottom.SetBackgroundColour('blue')
        leftsplitter.SplitHorizontally(lefttop,leftbottom,155)
        leftpanel.SetSizer(vboxleft)
        # The lefttop pane is itself split into topleft / topright.
        hboxtop=wx.BoxSizer(wx.HORIZONTAL)
        topsplitter=wx.SplitterWindow(lefttop, -1)
        topleft =wx.Panel(topsplitter, -1, size=(300,100))
        topright = wx.Panel(topsplitter, -1)
        topleft.SetBackgroundColour('black')
        topright.SetBackgroundColour('gray')
        topsplitter.SplitVertically(topleft,topright)
        hboxtop.Add(topsplitter, 1, wx.EXPAND | wx.ALL)
        topsplitter.SetSashGravity(0.6)
        lefttop.SetSizer(hboxtop)
        # Chat-style welcome banner in the topleft pane.
        welcome =' Howdy! Welcome to Project N! \n Dr.Naimath >>> Please enter the presenting symptoms \n You >>> '
        stext = wx.StaticText(topleft, -1, welcome)
        stext.SetForegroundColour("white")
        # Leftbottom pane: a notebook with one tab per medical specialty.
        nb = wx.Notebook(leftbottom, -1, style=wx.NB_TOP)
        general=wx.Panel(nb)
        cvs=wx.Panel(nb)
        git = wx.Panel(nb)
        resp = wx.Panel(nb)
        renal = wx.Panel(nb)
        endo = wx.Panel(nb)
        dermat = wx.Panel(nb)
        rheum = wx.Panel(nb)
        cns = wx.Panel(nb)
        infect = wx.Panel(nb)
        paeds = wx.Panel(nb)
        obg = wx.Panel(nb)
        surgery = wx.Panel(nb)
        psych = wx.Panel(nb)
        nb.AddPage(general,"General")
        nb.AddPage(cvs,"CVS")
        nb.AddPage(git,"GIT")
        nb.AddPage(resp,"Respiratory")
        nb.AddPage(cns,"CNS")
        nb.AddPage(renal,"Renal")
        nb.AddPage(endo,"Endocrine")
        nb.AddPage(rheum,"Rheum")
        nb.AddPage(dermat,"Dermatology")
        nb.AddPage(infect,"Infectious")
        nb.AddPage(psych,"Psychiatry")
        nb.AddPage(paeds,"Paediatrics")
        nb.AddPage(surgery,"Surgical")
        nb.AddPage(obg,"OBG/GYN")
        general.SetFocus()
        vboxdown=wx.BoxSizer(wx.VERTICAL)
        vboxdown.Add(nb,1,wx.EXPAND)
        leftbottom.SetSizer(vboxdown)
        # Populate the General tab with symptom checkboxes.
        # NOTE(review): cb5-cb9 all carry the label 'Itching' — presumably
        # placeholders awaiting real symptom names; confirm before release.
        gbox=wx.BoxSizer(wx.VERTICAL)
        general_gs = wx.BoxSizer(wx.HORIZONTAL)
        cb = wx.CheckBox(general, -1, 'Headache')
        general_gs.Add(cb)
        cb1 = wx.CheckBox(general, -1, 'Associated with nitrates')
        general_gs.Add(cb1)
        cb2 = wx.CheckBox(general, -1, 'Unilateral/ Bilateral')
        general_gs.Add(cb2)
        cb3 = wx.CheckBox(general, -1, 'Throbbing')
        general_gs.Add(cb3)
        cb4 = wx.CheckBox(general, -1, 'Sharp')
        general_gs.Add(cb4)
        cb5 = wx.CheckBox(general, -1, 'Itching')
        general_gs.Add(cb5)
        cb6 = wx.CheckBox(general, -1, 'Itching')
        general_gs.Add(cb6)
        cb7 = wx.CheckBox(general, -1, 'Itching')
        general_gs.Add(cb7)
        cb8 = wx.CheckBox(general, -1, 'Itching')
        general_gs.Add(cb8)
        cb9 = wx.CheckBox(general, -1, 'Itching')
        general_gs.Add(cb9)
        wx.StaticLine(general, -1)
        general.SetSizer(gbox)
        gbox.Add(general_gs,1,wx.EXPAND)
        # Right panel: a single multiline text control.
        hbox4 = wx.BoxSizer(wx.HORIZONTAL)
        tc3 = wx.TextCtrl(rightpanel, -1, style=wx.TE_MULTILINE)
        hbox4.Add(tc3, 1, wx.EXPAND | wx.ALL, 5)
        rightpanel.SetSizer(hbox4)
        # Set the sizer for the main panel.
        panel.SetSizer(vbox)
        # Centre and show the frame immediately.
        self.Centre()
        self.Show(True)
# Module runner: runs at import time (no __main__ guard).
# The frame shows itself in __init__ (self.Show(True)), so no Show() here.
app = wx.App()
GoToClass(None, -1, 'Project N | Medical Expert System')
app.MainLoop()
| Python |
#!/usr/bin/env python
import wx
import wx.lib.buttons as buttons
import hd
import wx.html
class RefactorExample(wx.Frame):
    """Main NAIMATH expert-system window.

    Three-column layout built entirely in __init__:
      panel1 (left)  - entered symptoms, signs/investigations, to-do investigations
      panel2 (middle)- scrolling question area populated by the ``hd`` module
      panel3 (right) - probable diagnoses, threshold slider, explanation/chat box
    All bitmap buttons are currently bound to the OnCloseMe placeholder.
    """
    def __init__(self, parent, id):
        """Build the whole UI: splash screen, the three panels, menus, status bar."""
        wx.Frame.__init__(self, parent, id, 'NAIMATH : Medical Expert System',
                          size=(1200, 800))
        # Splash screen (1000 ms timeout); images are loaded from the CWD.
        image = wx.Image("splash.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap()
        wx.SplashScreen(image, wx.SPLASH_CENTRE_ON_SCREEN |wx.SPLASH_TIMEOUT, 1000, None, -1)
        wx.Yield()
        self.panel = wx.Panel(self, -1)
        self.panel.SetBackgroundColour("White")
        hbox1 = wx.BoxSizer(wx.HORIZONTAL)
        self.panel.SetSizer(hbox1)
        self.panel1=wx.Panel(self.panel,-1)
        self.panel1.SetSize((300,800))
        #--Panel 1 starts here :
        vbox1 = wx.BoxSizer(wx.VERTICAL)
        self.panel1.SetSizer(vbox1)
        # Window for symptoms
        self.panel10=wx.Panel(self.panel1,-1)
        self.panel10.SetSize((300,300))
        hbox10=wx.BoxSizer(wx.HORIZONTAL)
        # Panel with labels and buttons "SYMPTOMS" starts here --
        self.panelx1=wx.Panel(self.panel1,-1)
        self.panelx1.SetBackgroundColour('lightgray')
        hboxx1=wx.BoxSizer(wx.HORIZONTAL)
        b=buttons.GenButton(self.panelx1,-1,'Entered Symptoms:')
        b.SetFont(wx.Font(10, wx.ROMAN, wx.NORMAL, wx.BOLD, False))
        b.SetBackgroundColour("#190436")
        b.SetForegroundColour("white")
        b.SetToolTipString("List of symptoms entered so far shown below, check those which you wish to edit/delete and click on the button to the right ")
        hboxx1.Add(b,1,wx.RIGHT)
        # NOTE(review): all bitmap buttons below bind to the OnCloseMe
        # placeholder; real handlers are still to be written.
        bmp = wx.Image("edit.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap()
        button1 = wx.BitmapButton(self.panelx1, -1, bmp, pos=(10, 20),size=(30,31),style=0)
        button1.SetBackgroundColour('#190436')
        button1.SetToolTipString("Edit Symptoms")
        self.Bind(wx.EVT_BUTTON,self.OnCloseMe,button1)
        hboxx1.Add(button1)
        bmp = wx.Image("delete.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap()
        button2 = wx.BitmapButton(self.panelx1, -1, bmp, pos=(10, 20),size=(30,31),style=0)
        button2.SetBackgroundColour('#190436')
        button2.SetToolTipString("Delete Symptoms")
        self.Bind(wx.EVT_BUTTON,self.OnCloseMe,button2)
        hboxx1.Add(button2)
        bmp = wx.Image("show.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap()
        button3 = wx.BitmapButton(self.panelx1, -1, bmp, pos=(10, 20),size=(30,31),style=0)
        button3.SetBackgroundColour('#190436')
        button3.SetToolTipString("Symptoms Browser")
        self.Bind(wx.EVT_BUTTON,self.OnCloseMe,button3)
        hboxx1.Add(button3)
        self.panelx1.SetSizer(hboxx1)
        vbox1.Add(self.panelx1,0,flag=wx.EXPAND | wx.RIGHT)
        # Panel with label and buttons ends here. Scrolled window begins....
        self.scroll10=wx.ScrolledWindow(self.panel10, -1, pos=wx.DefaultPosition,size=(300,210),style=wx.HSCROLL )
        self.scroll10.SetScrollbars(1,1,1300,1400)
        self.scroll10.SetBackgroundColour('lightgray')
        hbox10.Add(self.scroll10, 1,flag=wx.EXPAND)
        self.panel10.SetSizer(hbox10)
        vbox1.Add(self.panel10,1,flag=wx.EXPAND)
        # Add widgets for symptoms window below.
        # NOTE(review): hard-coded demo data — presumably replaced by live
        # session data once the inference engine is wired in; confirm.
        self.givenSymptoms = ['Headache Onset: Sudden', 'Headache Duration: 2 months', 'Headache Site: Unilateral', 'Headache Location: Temporal', 'Headache Aggravating factors: Music, Noise ', 'Headache Episodic: Yes ',
                              'Fever: No', 'Difficulty in moving neck: No', 'History of head trauma: Yes', 'Vision difficulty: Yes', 'See Zigzag bright lines?: No ', 'Vomiting: No',
                              'Nausea: Yes', 'Relieved by pain medications: Yes', 'Stress in life: No']
        self.symptomsbox()
        # Known issues: 1. Problem with adding boxsizer in scrolled window.
        # 2. GenButton on expansion shows the border twice.
        # Window for signs
        # Panel with label SIGNS AND INVESTIGATION begins here --
        self.panelx2=wx.Panel(self.panel1,-1)
        self.panelx2.SetBackgroundColour('lightgray')
        hboxx2=wx.BoxSizer(wx.HORIZONTAL)
        b2=buttons.GenButton(self.panelx2,-1,'Signs and Investigations:')
        b2.SetFont(wx.Font(10, wx.ROMAN, wx.NORMAL, wx.BOLD, False))
        b2.SetBackgroundColour("#190436")
        b2.SetForegroundColour("white")
        b2.SetToolTipString("Below is the list of signs and investigations you entered, select them and click on buttons on the right to edit or delete them")
        hboxx2.Add(b2,1,wx.RIGHT)
        bmp = wx.Image("edit.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap()
        button1 = wx.BitmapButton(self.panelx2, -1, bmp, pos=(10, 20),size=(30,31),style=0)
        button1.SetBackgroundColour('#190436')
        button1.SetToolTipString("Edit Signs/Investigations")
        self.Bind(wx.EVT_BUTTON,self.OnCloseMe,button1)
        hboxx2.Add(button1)
        bmp = wx.Image("delete.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap()
        button2 = wx.BitmapButton(self.panelx2, -1, bmp, pos=(10, 20),size=(30,31),style=0)
        button2.SetBackgroundColour('#190436')
        button2.SetToolTipString("Delete Signs/Investigations")
        self.Bind(wx.EVT_BUTTON,self.OnCloseMe,button2)
        hboxx2.Add(button2)
        bmp = wx.Image("show.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap()
        button3 = wx.BitmapButton(self.panelx2, -1, bmp, pos=(10, 20),size=(30,31),style=0)
        button3.SetBackgroundColour('#190436')
        button3.SetToolTipString("Sign/Investigation Browser")
        self.Bind(wx.EVT_BUTTON,self.OnCloseMe,button3)
        hboxx2.Add(button3)
        self.panelx2.SetSizer(hboxx2)
        vbox1.Add(self.panelx2,0,flag=wx.EXPAND | wx.RIGHT)
        # Panel with label ends here. Now is the scrolled window.
        self.panel11=wx.Panel(self.panel1,-1)
        self.panel11.SetSize((300,300))
        hbox11=wx.BoxSizer(wx.HORIZONTAL)
        self.scroll=wx.ScrolledWindow(self.panel11,-1,pos=wx.DefaultPosition,size=(300,210), style=wx.HSCROLL | wx.VSCROLL)
        self.scroll.SetScrollbars(1,1,1300,1400)
        self.scroll.SetBackgroundColour('lightgray')
        hbox11.Add(self.scroll, 1,flag=wx.EXPAND)
        self.panel11.SetSizer(hbox11)
        vbox1.Add(self.panel11,1,flag=wx.EXPAND)
        # Add widgets for signs below (hard-coded demo data, as above):
        self.givenSigns = ['Head Inspection: Bruise on left temporal region', 'Eye Examination: Normal','HEENT Exam: Normal' ,'Cardiovascular Exam: Normal', 'Respiratory Exam: Normal', 'GIT Exam: Normal ', 'CNS Examination: Abnormal ',
                           'Weakness: Right Upper Limb Grade 4/5, Right Lower Limb 3/5', 'Sensations: Normal', 'Deep Tendon Reflexes: Normal', 'Gait and Posture: Normal', 'Higher Cerebral Functions: Normal', 'Mental Examination: Normal',
                           'Exam of Genitalia: Normal', 'INV_Hemoglobin: 9.5%','INV_Blood Glucose: 120 mg/dl']
        self.signsbox()
        # Scrolled window 3: for investigations
        # Panel with label TODO INVESTIGATIONS begins --
        self.panelx3=wx.Panel(self.panel1,-1)
        self.panelx3.SetBackgroundColour('lightgray')
        hboxx3=wx.BoxSizer(wx.HORIZONTAL)
        b3=buttons.GenButton(self.panelx3,-1,'To-Do Investigations:')
        b3.SetFont(wx.Font(10, wx.ROMAN, wx.NORMAL, wx.BOLD, False))
        b3.SetBackgroundColour("#3373ae")
        b3.SetForegroundColour("white")
        b3.SetToolTipString("Below are the investigations required listed along with importance and costs. Select them and use the buttons to the right to Order an investigation or Read more about it")
        hboxx3.Add(b3,1,wx.RIGHT)
        bmp = wx.Image("order.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap()
        button1 = wx.BitmapButton(self.panelx3, -1, bmp, pos=(10, 20),size=(30,31),style=0)
        button1.SetBackgroundColour('#3373ae')
        button1.SetToolTipString("Order and Feed Results")
        self.Bind(wx.EVT_BUTTON,self.OnCloseMe,button1)
        hboxx3.Add(button1)
        bmp = wx.Image("invinfo.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap()
        button2 = wx.BitmapButton(self.panelx3, -1, bmp, pos=(10, 20),size=(30,31),style=0)
        button2.SetBackgroundColour('#3373ae')
        button2.SetToolTipString("Read more about the investigation(s)")
        self.Bind(wx.EVT_BUTTON,self.OnCloseMe,button2)
        hboxx3.Add(button2)
        bmp = wx.Image("show.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap()
        button3 = wx.BitmapButton(self.panelx3, -1, bmp, pos=(10, 20),size=(30,31),style=0)
        button3.SetBackgroundColour('#3373ae')
        button3.SetToolTipString("Investigations Browser")
        self.Bind(wx.EVT_BUTTON,self.OnCloseMe,button3)
        hboxx3.Add(button3)
        self.panelx3.SetSizer(hboxx3)
        vbox1.Add(self.panelx3,0,flag=wx.EXPAND | wx.RIGHT)
        # End of labels. Scrolled window begins
        self.panel12=wx.Panel(self.panel1,-1)
        self.panel12.SetSize((300,300))
        hbox12=wx.BoxSizer(wx.HORIZONTAL)
        self.scroll1=wx.ScrolledWindow(self.panel12,-1,pos=wx.DefaultPosition, size=(300,200), style=wx.HSCROLL | wx.VSCROLL)
        self.scroll1.SetScrollbars(1,1,1300,1400)
        hbox12.Add(self.scroll1, 1,flag=wx.EXPAND)
        self.panel12.SetSizer(hbox12)
        vbox1.Add(self.panel12, 1, flag=wx.EXPAND)
        self.scroll1.SetBackgroundColour('lightgray')
        # Add widgets for to-do investigations below (hard-coded demo data):
        self.todoinv = ['INV_CT Scan Head: 90% Useful, Cost: 3550Rs','INV_Complete blood picture: 90% Useful, Cost: 50Rs', 'INV_X ray Head: 90% Useful, Cost: 250Rs','INV_EEG:70% Useful, Cost: 300Rs','INV_ECG: 70% Useful, Cost: 150Rs', 'INV_Complete Urine Examination: 60% Useful, Cost: 50Rs']
        self.Invbox()
        #-- End of Panel 1. Start of Panel 2...
        self.panel2=wx.Panel(self.panel,-1)
        self.panel2.SetBackgroundColour("White")
        self.panel2.SetSize((420,800))
        hbox21=wx.BoxSizer(wx.HORIZONTAL)
        vbox21=wx.BoxSizer(wx.VERTICAL)
        self.panel2.SetSizer(vbox21)
        # Panel with label ANSWER THE QUESTIONS
        self.panelx22=wx.Panel(self.panel2,-1)
        self.panelx22.SetBackgroundColour('lightgray')
        hboxx22=wx.BoxSizer(wx.HORIZONTAL)
        b4=buttons.GenButton(self.panelx22,-1,'Please answer the questions below:')
        b4.SetFont(wx.Font(10, wx.ROMAN, wx.NORMAL, wx.BOLD, False))
        b4.SetBackgroundColour("#3373ae")
        b4.SetForegroundColour("white")
        b4.SetToolTipString("Use the buttons on the right to control the session.")
        hboxx22.Add(b4,1,wx.RIGHT)
        bmp = wx.Image("finish.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap()
        button1 = wx.BitmapButton(self.panelx22, -1, bmp, pos=(10, 20),size=(30,31),style=0)
        button1.SetBackgroundColour('#3373ae')
        button1.SetToolTipString("Finish the session")
        self.Bind(wx.EVT_BUTTON,self.OnCloseMe,button1)
        hboxx22.Add(button1)
        bmp = wx.Image("new.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap()
        button2 = wx.BitmapButton(self.panelx22, -1, bmp, pos=(10, 20),size=(30,31),style=0)
        button2.SetBackgroundColour('#3373ae')
        button2.SetToolTipString("Start a New Session")
        self.Bind(wx.EVT_BUTTON,self.OnCloseMe,button2)
        hboxx22.Add(button2)
        bmp = wx.Image("edit.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap()
        button3 = wx.BitmapButton(self.panelx22, -1, bmp, pos=(10, 20),size=(30,31),style=0)
        button3.SetBackgroundColour('#3373ae')
        button3.SetToolTipString("Edit the questions")
        self.Bind(wx.EVT_BUTTON,self.OnCloseMe,button3)
        hboxx22.Add(button3)
        bmp = wx.Image("load.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap()
        button4 = wx.BitmapButton(self.panelx22, -1, bmp, pos=(10, 20),size=(30,31),style=0)
        button4.SetBackgroundColour('#3373ae')
        button4.SetToolTipString("Load a casesheet")
        self.Bind(wx.EVT_BUTTON,self.OnCloseMe,button4)
        hboxx22.Add(button4)
        bmp = wx.Image("show.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap()
        button5 = wx.BitmapButton(self.panelx22, -1, bmp, pos=(10, 20),size=(30,31),style=0)
        button5.SetBackgroundColour('#3373ae')
        button5.SetToolTipString("Question Browser")
        self.Bind(wx.EVT_BUTTON,self.OnCloseMe,button5)
        hboxx22.Add(button5)
        self.panelx22.SetSizer(hboxx22)
        vbox21.Add(self.panelx22,0,flag=wx.EXPAND | wx.RIGHT)
        vbox21.Add(hbox21, 1, flag=wx.EXPAND)
        self.scroll21=wx.ScrolledWindow(self.panel2,-1,pos=wx.DefaultPosition,size=(420,620), style=wx.HSCROLL | wx.VSCROLL)
        self.scroll21.SetScrollbars(1,1,2500,2900)
        self.scroll21.SetBackgroundColour("White")
        hbox21.Add(self.scroll21,1,flag=wx.EXPAND)
        # Add widgets of panel2 main question area below:
        ###############################################################################################
        # The hd module draws each question onto self.scroll21 and returns the
        # updated (x, y) cursor, which is threaded through successive calls.
        pos=(10,20)
        cf=0.8
        cf2=0.6
        # NOTE(review): hd_onset receives str(cf) while the others get a float
        # cf2 — looks inconsistent; confirm which type hd.* expects.
        pos=hd.hd_onset(self,pos,str(cf))
        pos=hd.hd_location(self,pos,cf2)
        pos=hd.hd_duration(self,pos,cf2)
        pos=hd.hd_episodic(self,pos,cf2)
        #-- End of Panel 2 and start of Panel 3.
        self.panel3=wx.Panel(self.panel,-1)
        self.panel3.SetSize((300,800))
        vbox3=wx.BoxSizer(wx.VERTICAL)
        # Window for diagnosis list
        self.panel30=wx.Panel(self.panel3,-1)
        self.panel30.SetSize((300,300))
        hbox30=wx.BoxSizer(wx.HORIZONTAL)
        # Panel with label DIAGNOSIS begins --
        self.panelx31=wx.Panel(self.panel3,-1)
        self.panelx31.SetBackgroundColour('lightgray')
        hboxx31=wx.BoxSizer(wx.HORIZONTAL)
        b5=buttons.GenButton(self.panelx31,-1,"Probable Diagnosis': ")
        b5.SetFont(wx.Font(10, wx.ROMAN, wx.NORMAL, wx.BOLD, False))
        b5.SetBackgroundColour("#ae335c")
        b5.SetForegroundColour("white")
        b5.SetToolTipString("Select a diagnosis and click on the button to the right to work on them")
        hboxx31.Add(b5,1,wx.RIGHT)
        bmp = wx.Image("fix.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap()
        button1 = wx.BitmapButton(self.panelx31, -1, bmp, pos=(10, 20),size=(30,31),style=0)
        button1.SetBackgroundColour('#ae335c')
        button1.SetToolTipString("Freeze this diagnosis and proceed to Management")
        self.Bind(wx.EVT_BUTTON,self.OnCloseMe,button1)
        hboxx31.Add(button1)
        bmp = wx.Image("exclusive.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap()
        button2 = wx.BitmapButton(self.panelx31, -1, bmp, pos=(10, 20),size=(30,31),style=0)
        button2.SetBackgroundColour('#ae335c')
        button2.SetToolTipString("Ask questions only related to the selected diagnosis")
        self.Bind(wx.EVT_BUTTON,self.OnCloseMe,button2)
        hboxx31.Add(button2)
        bmp = wx.Image("help.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap()
        button3 = wx.BitmapButton(self.panelx31, -1, bmp, pos=(10, 20),size=(30,31),style=0)
        button3.SetBackgroundColour('#ae335c')
        button3.SetToolTipString("Information and Health Education regarding the selected diagnosis")
        self.Bind(wx.EVT_BUTTON,self.OnCloseMe,button3)
        hboxx31.Add(button3)
        self.panelx31.SetSizer(hboxx31)
        vbox3.Add(self.panelx31,0,flag=wx.EXPAND | wx.RIGHT)
        # End of diagnosis label. Scrolled window begins
        self.scroll30=wx.ScrolledWindow(self.panel30, -1, pos=wx.DefaultPosition,size=(300,210))
        self.scroll30.SetScrollbars(1,1,1300,1400)
        hbox30.Add(self.scroll30, 1,flag=wx.EXPAND)
        self.panel30.SetSizer(hbox30)
        vbox3.Add(self.panel30,1,flag=wx.EXPAND)
        self.scroll30.SetBackgroundColour('lightgray')
        # Widgets for diagnosis (not implemented yet)
        # Window for slider
        self.panel31=wx.Panel(self.panel3,-1)
        self.panel31.SetSize((300,30))
        vbox3.Add(self.panel31,0,flag=wx.EXPAND | wx.RIGHT)
        self.panel31.SetBackgroundColour('lightgray')
        # Threshold slider: id 100, initial value 60, range 1-100.
        slider = wx.Slider(self.panel31, 100, 60, 1, 100, pos=(0, 0),
                           size=(250, -1),
                           style=wx.SL_HORIZONTAL | wx.SL_AUTOTICKS | wx.SL_LABELS )
        st_slider=wx.StaticText(self.panel31, -1, "Threshold (Low value = Better diagnosis\n .....but more questions!)", pos=(10,35))
        font1 = wx.Font(8, wx.DECORATIVE,wx.ITALIC, wx.NORMAL)
        st_slider.SetFont(font1)
        # Start of label EXPLANATION BOX:
        self.panelx32=wx.Panel(self.panel3,-1)
        self.panelx32.SetBackgroundColour('lightgray')
        hboxx32=wx.BoxSizer(wx.HORIZONTAL)
        b6=buttons.GenButton(self.panelx32,-1,'Explanation Box: ')
        b6.SetFont(wx.Font(10, wx.ROMAN, wx.NORMAL, wx.BOLD, False))
        b6.SetBackgroundColour("#3373ae")
        b6.SetForegroundColour("white")
        b6.SetToolTipString("Use the buttons on the right to interact")
        hboxx32.Add(b6,1,wx.RIGHT)
        bmp = wx.Image("edit.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap()
        button1 = wx.BitmapButton(self.panelx32, -1, bmp, pos=(10, 20),size=(30,31),style=0)
        button1.SetBackgroundColour('#3373ae')
        button1.SetToolTipString("Add a note to this explanation")
        self.Bind(wx.EVT_BUTTON,self.OnCloseMe,button1)
        hboxx32.Add(button1)
        bmp = wx.Image("hi.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap()
        button2 = wx.BitmapButton(self.panelx32, -1, bmp, pos=(10, 20),size=(30,31),style=0)
        button2.SetToolTipString("Chat with Bruce! regarding health issues")
        button2.SetBackgroundColour('#3373ae')
        self.Bind(wx.EVT_BUTTON,self.OnCloseMe,button2)
        hboxx32.Add(button2)
        bmp = wx.Image("show.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap()
        button3 = wx.BitmapButton(self.panelx32, -1, bmp, pos=(10, 20),size=(30,31),style=0)
        button3.SetBackgroundColour('#3373ae')
        button3.SetToolTipString("Show all explanations")
        self.Bind(wx.EVT_BUTTON,self.OnCloseMe,button3)
        hboxx32.Add(button3)
        self.panelx32.SetSizer(hboxx32)
        vbox3.Add(self.panelx32,0,flag=wx.EXPAND | wx.RIGHT)
        # Window for bruce chat
        self.panel32=wx.Panel(self.panel3,-1)
        self.panel32.SetSize((300,300))
        hbox32=wx.BoxSizer(wx.HORIZONTAL)
        self.scroll32=wx.ScrolledWindow(self.panel32,-1,pos=wx.DefaultPosition,size=(300,210), style=wx.HSCROLL | wx.VSCROLL)
        self.scroll32.SetScrollbars(1,1,1300,1400)
        hbox32.Add(self.scroll32, 1,flag=wx.EXPAND)
        self.panel32.SetSizer(hbox32)
        vbox3.Add(self.panel32,1,flag=wx.EXPAND)
        self.panel3.SetSizer(vbox3)
        self.scroll32.SetBackgroundColour('lightgray')
        # Widgets for bruce's info box (not implemented yet)
        #--End of Panel3
        # Assemble the three columns with 1:2:1 horizontal proportions.
        hbox1.Add(self.panel1,1,flag=wx.EXPAND)
        hbox1.Add(self.panel2,2,flag=wx.EXPAND)
        hbox1.Add(self.panel3,1,flag=wx.EXPAND)
        # Create Menubar
        self.createMenuBar()
        # Status Bar
        self.CreateStatusBar()
        self.SetStatusText("This is the statusbar")
        self.Bind(wx.EVT_CLOSE, self.OnCloseWindow) # Close window function
    def menuData(self):
        """Return the menu structure as nested tuples:
        (menu-label, (item-label, status-text, handler), ...) per menu.
        An empty item-label means "insert a separator" (see createMenu).
        """
        return (("&File",
                 ("&Open", "Open in status bar", self.OnOpen),
                 ("&Quit", "Quit", self.OnCloseWindow)),
                ("&Edit",
                 ("&Copy", "Copy", self.OnCopy),
                 ("C&ut", "Cut", self.OnCut),
                 ("&Paste", "Paste", self.OnPaste),
                 ("", "", ""),
                 ("&Options...", "DisplayOptions", self.OnOptions)), ("&Help", ("&About", "About", self.OnAbout)))
    def createMenuBar(self):
        """Build the frame's menu bar from menuData()."""
        menuBar = wx.MenuBar()
        for eachMenuData in self.menuData():
            menuLabel = eachMenuData[0]
            menuItems = eachMenuData[1:]
            menuBar.Append(self.createMenu(menuItems), menuLabel)
        self.SetMenuBar(menuBar)
    def createMenu(self, menuData):
        """Create one wx.Menu from (label, status, handler) triples.

        Empty labels become separators; every real item is bound to its handler.
        """
        menu = wx.Menu()
        for eachLabel, eachStatus, eachHandler in menuData:
            if not eachLabel:
                menu.AppendSeparator()
                continue
            menuItem = menu.Append(-1, eachLabel, eachStatus)
            self.Bind(wx.EVT_MENU, eachHandler, menuItem)
        return menu
    def OnCloseWindow(self,event):
        """Destroy the frame (bound to wx.EVT_CLOSE and File/Quit)."""
        self.Destroy()
    def OnOpen(self,event):
        # Placeholder menu handler - not implemented yet.
        pass
    def OnCopy(self,event):
        # Placeholder menu handler - not implemented yet.
        pass
    def OnCut(self,event):
        # Placeholder menu handler - not implemented yet.
        pass
    def OnPaste(self,event):
        # Placeholder menu handler - not implemented yet.
        pass
    def OnOptions(self,event):
        # Placeholder menu handler - not implemented yet.
        pass
    def OnAbout(self,event):
        """Show the modal SketchAbout dialog."""
        dlg = SketchAbout(self)
        dlg.ShowModal()
        dlg.Destroy()
    def symptomsbox(self):
        """Populate the symptoms scroll pane with one checkbox per entry."""
        self.GenChecks(self.givenSymptoms,self.scroll10)
    def signsbox(self):
        """Populate the signs/investigations scroll pane with checkboxes."""
        self.GenChecks(self.givenSigns, self.scroll)
    def Invbox(self):
        """Populate the to-do investigations scroll pane with checkboxes."""
        self.GenChecks(self.todoinv, self.scroll1)
    def GenChecks(self,lists,panel):
        """Add one wx.CheckBox per string in ``lists`` to ``panel``,
        stacked vertically 20 px apart starting at y=5.
        """
        x=0
        y=5
        for i in lists:
            # The checkbox reference is intentionally not kept; wx parents it.
            checkbox = wx.CheckBox(panel,-1,i,(10,y))
            y=y+20
    def OnCloseMe(self,event):
        # Placeholder bound to every bitmap button - not implemented yet.
        pass
class SketchAbout(wx.Dialog):
    """Modal "About Naimath" dialog: an HTML credits page plus an OK button."""
    def __init__(self,parent):
        """Build the dialog; the HTML body is a fixed inline template."""
        wx.Dialog.__init__(self, parent, -1, 'About Naimath',
                           size=(440, 400) )
        html = wx.html.HtmlWindow(self)
        # Static about-page markup rendered by wx.html.HtmlWindow.
        self.text='''<html>
<body bgcolor="#ACAA60">
<center><table bgcolor="#455481" width="100%" cellspacing="0"
cellpadding="0" border="1">
<tr>
<td align="center"><h1>Naimath v 1.0 </h1></td>
</tr>
</table>
</center>
<p><b>Naimath Medical Expert System</b> is an experimental cum demonstration program for
<b>rule based clinical decision support system.</b>
We donot take any liabilities for damage caused by the use of this software.
This software is not to replace professional medical advice but rather to supplement it.
</p>
<p><b>Naimath</b> and <b>Pyke Engine</b> are brought to you by
<b>Bruce Fredericksen</b> and <b> <br> Dr. Inayathullah Ghori</b> <br> Copyright
&copy; 2010-2011.</p>
</body>
</html>
'''
        html.SetPage(self.text)
        button = wx.Button(self, wx.ID_OK, "Okay")
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(html, 1, wx.EXPAND|wx.ALL, 5)
        sizer.Add(button, 0, wx.ALIGN_CENTER|wx.ALL, 5)
        self.SetSizer(sizer)
        self.Layout()
def run(rule_base = None, canned_questions = None):
    """Launch the GUI and enter the wx main loop (blocks until exit).

    rule_base, canned_questions -- accepted for the expert-engine driver's
    calling convention but currently unused — TODO wire into the UI.
    """
    # NOTE(review): wx.PySimpleApp is deprecated in newer wxPython; wx.App()
    # is the modern equivalent - confirm the target wx version before changing.
    app = wx.PySimpleApp()
    frame = RefactorExample(parent=None, id=-1)
    frame.Show()
    app.MainLoop()
# Script entry point: only start the GUI when executed directly.
if __name__ == '__main__':
    run()
# TODO: How do I get child items of a panel/scrolled pane - list all checkboxes and then see how many are marked true. Get their labels. Dictionary check. Work on them.
| Python |
# po_headers.py
import time
def write_header(f):
    """Write the standard gettext PO-file header to the open file *f*.

    The POT-Creation-Date field is stamped with the current local time
    followed by the numeric UTC offset from tz_offset(). The template is a
    raw string so the literal \\n sequences required inside PO msgstr
    lines are preserved verbatim.
    """
    template = r'''# Question translation for Naimath Expert System
# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
# This file is distributed under the same license as the Naimath package.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: %s%s\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <LL@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"'''
    stamped = template % (time.strftime("%Y-%m-%d %H:%M"), tz_offset())
    # ``print >> f`` appended a trailing newline; write() needs it explicitly.
    f.write(stamped + "\n")
def tz_offset():
    """Return the current local UTC offset formatted as +HHMM / -HHMM,
    the numeric form used in PO-file date stamps.
    """
    # BUG FIX: previously this chose time.altzone whenever time.daylight was
    # non-zero, i.e. whenever a DST rule is *defined* for the zone - which
    # yields the summer offset even in winter. The correct test is whether
    # DST is in effect *right now* (tm_isdst).
    if time.localtime().tm_isdst > 0:
        offset = time.altzone
    else:
        offset = time.timezone
    # time.timezone / time.altzone are seconds *west* of UTC, so the sign of
    # the printable offset is the opposite of the raw value's sign.
    sign = -1 if offset >= 0 else 1
    hours, minutes = divmod(abs(offset) // 60, 60)  # avoid shadowing min()
    return "%+03d%02d" % (sign * hours, minutes)
| Python |
# Para hacer el ejecutable:
# python setup.py py2exe
#
"Creador de instalador para PyAfipWs (WSFEv1)"
__author__ = "Mariano Reingart (mariano@nsis.com.ar)"
__copyright__ = "Copyright (C) 2010 Mariano Reingart"
from distutils.core import setup
import py2exe
import glob, sys
# includes for py2exe
includes=['email.generator', 'email.iterators', 'email.message', 'email.utils']
# don't pull in all this MFC stuff used by the makepy UI.
excludes=["pywin", "pywin.dialogs", "pywin.dialogs.list", "win32ui"]
opts = {
'py2exe': {
'includes':includes,
'optimize':2,
'excludes': excludes,
'dll_excludes': ["mswsock.dll", "powrprof.dll", "KERNELBASE.dll",
"API-MS-Win-Core-LocalRegistry-L1-1-0.dll",
"API-MS-Win-Core-ProcessThreads-L1-1-0.dll",
"API-MS-Win-Security-Base-L1-1-0.dll"
],
'skip_archive': True,
}}
data_files = [
(".", ["wsfev1_wsdl.xml","wsfev1_wsdl_homo.xml", "licencia.txt", "rece.ini.dist", "geotrust.crt"]),
("cache", glob.glob("cache/*")),
]
import wsfev1, rece1, wsaa
from nsis import build_installer, Target
setup(
name="WSFEV1",
version=wsfev1.__version__ + (wsfev1.HOMO and '-homo' or '-full'),
description="Interfaz PyAfipWs WSFEv1 %s",
long_description=wsfev1.__doc__,
author="Mariano Reingart",
author_email="reingart@gmail.com",
url="http://www.sistemasagiles.com.ar",
license="GNU GPL v3",
com_server = [Target(module=wsfev1,modules="wsfev1")],
console=[Target(module=wsfev1, script='wsfev1.py', dest_base="wsfev1_cli"),
Target(module=rece1, script='rece1.py'),
Target(module=wsaa, script='wsaa.py'),
],
options=opts,
data_files = data_files,
cmdclass = {"py2exe": build_installer}
)
| Python |
# Para hacer el ejecutable:
# python setup.py py2exe
#
"""
__version__ = "$Revision: 1.3 $"
__date__ = "$Date: 2005/04/05 18:44:54 $"
"""
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2008 Mariano Reingart"
from distutils.core import setup
import py2exe
import sys
if sys.platform == 'darwin':
import py2app
buildstyle = 'app'
else:
import py2exe
buildstyle = 'windows'
# find pythoncard resources, to add as 'data_files'
import os
pycard_resources=[]
for filename in os.listdir('.'):
if filename.find('.rsrc.')>-1:
pycard_resources+=[filename]
# includes for py2exe
includes=[]
for comp in ['button','image','staticbox','radiogroup', 'imagebutton',
'statictext','textarea','textfield','passwordfield', 'checkbox',
'tree','multicolumnlist','list','gauge','choice',
]:
includes += ['PythonCard.components.'+comp]
print 'includes',includes
includes+=['email.generator', 'email.iterators', 'email.message', 'email.utils']
opts = {
'py2exe': {
'includes':includes,
'optimize':2}
}
import pyrece, pyfpdf_hg.designer
from nsis import build_installer, Target
import glob
data_files = [
(".", ["wsfev1_wsdl.xml","wsfev1_wsdl_homo.xml", "licencia.txt",
"C:\python25\lib\site-packages\wx-2.8-msw-unicode\wx\MSVCP71.dll",
"C:\python25\lib\site-packages\wx-2.8-msw-unicode\wx\gdiplus.dll",
"logo.png",
"rece.ini.dist", "factura.csv",
"facturas.csv", "facturas.json", "facturas.txt"]),
("cache", glob.glob("cache/*")),
]
setup( name = "PyRece",
version=pyrece.__version__ + (pyrece.HOMO and '-homo' or '-full'),
description="PyRece %s" % pyrece.__version__,
long_description=pyrece.__doc__,
author="Mariano Reingart",
author_email="reingart@gmail.com",
url="http://www.sistemasagiles.com.ar",
license="GNU GPL v3",
data_files = [ (".", pycard_resources),
(".",["logo.png",]) ] + data_files,
options=opts,
cmdclass = {"py2exe": build_installer},
**{buildstyle: [Target(module=pyrece, script='pyrece.py'),
Target(module=pyfpdf_hg.designer, script='pyfpdf_hg/designer.py')],
'console': [Target(module=pyrece, script="pyrece.py", dest_base="pyrece_consola")]
}
)
| Python |
# Para hacer el ejecutable:
# python setup.py py2exe
#
"Creador de instalador para PyAfipWs (WSAA)"
__author__ = "Mariano Reingart (mariano@nsis.com.ar)"
__copyright__ = "Copyright (C) 2011 Mariano Reingart"
from distutils.core import setup
import py2exe
import glob, sys
# includes for py2exe
includes=['email.generator', 'email.iterators', 'email.message', 'email.utils']
# don't pull in all this MFC stuff used by the makepy UI.
excludes=["pywin", "pywin.dialogs", "pywin.dialogs.list", "win32ui"]
opts = {
'py2exe': {
'includes':includes,
'optimize':2,
'excludes': excludes,
'dll_excludes': ["mswsock.dll", "powrprof.dll", "KERNELBASE.dll",
"API-MS-Win-Core-LocalRegistry-L1-1-0.dll",
"API-MS-Win-Core-ProcessThreads-L1-1-0.dll",
"API-MS-Win-Security-Base-L1-1-0.dll"
],
'skip_archive': True,
}}
data_files = [
(".", ["licencia.txt", "geotrust.crt"]),
("cache", glob.glob("cache/*")),
]
import wsaa
from nsis import build_installer, Target
setup(
name="WSAA",
version=wsaa.__version__ + (wsaa.HOMO and '-homo' or '-full'),
description="Interfaz PyAfipWs WSAA %s",
long_description=wsaa.__doc__,
author="Mariano Reingart",
author_email="reingart@gmail.com",
url="http://www.sistemasagiles.com.ar",
license="GNU GPL v3",
com_server = [Target(module=wsaa, modules='wsaa', create_exe=True, create_dll=True)],
console=[Target(module=wsaa, script="wsaa.py", dest_base="wsaa-cli")],
options=opts,
data_files = data_files,
cmdclass = {"py2exe": build_installer}
)
| Python |
# Para hacer el ejecutable:
# python setup.py py2exe
#
"Creador de instalador para PyAfipWs"
__author__ = "Mariano Reingart (mariano@nsis.com.ar)"
__copyright__ = "Copyright (C) 2008 Mariano Reingart"
from distutils.core import setup
import py2exe
import sys
# includes for py2exe
includes=['email.generator', 'email.iterators', 'email.message', 'email.utils']
opts = {
'py2exe': {
'includes':includes,
'optimize':2}
}
setup( name = "PyAfipWs",
com_server = ["pyafipws"],
console=['rece.py', 'receb.py', 'recex.py', 'rg1361.py', 'wsaa.py', 'wsfex.py', 'wsbfe.py'],
options=opts,
)
| Python |
# To build the executable:
#  python setup.py py2exe
#
"Creador de instalador para PyAfipWs (WSFEXv1)"
__author__ = "Mariano Reingart (mariano@nsis.com.ar)"
__copyright__ = "Copyright (C) 2010 Mariano Reingart"
from distutils.core import setup
import py2exe
import glob, sys
# includes for py2exe
# email subpackages are imported dynamically, so list them explicitly
includes=['email.generator', 'email.iterators', 'email.message', 'email.utils']
# don't pull in all this MFC stuff used by the makepy UI.
excludes=["pywin", "pywin.dialogs", "pywin.dialogs.list", "win32ui"]
# skip_archive keeps modules as loose files for the NSIS installer;
# dll_excludes lists Windows system DLLs that must not be redistributed
opts = {
    'py2exe': {
    'includes':includes,
    'optimize':2,
    'excludes': excludes,
    'dll_excludes': ["mswsock.dll", "powrprof.dll", "KERNELBASE.dll",
                     "API-MS-Win-Core-LocalRegistry-L1-1-0.dll",
                     "API-MS-Win-Core-ProcessThreads-L1-1-0.dll",
                     "API-MS-Win-Security-Base-L1-1-0.dll"
                     ],
    'skip_archive': True,
    }}
# extra files shipped beside the executable (license, sample INI, CA cert)
data_files = [
    (".", ["licencia.txt", "rece.ini.dist", "geotrust.crt"]),
    ("cache", glob.glob("cache/*")),
    ]
import wsfexv1, wsaa, recex1
from nsis import build_installer, Target
setup(
    name="WSFEXV1",
    # "-homo" marks a homologation (testing) build, "-full" a production one
    version=wsfexv1.__version__ + (wsfexv1.HOMO and '-homo' or '-full'),
    # NOTE(review): the "%s" placeholder below is never filled in -- confirm
    description="Interfaz PyAfipWs WSFEXv1 %s",
    long_description=wsfexv1.__doc__,
    author="Mariano Reingart",
    author_email="reingart@gmail.com",
    url="http://www.sistemasagiles.com.ar",
    license="GNU GPL v3",
    com_server = [Target(module=wsfexv1,modules="wsfexv1")],
    console=[Target(module=wsfexv1, script='wsfexv1.py', dest_base="wsfexv1_cli"),
             Target(module=recex1, script='recex1.py', dest_base="recex1"),
             Target(module=wsaa, script='wsaa.py'),
             ],
    options=opts,
    data_files = data_files,
    # build_installer runs py2exe and then compiles the NSIS installer
    cmdclass = {"py2exe": build_installer}
    )
| Python |
#!/usr/bin/python
# -*- coding: latin-1 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"Py2Exe extension to build NSIS Installers"
# Based on py2exe/samples/extending/setup.py:
# "A setup script showing how to extend py2exe."
# Copyright (c) 2000-2008 Thomas Heller, Mark Hammond, Jimmy Retzlaff
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2011 Mariano Reingart"
__license__ = "GPL 3.0"
import os
import sys
from py2exe.build_exe import py2exe
nsi_base_script = r"""\
; base.nsi
; WARNING: This script has been created by py2exe. Changes to this script
; will be overwritten the next time py2exe is run!
XPStyle on
Page license
Page directory
;Page components
Page instfiles
RequestExecutionLevel admin
LoadLanguageFile "${NSISDIR}\Contrib\Language files\English.nlf"
LoadLanguageFile "${NSISDIR}\Contrib\Language files\Spanish.nlf"
# set license page
LicenseText ""
LicenseData "licencia.txt"
LicenseForceSelection checkbox
; use the default string for the directory page.
DirText ""
Name "%(description)s"
OutFile "%(out_file)s"
;SetCompress off ; disable compression (testing)
SetCompressor /SOLID lzma
;InstallDir %(install_dir)s
InstallDir $PROGRAMFILES\%(install_dir)s
InstallDirRegKey HKLM "Software\%(reg_key)s" "Install_Dir"
VIProductVersion "%(product_version)s"
VIAddVersionKey /LANG=${LANG_ENGLISH} "ProductName" "%(name)s"
VIAddVersionKey /LANG=${LANG_ENGLISH} "FileDescription" "%(description)s"
VIAddVersionKey /LANG=${LANG_ENGLISH} "CompanyName" "%(company_name)s"
VIAddVersionKey /LANG=${LANG_ENGLISH} "FileVersion" "%(product_version)s"
VIAddVersionKey /LANG=${LANG_ENGLISH} "LegalCopyright" "%(copyright)s"
;VIAddVersionKey /LANG=${LANG_ENGLISH} "InternalName" "FileSetup.exe"
Section %(name)s
; uninstall old version
ReadRegStr $R0 HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\%(reg_key)s" "UninstallString"
StrCmp $R0 "" notistalled
ExecWait '$R0 /S _?=$INSTDIR'
notistalled:
SectionIn RO
SetOutPath $INSTDIR
File /r dist\*.*
IfFileExists $INSTDIR\\rece.ini.dist 0 +3
IfFileExists $INSTDIR\\rece.ini +2 0
CopyFiles $INSTDIR\\rece.ini.dist $INSTDIR\\rece.ini
WriteRegStr HKLM SOFTWARE\%(reg_key)s "Install_Dir" "$INSTDIR"
; Write the uninstall keys for Windows
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\%(reg_key)s" "DisplayName" "%(description)s (solo eliminar)"
WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\%(reg_key)s" "UninstallString" "$INSTDIR\Uninst.exe"
WriteUninstaller "Uninst.exe"
;To Register a DLL
%(register_com_servers)s
IfFileExists $INSTDIR\\pyrece.exe 0 +3
;create start-menu items
CreateDirectory "$SMPROGRAMS\%(name)s"
CreateShortCut "$SMPROGRAMS\%(name)s\PyRece.lnk" "$INSTDIR\pyrece.exe" "" "$INSTDIR\pyrece.exe" 0
CreateShortCut "$SMPROGRAMS\%(name)s\Designer.lnk" "$INSTDIR\designer.exe" "" "$INSTDIR\designer.exe" 0
;CreateShortCut "$SMPROGRAMS\%(name)s\Uninstall.lnk" "$INSTDIR\Uninst.exe" "" "$INSTDIR\Uninst.exe" 0
SectionEnd
Section "Uninstall"
;To Unregister a DLL
%(unregister_com_servers)s
;Delete Files
;Delete Uninstaller And Unistall Registry Entries
Delete "$INSTDIR\Uninst.exe"
DeleteRegKey HKEY_LOCAL_MACHINE "SOFTWARE\%(reg_key)s"
DeleteRegKey HKEY_LOCAL_MACHINE "SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\%(reg_key)s"
RMDir "$INSTDIR"
SectionEnd
;--------------------------------
Function .onInit
IfSilent nolangdialog
;Language selection dialog
Push ""
Push ${LANG_ENGLISH}
Push English
Push ${LANG_SPANISH}
Push Spanish
Push A ; A means auto count languages
; for the auto count to work the first empty push (Push "") must remain
LangDLL::LangDialog "Installer Language" "Please select the language of the installer"
Pop $LANGUAGE
StrCmp $LANGUAGE "cancel" 0 +2
Abort
nolangdialog:
FunctionEnd
"""
register_com_server = """\
RegDLL "$INSTDIR\%s"
"""
unregister_com_server= """\
UnRegDLL "$INSTDIR\%s"
"""
class build_installer(py2exe):
    """distutils command that runs py2exe and then builds an NSIS installer.

    Used as ``cmdclass={"py2exe": build_installer}`` in the setup scripts.
    Requires NSIS (Nullsoft Scriptable Install System) to be installed and
    associated with the .nsi "compile" shell verb (Windows only).
    """
    # This class first builds the exe file(s), then creates a Windows installer.
    # You need NSIS (Nullsoft Scriptable Install System) for it.
    def run(self):
        # Clean up -- Windows-only shell command, wipes the previous dist tree
        os.system("del /S /Q dist")
        # First, let py2exe do it's work.
        py2exe.run(self)
        # these attributes are populated by py2exe.run() above
        lib_dir = self.lib_dir
        dist_dir = self.dist_dir
        comserver_files = self.comserver_files
        metadata = self.distribution.metadata
        # create the Installer, using the files py2exe has created.
        script = NSISScript(metadata,
                            lib_dir,
                            dist_dir,
                            self.windows_exe_files,
                            self.lib_files,
                            comserver_files)
        print "*** creating the nsis script***"
        script.create()
        print "*** compiling the nsis script***"
        script.compile()
        # Note: By default the final setup.exe will be in an Output subdirectory.
class NSISScript:
    """Generate an NSIS installer script from distutils metadata plus the
    files produced by py2exe, and hand it to the NSIS compiler.

    The script text comes from the module-level ``nsi_base_script`` template;
    COM server DLLs get RegDLL/UnRegDLL stanzas built from the
    ``register_com_server``/``unregister_com_server`` templates.
    """
    def __init__(self,
                 metadata,            # distutils metadata (name/version/author/url)
                 lib_dir,
                 dist_dir,            # py2exe output dir; file paths are made relative to it
                 windows_exe_files=(),
                 lib_files=(),
                 comserver_files=()):
        # (defaults changed from mutable [] to immutable () -- same behavior,
        # avoids the shared-mutable-default pitfall)
        self.lib_dir = lib_dir
        self.dist_dir = dist_dir
        if not self.dist_dir[-1] in "\\/":
            self.dist_dir += "\\"
        self.name = metadata.get_name()
        self.description = metadata.get_name()
        self.version = metadata.get_version()
        self.copyright = metadata.get_author()
        self.url = metadata.get_url()
        self.windows_exe_files = [self.chop(p) for p in windows_exe_files]
        self.lib_files = [self.chop(p) for p in lib_files]
        # only .dll files can be registered as COM servers
        self.comserver_files = [self.chop(p) for p in comserver_files if p.lower().endswith(".dll")]
    def chop(self, pathname):
        "Strip the dist_dir prefix, leaving a path relative to the install dir."
        return pathname[len(self.dist_dir):]
    def create(self, pathname="base.nsi"):
        "Write the filled-in NSIS script to `pathname` (closing the file when done)."
        self.pathname = pathname
        ver = self.version
        if "-" in ver:
            # drop the "-homo"/"-full" suffix for the numeric version
            ver = ver[:ver.index("-")]
        # VIProductVersion needs x.x.x.x: last component is .1 for "-full"
        # releases and .0 otherwise (e.g. homologation builds)
        rev = self.version.endswith("-full") and ".1" or ".0"
        # map a trailing letter to a numeric component ('a' -> .1, 'b' -> .2, ...)
        ver = [c in '0123456789.' and c or ".%s" % (ord(c)-96) for c in ver]+[rev]
        ofi = self.file = open(pathname, "w")
        try:
            ofi.write(nsi_base_script % {
                'name': self.name,
                'description': "%s version %s" % (self.description, self.version),
                'product_version': ''.join(ver),
                'company_name': self.url,
                'copyright': self.copyright,
                'install_dir': self.name,
                'reg_key': self.name,
                'out_file': "instalador-%s-%s.exe" % (self.name, self.version),
                'register_com_servers': ''.join([register_com_server % comserver for comserver in self.comserver_files]),
                'unregister_com_servers': ''.join([unregister_com_server % comserver for comserver in self.comserver_files]),
                })
        finally:
            # fix: the original never closed the handle, so the NSIS compiler
            # could be started on a partially-flushed script
            ofi.close()
    def compile(self, pathname="base.nsi"):
        "Launch the NSIS compiler on the generated script (Windows-only shell verb)."
        os.startfile(pathname, 'compile')
class Target():
    """py2exe build-target descriptor.

    Derives the Windows version-info resource fields (version, description,
    copyright, name) from the wrapped module's dunder attributes; any extra
    keyword (script=, dest_base=, modules=, ...) becomes an attribute that
    py2exe reads directly.
    """
    def __init__(self, module, **kw):
        # every extra keyword becomes an instance attribute for py2exe
        for key, value in kw.items():
            setattr(self, key, value)
        # Version resource (Properties -- Version): a trailing letter is
        # converted to a numeric component, e.g. "1.21a" -> "1.21.1"
        release = module.__version__
        self.version = "%s.%d" % (release[:-1], ord(release[-1]) - 96)
        self.description = module.__doc__
        self.company_name = "Sistemas Agiles"
        self.copyright = module.__copyright__
        basename = os.path.basename(module.__file__)
        self.name = "Interfaz PyAfipWs - %s" % basename.replace(".pyc", ".py")
import wsaa
import os,sys
from subprocess import Popen, PIPE
from base64 import b64encode
def sign_tra(tra, cert, privatekey):
    "Firmar PKCS#7 el TRA y devolver CMS (recortando los headers SMIME)"
    # Run openssl to sign `tra`. The DER output goes to cms.bin via -out,
    # so the piped stdout is irrelevant; communicate() just feeds stdin and
    # waits for the process to finish.
    proc = Popen(["openssl", "smime", "-sign",
                  "-signer", cert, "-inkey", privatekey,
                  "-outform", "DER",
                  "-out", "cms.bin", "-nodetach"],
                 stdin=PIPE, stdout=PIPE)
    proc.communicate(tra)
    # fix: the original ignored the exit status, silently returning stale or
    # empty data when openssl failed (bad key, missing cert, etc.)
    if proc.returncode:
        raise RuntimeError("openssl smime failed (exit code %s)" % proc.returncode)
    # fix: close the file handle (the original leaked it)
    f = open("cms.bin", "rb")
    try:
        return b64encode(f.read())
    finally:
        f.close()
# Manual smoke test: build a TRA for the "wsfex" service, sign it with the
# test certificate, and request a Ticket de Acceso (TA) from WSAA.
tra = wsaa.create_tra("wsfex")
print tra
cms = sign_tra(tra,"reingart.crt","reingart.key")
print cms
# keep the signed CMS and the returned TA on disk for inspection
open("tra.cms","w").write(cms)
ta = wsaa.call_wsaa(cms)
print ta
open("TA.xml","w").write(ta)
| Python |
#!usr/bin/python
# -*- coding: utf-8-*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"Aplicativo AdHoc Para generación de Facturas Electrónicas"
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2009 Mariano Reingart"
__license__ = "GPL 3.0"
__version__ = "1.24i"
from datetime import datetime
from decimal import Decimal, getcontext, ROUND_DOWN
import os
import sys
import wx
from PythonCard import dialog, model
import traceback
from ConfigParser import SafeConfigParser
import wsaa, wsfe, wsfev1
from php import SimpleXMLElement, SoapClient, SoapFault, date
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from smtplib import SMTP
#from PyFPDF.ejemplos.form import Form
from pyfepdf import FEPDF
# Formatos de archivos:
import formato_xml
import formato_csv
import formato_dbf
import formato_txt
import formato_json
HOMO = False
DEBUG = '--debug' in sys.argv
CONFIG_FILE = "rece.ini"
ACERCA_DE = u"""
PyRece: Aplicativo AdHoc para generar Facturas Electrónicas
Copyright (C) 2008/2009/2010/2011 Mariano Reingart reingart@gmail.com
Este progarma es software libre, se entrega ABSOLUTAMENTE SIN GARANTIA
y es bienvenido a redistribuirlo bajo la licencia GPLv3.
Para información adicional y descargas ver:
http://www.sistemasagiles.com.ar/
"""
INSTRUCTIVO = U"""
Forma de uso:
* Examinar: para buscar el archivo a procesar (opcional)
* Cargar: para leer los datos del archivo de facturas a procesar
* Autenticar: para iniciar la sesión en los servidores de AFIP (obligatorio antes de autorizar)
* Marcar Todo: para seleccionar todas las facturas
* Autorizar: para autorizar las facturas seleccionadas, completando el CAE y demás datos
* Autorizar Lote: para autorizar en un solo lote las facturas seleccionadas
* Grabar: para almacenar los datos procesados en el archivo de facturas
* Previsualizar: para ver por pantalla la factura seleccionadas
* Enviar: para envia por correo electrónico las facturas seleccionadas
Para solicitar soporte comercial, escriba a pyrece@sistemasagiles.com.ar
"""
def digito_verificador_modulo10(codigo):
    """Compute the "modulo 10" check digit used in AFIP invoice barcodes.

    Returns the digit as a one-character string, or '' when the input
    (after stripping surrounding whitespace) is empty or not all digits.
    Reference: http://www.consejo.org.ar/Bib_elect/diciembre04_CT/documentos/rafip1702.htm
    """
    codigo = codigo.strip()
    if not codigo.isdigit():
        # covers the empty string too: "".isdigit() is False
        return ''
    digitos = [int(c) for c in codigo]
    # Counting from the left, odd positions (1st, 3rd, ...) weigh 3 and
    # even positions (2nd, 4th, ...) weigh 1.
    suma_impares = sum(digitos[0::2])
    suma_pares = sum(digitos[1::2])
    total = suma_impares * 3 + suma_pares
    # smallest number that added to `total` yields a multiple of 10
    verificador = (10 - total % 10) % 10
    return str(verificador)
class PyRece(model.Background):
    def on_initialize(self, event):
        """PythonCard startup hook: reset state, preload the input file (if
        any) and default to the wsfev1 webservice.

        Relies on module-level config globals (`entrada`, read from rece.ini
        elsewhere in this file).
        """
        self.cols = []
        self.items = []
        self.paths = [entrada]
        self.token = self.sign = ""
        self.smtp = None
        self.webservice = None
        # preload the invoice file configured in rece.ini, when it exists
        if entrada and os.path.exists(entrada):
            self.cargar()
        self.components.cboWebservice.stringSelection = "wsfev1"
        self.on_cboWebservice_select(event)
        # AFIP voucher-type code -> human readable label (user-facing, Spanish)
        self.tipos = {
            1:u"Factura A",
            2:u"Notas de Débito A",
            3:u"Notas de Crédito A",
            4:u"Recibos A",
            5:u"Notas de Venta al contado A",
            6:u"Facturas B",
            7:u"Notas de Débito B",
            8:u"Notas de Crédito B",
            9:u"Recibos B",
            10:u"Notas de Venta al contado B",
            39:u"Otros comprobantes A que cumplan con la R.G. N° 3419",
            40:u"Otros comprobantes B que cumplan con la R.G. N° 3419",
            60:u"Cuenta de Venta y Líquido producto A",
            61:u"Cuenta de Venta y Líquido producto B",
            63:u"Liquidación A",
            64:u"Liquidación B",
            11:u"Factura C",
            12:u"Nota de Débito C",
            13:u"Nota de Crédito C",
            15:u"Recibo C",
        }
        # disable column sorting (keep rows in file order)
        self.components.lvwListado.GetColumnSorter = lambda: lambda x,y: 0
    def set_cols(self, cols):
        # keep the raw column names and mirror them as list-view headings
        self.__cols = cols
        self.components.lvwListado.columnHeadings = [col.replace("_"," ").title() for col in cols]
    def get_cols(self):
        return self.__cols
    # column names backing the list view (list of str)
    cols = property(get_cols, set_cols)
    def set_items(self, items):
        # items: list of dicts keyed by self.cols; refreshes the list view
        cols = self.cols
        self.__items = items
        def convert_str(value):
            # normalize each cell to unicode for display (latin1 legacy data)
            if value is None:
                return ''
            elif isinstance(value, str):
                return unicode(value,'latin1')
            elif isinstance(value, unicode):
                return value
            else:
                return str(value)
        self.components.lvwListado.items = [[convert_str(item[col]) for col in cols] for item in items]
        wx.SafeYield()
    def get_items(self):
        return self.__items
    # invoice rows shown in the list view (list of dicts)
    items = property(get_items, set_items)
    def get_selected_items(self):
        """Yield (index, row_dict) for each row selected in the list view."""
        itemidx = -1
        itemidx = self.components.lvwListado.GetNextItem(itemidx, wx.LIST_NEXT_ALL, wx.LIST_STATE_SELECTED)
        while itemidx >= 0:
            yield itemidx, self.__items[itemidx]
            itemidx = self.components.lvwListado.GetNextItem(itemidx, wx.LIST_NEXT_ALL, wx.LIST_STATE_SELECTED)
    def set_selected_items(self, selected):
        # re-select the given row indexes (used after a refresh clears them)
        for itemidx in selected:
            self.components.lvwListado.Select(itemidx, on=True)
    def set_paths(self, paths):
        self.__paths = paths
        self.components.txtArchivo.text = ', '.join([fn for fn in paths])
    def get_paths(self):
        return self.__paths
    # input/output file names, mirrored into the path text box
    paths = property(get_paths, set_paths)
    def log(self, msg):
        """Prepend `msg` to the on-screen status box and append it to
        pyrece.log (best effort: logging failures only print to stdout)."""
        if not isinstance(msg, unicode):
            msg = unicode(msg, "latin1","ignore")
        # newest message first in the UI
        self.components.txtEstado.text = msg + u"\n" + self.components.txtEstado.text
        wx.SafeYield()
        f = None
        try:
            f = open("pyrece.log","a")
            f.write("%s: " % (datetime.now(), ))
            # non-ascii chars are dropped so the log stays plain ASCII
            f.write(msg.encode("ascii", "ignore"))
            f.write("\n\r")
        except Exception, e:
            print e
        finally:
            if f:
                f.close()
    def progreso(self, value):
        # update the progress bar: `value` is the 0-based index of the last
        # processed row, scaled to a 0-100 percentage
        if self.items:
            per = (value+1)/float(len(self.items))*100
            self.components.pbProgreso.value = per
            wx.SafeYield()
    def error(self, code, text):
        """Log the current traceback and show an alert with `code`/`text`."""
        ex = traceback.format_exception( sys.exc_type, sys.exc_value, sys.exc_traceback)
        self.log(''.join(ex))
        dialog.alertDialog(self, text, 'Error %s' % code)
    def verifica_ws(self):
        """Abort (RuntimeError) unless a webservice is selected and the AFIP
        token/sign pair was obtained via Autenticar."""
        if not self.ws:
            dialog.alertDialog(self, "Debe seleccionar el webservice a utilizar!", 'Advertencia')
            raise RuntimeError()
        if not self.token or not self.sign:
            dialog.alertDialog(self, "Debe autenticarse con AFIP!", 'Advertencia')
            raise RuntimeError()
    def on_btnMarcarTodo_mouseClick(self, event):
        # select every row in the list view
        for i in range(len(self.__items)):
            self.components.lvwListado.SetSelection(i)
    def on_menuConsultasDummy_select(self, event):
        """Call the webservice's Dummy/FEDummy method and show server status."""
        self.verifica_ws()
        try:
            if self.webservice=="wsfe":
                results = self.client.FEDummy()
                msg = "AppServ %s\nDbServer %s\nAuthServer %s" % (
                    results.appserver, results.dbserver, results.authserver)
                # NOTE(review): in wsfe mode self.ws is None (see
                # on_cboWebservice_select) -- this line looks like it should
                # read self.client.location; confirm before relying on it
                location = self.ws.client.location
            elif self.webservice=="wsfev1":
                self.ws.Dummy()
                msg = "AppServ %s\nDbServer %s\nAuthServer %s" % (
                    self.ws.AppServerStatus, self.ws.DbServerStatus, self.ws.AuthServerStatus)
                location = self.ws.client.location
            dialog.alertDialog(self, msg, location)
        except Exception, e:
            self.error(u'Excepción',unicode(str(e),"latin1","ignore"))
    def on_menuConsultasLastCBTE_select(self, event):
        """Ask AFIP for the last authorized voucher number for a voucher type
        and point of sale chosen interactively by the user."""
        self.verifica_ws()
        result = dialog.singleChoiceDialog(self, "Tipo de comprobante",
            u"Consulta Último Nro. Comprobante",
            [v for k,v in sorted([(k,v) for k,v in self.tipos.items()])])
        if not result.accepted:
            return
        # map the chosen label back to its AFIP numeric code
        tipocbte = [k for k,v in self.tipos.items() if v==result.selection][0]
        result = dialog.textEntryDialog(self, u"Punto de venta",
            u"Consulta Último Nro. Comprobante", '2')
        if not result.accepted:
            return
        ptovta = result.text
        try:
            if self.webservice=="wsfe":
                ultcmp = wsfe.recuperar_last_cmp(self.client, self.token, self.sign,
                    cuit, ptovta, tipocbte)
            elif self.webservice=="wsfev1":
                ultcmp = "%s (wsfev1)" % self.ws.CompUltimoAutorizado(tipocbte, ptovta)
            dialog.alertDialog(self, u"Último comprobante: %s\n"
                u"Tipo: %s (%s)\nPunto de Venta: %s" % (ultcmp, self.tipos[tipocbte],
                    tipocbte, ptovta), u'Consulta Último Nro. Comprobante')
        except SoapFault,e:
            # dump the raw SOAP exchange before reporting
            self.log(self.client.xml_request)
            self.log(self.client.xml_response)
            self.error(e.faultcode, e.faultstring.encode("ascii","ignore"))
        except wsfe.WSFEError,e:
            self.error(e.code, e.msg.encode("ascii","ignore"))
        except Exception, e:
            self.error(u'Excepción',unicode(str(e),"latin1","ignore"))
    def on_menuConsultasGetCAE_select(self, event):
        """Look up an already-authorized voucher (CAE and related fields) for
        a type / point of sale / number entered interactively."""
        self.verifica_ws()
        result = dialog.singleChoiceDialog(self, "Tipo de comprobante",
            u"Consulta Comprobante",
            [v for k,v in sorted([(k,v) for k,v in self.tipos.items()])])
        if not result.accepted:
            return
        # map the chosen label back to its AFIP numeric code
        tipocbte = [k for k,v in self.tipos.items() if v==result.selection][0]
        result = dialog.textEntryDialog(self, u"Punto de venta",
            u"Consulta Comprobante", '2')
        if not result.accepted:
            return
        ptovta = result.text
        result = dialog.textEntryDialog(self, u"Nº de comprobante",
            u"Consulta Comprobante", '2')
        if not result.accepted:
            return
        nrocbte = result.text
        try:
            if self.webservice=="wsfe":
                # the legacy wsfe service has no voucher-lookup call
                cae = 'no soportado'
            elif self.webservice=="wsfev1":
                cae = "%s (wsfev1)" % self.ws.CompConsultar(tipocbte, ptovta, nrocbte)
                # CompConsultar fills these attributes on the ws object
                self.log('CAE: %s' % self.ws.CAE)
                self.log('FechaCbte: %s' % self.ws.FechaCbte)
                self.log('PuntoVenta: %s' % self.ws.PuntoVenta)
                self.log('CbteNro: %s' % self.ws.CbteNro)
                self.log('ImpTotal: %s' % self.ws.ImpTotal)
                self.log('ImpNeto: %s' % self.ws.ImpNeto)
                self.log('ImptoLiq: %s' % self.ws.ImptoLiq)
                self.log('EmisionTipo: %s' % self.ws.EmisionTipo)
            dialog.alertDialog(self, u"CAE: %s\n"
                u"Tipo: %s (%s)\nPunto de Venta: %s\nNumero: %s\nFecha: %s" % (
                    cae, self.tipos[tipocbte],
                    tipocbte, ptovta, nrocbte, self.ws.FechaCbte),
                u'Consulta Comprobante')
        except SoapFault,e:
            self.log(self.client.xml_request)
            self.log(self.client.xml_response)
            self.error(e.faultcode, e.faultstring.encode("ascii","ignore"))
        except wsfe.WSFEError,e:
            self.error(e.code, e.msg.encode("ascii","ignore"))
        except Exception, e:
            self.error(u'Excepción',unicode(str(e),"latin1","ignore"))
    def on_menuConsultasLastID_select(self, event):
        """Query the last (maximum) request ID used with the legacy wsfe
        service and show it to the user."""
        self.verifica_ws()
        try:
            ultnro = wsfe.ultnro(self.client, self.token, self.sign, cuit)
            dialog.alertDialog(self, u"Último ID (máximo): %s" % (ultnro),
                u'Consulta Último ID')
        except SoapFault,e:
            # dump the raw SOAP exchange before reporting
            self.log(self.client.xml_request)
            self.log(self.client.xml_response)
            self.error(e.faultcode, e.faultstring.encode("ascii","ignore"))
        except wsfe.WSFEError,e:
            self.error(e.code, e.msg.encode("ascii","ignore"))
        except Exception, e:
            self.error(u'Excepción',unicode(e))
    def on_menuAyudaAcercaDe_select(self, event):
        # "About" dialog with version
        text = ACERCA_DE
        dialog.alertDialog(self, text, u'Acerca de PyRece Versión %s' % __version__)
    def on_menuAyudaInstructivo_select(self, event):
        # quick usage instructions
        text = INSTRUCTIVO
        dialog.alertDialog(self, text, u'Instructivo de PyRece')
    def on_menuAyudaLimpiar_select(self, event):
        # clear the status/log text box
        self.components.txtEstado.text = ""
    def on_menuAyudaMensajesXML_select(self, event):
        # show the last raw SOAP request/response and expand the window
        self.verifica_ws()
        self.components.txtEstado.text = u"XmlRequest:\n%s\n\nXmlResponse:\n%s" % (
            self.ws.xml_request, self.ws.xml_response)
        self.size = (592, 517)
    def on_menuAyudaVerEstado_select(self, event):
        # toggle between the expanded (status visible) and compact window sizes
        if self.size[1]<517:
            self.size = (592, 517)
        else:
            self.size = (592, 265)
    def on_menuAyudaVerConfiguracion_select(self, event):
        # dump rece.ini into the status box for inspection
        self.components.txtEstado.text = open(CONFIG_FILE).read()
        self.size = (592, 517)
    def on_cboWebservice_select(self, event):
        """Switch the active webservice and reset the session credentials.

        wsfe uses a raw SoapClient in self.client; wsfev1 uses the higher
        level wrapper in self.ws (self.ws stays None for wsfe).
        """
        self.webservice = self.components.cboWebservice.stringSelection
        self.ws = None
        self.token = None
        self.sign = None
        if self.webservice == "wsfe":
            self.client = SoapClient(wsfe_url, action=wsfe.SOAP_ACTION, namespace=wsfe.SOAP_NS,
                trace=False, exceptions=True)
        elif self.webservice == "wsfev1":
            self.ws = wsfev1.WSFEv1()
    def on_btnAutenticar_mouseClick(self, event):
        """Authenticate against AFIP's WSAA: build a TRA, sign it with the
        configured certificate/key, call LoginCMS and keep the resulting
        token/sign pair (also pushed into self.ws for wsfev1)."""
        try:
            # map the selected GUI webservice to the WSAA service name
            if self.webservice in ('wsfe', ):
                service = "wsfe"
            elif self.webservice in ('wsfev1', ):
                self.log("Conectando WSFEv1... " + wsfev1_url)
                self.ws.Conectar("",wsfev1_url, proxy_dict)
                self.ws.Cuit = cuit
                # wsfev1 authenticates under the same "wsfe" WSAA service
                service = "wsfe"
            elif self.webservice in ('wsfex', ):
                service = "wsfex"
            else:
                dialog.alertDialog(self, 'Debe seleccionar servicio web!', 'Advertencia')
                return
            self.log("Creando TRA %s ..." % service)
            ws = wsaa.WSAA()
            tra = ws.CreateTRA(service)
            self.log("Frimando TRA (CMS) con %s %s..." % (str(cert),str(privatekey)))
            cms = ws.SignTRA(str(tra),str(cert),str(privatekey))
            self.log("Llamando a WSAA... " + wsaa_url)
            ws.Conectar("", wsdl=wsaa_url, proxy=proxy_dict)
            self.log("Proxy: %s" % proxy_dict)
            xml = ws.LoginCMS(str(cms))
            self.log("Procesando respuesta...")
            if xml:
                self.token = ws.Token
                self.sign = ws.Sign
            if DEBUG:
                self.log("Token: %s" % self.token)
                self.log("Sign: %s" % self.sign)
            elif self.token and self.sign:
                # avoid dumping full credentials unless debugging
                self.log("Token: %s... OK" % self.token[:10])
                self.log("Sign: %s... OK" % self.sign[:10])
            if self.webservice == "wsfev1":
                # hand the credentials to the wsfev1 wrapper
                self.ws.Token = self.token
                self.ws.Sign = self.sign
            if xml:
                dialog.alertDialog(self, 'Autenticado OK!', 'Advertencia')
            else:
                dialog.alertDialog(self, u'Respuesta: %s' % ws.XmlResponse, u'No se pudo autenticar: %s' % ws.Excepcion)
        except SoapFault,e:
            self.error(e.faultcode, e.faultstring.encode("ascii","ignore"))
        except Exception, e:
            self.error(u'Excepción',unicode(e))
    def examinar(self):
        """Show a file-open dialog (CSV/XML/TXT/DBF/JSON) and store the
        chosen paths in self.paths without loading them."""
        filename = entrada
        wildcard = ["Archivos CSV (*.csv)|*.csv", "Archivos XML (*.xml)|*.xml",
                    "Archivos TXT (*.txt)|*.txt", "Archivos DBF (*.dbf)|*.dbf",
                    "Archivos JSON (*.json)|*.json",
                    ]
        # when the configured input is XML, offer XML first in the dialog
        if entrada.endswith("xml"):
            wildcard.sort(reverse=True)
        result = dialog.fileDialog(self, 'Abrir', '', filename, '|'.join(wildcard))
        if not result.accepted:
            return
        self.paths = result.paths
    def on_menuArchivoAbrir_select(self, event):
        # browse for a file, then load it
        self.examinar()
        self.cargar()
    def on_menuArchivoCargar_select(self, event):
        # (re)load the currently selected paths
        self.cargar()
def cargar(self):
try:
items = []
for fn in self.paths:
if fn.lower().endswith(".csv"):
filas = formato_csv.leer(fn)
items.extend(filas)
elif fn.lower().endswith(".xml"):
regs = formato_xml.leer(fn)
items.extend(formato_csv.aplanar(regs))
elif fn.lower().endswith(".txt"):
regs = formato_txt.leer(fn)
items.extend(formato_csv.aplanar(regs))
elif fn.lower().endswith(".dbf"):
reg = formato_dbf.leer({'Encabezado': fn})
items.extend(formato_csv.aplanar(reg.values()))
elif fn.lower().endswith(".json"):
regs = formato_json.leer(fn)
items.extend(formato_csv.aplanar(regs))
else:
self.error(u'Formato de archivo desconocido: %s' % unicode(fn))
if len(items) < 2:
dialog.alertDialog(self, u'El archivo no tiene datos válidos', 'Advertencia')
cols = items and [str(it).strip() for it in items[0]] or []
if DEBUG: print "Cols",cols
# armar diccionario por cada linea
items = [dict([(cols[i],v) for i,v in enumerate(item)]) for item in items[1:]]
self.cols = cols
self.items = items
except Exception,e:
self.error(u'Excepción',unicode(e))
    def on_menuArchivoGuardar_select(self, event):
        """Ask for a destination file name and save the current rows there
        (format chosen by extension, see grabar())."""
        filename = entrada
        wildcard = ["Archivos CSV (*.csv)|*.csv", "Archivos XML (*.xml)|*.xml",
                    "Archivos TXT (*.txt)|*.txt", "Archivos DBF (*.dbf)|*.dbf",
                    "Archivos JSON (*.json)|*.json",
                    ]
        # when the configured input is XML, offer XML first in the dialog
        if entrada.endswith("xml"):
            wildcard.sort(reverse=True)
        # default to the file that was loaded, falling back to the
        # configured output name
        if self.paths:
            path = self.paths[0]
        else:
            path = salida
        result = dialog.saveFileDialog(self, title='Guardar', filename=path,
            wildcard='|'.join(wildcard))
        if not result.accepted:
            return
        fn = result.paths[0]
        self.grabar(fn)
def grabar(self, fn=None):
try:
if fn is None and salida:
if salida.startswith("-") and self.paths:
fn = os.path.splitext(self.paths[0])[0] + salida
else:
fn = salida
elif not fn:
raise RuntimeError("Debe indicar un nombre de archivo para grabar")
if fn.endswith(".csv"):
formato_csv.escribir([self.cols] + [[item[k] for k in self.cols] for item in self.items], fn)
else:
regs = formato_csv.desaplanar([self.cols] + [[item[k] for k in self.cols] for item in self.items])
if fn.endswith(".xml"):
formato_xml.escribir(regs, fn)
elif fn.endswith(".txt"):
formato_txt.escribir(regs, fn)
elif fn.endswith(".dbf"):
formato_dbf.escribir(regs, {'Encabezado': fn})
elif fn.endswith(".json"):
formato_json.escribir(regs, fn)
else:
self.error(u'Formato de archivo desconocido: %s' % unicode(fn))
dialog.alertDialog(self, u'Se guardó con éxito el archivo:\n%s' % (unicode(fn),), 'Guardar')
except Exception, e:
self.error(u'Excepción',unicode(e))
    def on_menuArchivoDiseniador_select(self, event):
        """Launch the PDF template designer window."""
        # TODO: broken -- PythonCard apparently does not import the same wx
        # namespace, so the designer frame may not integrate correctly
        from pyfpdf_hg.designer import AppFrame
        frame = AppFrame()
        frame.Show(1)
def on_btnAutorizar_mouseClick(self, event):
self.verifica_ws()
try:
ok = procesadas = rechazadas = 0
cols = self.cols
items = []
self.progreso(0)
selected = []
for i, item in self.get_selected_items():
kargs = item.copy()
selected.append(i)
kargs['cbt_desde'] = kargs['cbt_hasta'] = kargs ['cbt_numero']
for key in kargs:
if isinstance(kargs[key], basestring):
kargs[key] = kargs[key].replace(",",".")
if self.webservice == 'wsfe':
if 'id' not in kargs or kargs['id'] == "":
id = long(kargs['cbt_desde'])
id += (int(kargs['tipo_cbte'])*10**4 + int(kargs['punto_vta']))*10**8
kargs['id'] = id
if DEBUG:
self.log('\n'.join(["%s='%s'" % (k,v) for k,v in kargs.items()]))
if not cuit in kargs:
kargs['cuit'] = cuit
ret = wsfe.aut(self.client, self.token, self.sign, **kargs)
kargs.update(ret)
del kargs['cbt_desde']
del kargs['cbt_hasta']
elif self.webservice == 'wsfev1':
encabezado = {}
for k in ('concepto', 'tipo_doc', 'nro_doc', 'tipo_cbte', 'punto_vta',
'cbt_desde', 'cbt_hasta', 'imp_total', 'imp_tot_conc', 'imp_neto',
'imp_iva', 'imp_trib', 'imp_op_ex', 'fecha_cbte',
'moneda_id', 'moneda_ctz'):
encabezado[k] = kargs[k]
for k in ('fecha_venc_pago', 'fecha_serv_desde', 'fecha_serv_hasta'):
if k in kargs:
encabezado[k] = kargs.get(k)
self.ws.CrearFactura(**encabezado)
for l in range(1,1000):
k = 'tributo_%%s_%s' % l
if (k % 'id') in kargs:
id = kargs[k % 'id']
desc = kargs[k % 'desc']
base_imp = kargs[k % 'base_imp']
alic = kargs[k % 'alic']
importe = kargs[k % 'importe']
if id:
self.ws.AgregarTributo(id, desc, base_imp, alic, importe)
else:
break
for l in range(1,1000):
k = 'iva_%%s_%s' % l
if (k % 'id') in kargs:
id = kargs[k % 'id']
base_imp = kargs[k % 'base_imp']
importe = kargs[k % 'importe']
if id:
self.ws.AgregarIva(id, base_imp, importe)
else:
break
for l in range(1,1000):
k = 'cbte_asoc_%%s_%s' % l
if (k % 'tipo') in kargs:
tipo = kargs[k % 'tipo']
pto_vta = kargs[k % 'pto_vta']
nro = kargs[k % 'nro']
if id:
self.ws.AgregarCmpAsoc(tipo, pto_vta, nro)
else:
break
if DEBUG:
self.log('\n'.join(["%s='%s'" % (k,v) for k,v in self.ws.factura.items()]))
cae = self.ws.CAESolicitar()
kargs.update({
'cae': self.ws.CAE,
'fecha_vto': self.ws.Vencimiento,
'resultado': self.ws.Resultado,
'motivo': self.ws.Obs,
'reproceso': self.ws.Reproceso,
'err_code': self.ws.ErrCode.encode("latin1"),
'err_msg': self.ws.ErrMsg.encode("latin1"),
})
if self.ws.ErrMsg:
dialog.alertDialog(self, self.ws.ErrMsg, "Error AFIP")
if self.ws.Obs and self.ws.Obs!='00':
dialog.alertDialog(self, self.ws.Obs, u"Observación AFIP")
# actuaizo la factura
for k in ('cae', 'fecha_vto', 'resultado', 'motivo', 'reproceso', 'err_code', 'err_msg'):
if kargs.get(k):
item[k] = kargs[k]
self.items[i] = item
self.log(u"ID: %s CAE: %s Motivo: %s Reproceso: %s" % (kargs['id'], kargs['cae'], kargs['motivo'],kargs['reproceso']))
procesadas += 1
if kargs['resultado'] == "R":
rechazadas += 1
elif kargs['resultado'] == "A":
ok += 1
self.progreso(i)
self.items = self.items
self.set_selected_items(selected)
self.progreso(len(self.items))
dialog.alertDialog(self, u'Proceso finalizado, procesadas %d\n\n'
'Aceptadas: %d\n'
'Rechazadas: %d' % (procesadas, ok, rechazadas),
u'Autorización')
self.grabar()
except (SoapFault, wsfev1.SoapFault),e:
self.error(e.faultcode, e.faultstring.encode("ascii","ignore"))
except wsfe.WSFEError,e:
self.error(e.code, e.msg.encode("ascii","ignore"))
except KeyError, e:
self.error("Error",u'Campo obligatorio no encontrado: %s' % e)
except Exception, e:
self.error(u'Excepción',unicode(e))
finally:
if DEBUG:
if self.webservice == 'wsfev1' and DEBUG:
print self.ws.XmlRequest
print self.ws.XmlResponse
    def on_btnAutorizarLote_mouseClick(self, event):
        """Authorize all selected invoices as a single batch: shared header
        fields must match across rows, monetary fields are summed, and the
        voucher range covers the min/max selected numbers. The CAE returned
        for the batch is written back into every selected row."""
        self.verifica_ws()
        if not self.items: return
        try:
            #getcontext().prec = 2
            ok = 0
            rechazadas = 0
            cols = self.cols
            items = []
            self.progreso(0)
            cbt_desde = cbt_hasta = None
            # header fields that must be identical in every selected row
            # NOTE(review): 'fecha_cbte' and 'fecha_venc_pago' are listed
            # twice -- harmless duplicate dict keys, likely a copy/paste slip
            datos = {
                'tipo_cbte': None,
                'punto_vta': None,
                'fecha_cbte': None,
                'fecha_venc_pago': None,
                'fecha_cbte': None,
                'fecha_venc_pago': None,
                'fecha_serv_desde': None,
                'fecha_serv_hasta': None,
                'moneda_id': None,
                'moneda_ctz': None,
                'id': None,
            }
            # monetary fields accumulated across the batch
            importes = {
                'imp_total': Decimal(0),
                'imp_tot_conc': Decimal(0),
                'imp_neto': Decimal(0),
                'imp_iva':Decimal(0),
                'imp_op_ex': Decimal(0),
                'imp_trib': Decimal(0),
            }
            # flattened IVA columns (iva_id_1 ... iva_importe_4)
            for l in range(1,5):
                k = 'iva_%%s_%s' % l
                datos[k % 'id'] = None
                importes[k % 'base_imp'] = Decimal(0)
                importes[k % 'importe'] = Decimal(0)
            # flattened tax columns (tributo_id_1 ... tributo_importe_9)
            for l in range(1,10):
                k = 'tributo_%%s_%s' % l
                datos[k % 'id'] = None
                datos[k % 'desc'] = None
                importes[k % 'base_imp'] = Decimal(0)
                datos[k % 'alic'] = None
                importes[k % 'importe'] = Decimal(0)
            for i, item in self.get_selected_items():
                # widen the voucher-number range to cover this row
                if cbt_desde is None or int(item['cbt_numero']) < cbt_desde:
                    cbt_desde = int(item['cbt_numero'])
                if cbt_hasta is None or int(item['cbt_numero']) > cbt_hasta:
                    cbt_hasta = int(item['cbt_numero'])
                for key in item:
                    if key in datos:
                        # shared fields: take the first value, reject mismatches
                        if datos[key] is None:
                            datos[key] = item[key]
                        elif datos[key] != item[key]:
                            raise RuntimeError(u"%s tiene valores distintos en el lote!" % key)
                    if key in importes and item[key]:
                        # accumulate amounts (decimal comma normalized)
                        importes[key] = importes[key] + Decimal("%.2f" % float(str(item[key].replace(",","."))))
            kargs = {'cbt_desde': cbt_desde, 'cbt_hasta': cbt_hasta}
            # batch invoices go to an anonymous receiver (doc type 99)
            kargs.update({'tipo_doc': 99, 'nro_doc': '0'})
            kargs.update(datos)
            kargs.update(importes)
            # concepto 2 (services) when service dates are present, else 1
            if kargs['fecha_serv_desde'] and kargs['fecha_serv_hasta']:
                kargs['presta_serv'] = 1
                kargs['concepto'] = 2
            else:
                kargs['presta_serv'] = 0
                kargs['concepto'] = 1
                del kargs['fecha_serv_desde']
                del kargs['fecha_serv_hasta']
            # round accumulated amounts down to cents
            for key, val in importes.items():
                importes[key] = val.quantize(Decimal('.01'), rounding=ROUND_DOWN)
            # build the unique request id from voucher type/POS/first number
            if 'id' not in kargs or kargs['id'] == "":
                id = long(kargs['cbt_desde'])
                id += (int(kargs['tipo_cbte'])*10**4 + int(kargs['punto_vta']))*10**8
                kargs['id'] = id
            if DEBUG:
                self.log('\n'.join(["%s='%s'" % (k,v) for k,v in kargs.items()]))
            # --test: fixed values for reproducible homologation runs
            if '--test' in sys.argv:
                kargs['cbt_desde'] = 777
                kargs['fecha_cbte'] = '20110802'
                kargs['fecha_venc_pago'] = '20110831'
            if dialog.messageDialog(self, "Confirma Lote:\n"
                "Tipo: %(tipo_cbte)s Desde: %(cbt_desde)s Hasta %(cbt_hasta)s\n"
                "Neto: %(imp_neto)s IVA: %(imp_iva)s Trib.: %(imp_trib)s Total: %(imp_total)s"
                % kargs, "Autorizar lote:").accepted:
                if self.webservice == 'wsfev1':
                    # header fields required by CrearFactura
                    encabezado = {}
                    for k in ('concepto', 'tipo_doc', 'nro_doc', 'tipo_cbte', 'punto_vta',
                              'cbt_desde', 'cbt_hasta', 'imp_total', 'imp_tot_conc', 'imp_neto',
                              'imp_iva', 'imp_trib', 'imp_op_ex', 'fecha_cbte',
                              'moneda_id', 'moneda_ctz'):
                        encabezado[k] = kargs[k]
                    for k in ('fecha_venc_pago', 'fecha_serv_desde', 'fecha_serv_hasta'):
                        if k in kargs:
                            encabezado[k] = kargs.get(k)
                    self.ws.CrearFactura(**encabezado)
                    for l in range(1,1000):
                        k = 'iva_%%s_%s' % l
                        if (k % 'id') in kargs:
                            id = kargs[k % 'id']
                            base_imp = kargs[k % 'base_imp']
                            importe = kargs[k % 'importe']
                            if id:
                                self.ws.AgregarIva(id, base_imp, importe)
                        else:
                            break
                    for l in range(1,1000):
                        k = 'tributo_%%s_%s' % l
                        if (k % 'id') in kargs:
                            id = kargs[k % 'id']
                            desc = kargs[k % 'desc']
                            base_imp = kargs[k % 'base_imp']
                            alic = kargs[k % 'alic']
                            importe = kargs[k % 'importe']
                            if id:
                                self.ws.AgregarTributo(id, desc, base_imp, alic, importe)
                        else:
                            break
                    if DEBUG:
                        self.log('\n'.join(["%s='%s'" % (k,v) for k,v in self.ws.factura.items()]))
                    cae = self.ws.CAESolicitar()
                    kargs.update({
                        'cae': self.ws.CAE,
                        'fecha_vto': self.ws.Vencimiento,
                        'resultado': self.ws.Resultado,
                        'motivo': self.ws.Obs,
                        'reproceso': self.ws.Reproceso,
                        'err_code': self.ws.ErrCode.encode("latin1"),
                        'err_msg': self.ws.ErrMsg.encode("latin1"),
                    })
                    if self.ws.ErrMsg:
                        dialog.alertDialog(self, self.ws.ErrMsg, "Error AFIP")
                    if self.ws.Obs and self.ws.Obs!='00':
                        dialog.alertDialog(self, self.ws.Obs, u"Observación AFIP")
                # write the single batch result back into every selected row
                for i, item in self.get_selected_items():
                    for key in ('id', 'cae', 'fecha_vto', 'resultado', 'motivo', 'reproceso', 'err_code', 'err_msg'):
                        item[key] = kargs[key]
                    self.log("ID: %s CAE: %s Motivo: %s Reproceso: %s" % (kargs['id'], kargs['cae'], kargs['motivo'],kargs['reproceso']))
                    if kargs['resultado'] == "R":
                        rechazadas += 1
                    elif kargs['resultado'] == "A":
                        ok += 1
                self.items = self.items # refresh the list view (TODO: improve)
                self.progreso(len(self.items))
                dialog.alertDialog(self, 'Proceso finalizado OK!\n\nAceptadas: %d\nRechazadas: %d' % (ok, rechazadas), 'Autorización')
                self.grabar()
        except SoapFault,e:
            self.log(self.client.xml_request)
            self.log(self.client.xml_response)
            self.error(e.faultcode, e.faultstring.encode("ascii","ignore"))
        except wsfe.WSFEError,e:
            self.error(e.code, e.msg.encode("ascii","ignore"))
        except Exception, e:
            self.error(u'Excepción',unicode(e))
def on_btnPrevisualizar_mouseClick(self, event):
try:
j = 0
for i, item in self.get_selected_items():
j += 1
archivo = self.generar_factura(item, mostrar=(j==1))
except Exception, e:
print e
self.error(u'Excepción', unicode(str(e), 'latin1', 'ignore'))
def on_btnGenerar_mouseClick(self, event):
for item in self.items:
archivo = self.generar_factura(item)
def on_btnEnviar_mouseClick(self, event):
try:
ok = no = 0
self.progreso(0)
for i, item in self.get_selected_items():
if not item['cae'] in ("", "NULL"):
archivo = self.generar_factura(item)
if item.get('email'):
self.enviar_mail(item,archivo)
ok += 1
else:
no += 1
self.log("No se envia factura %s por no tener EMAIL" % item['cbt_numero'])
else:
self.log("No se envia factura %s por no tener CAE" % item['cbt_numero'])
no += 1
self.progreso(i)
self.progreso(len(self.items))
dialog.alertDialog(self, 'Proceso finalizado OK!\n\nEnviados: %d\nNo enviados: %d' % (ok, no), 'Envio de Email')
except Exception, e:
self.error(u'Excepción',unicode(e))
def generar_factura(self, fila, mostrar=False):
fepdf = FEPDF()
fact = formato_csv.desaplanar([self.cols] + [[item[k] for k in self.cols] for item in [fila]])[0]
fact['cbte_nro'] = fact['cbt_numero']
fact['items'] = fact['detalles']
for d in fact['datos']:
fepdf.AgregarDato(d['campo'], d['valor'], d['pagina'])
# por compatiblidad, completo campos anteriores
if d['campo'] not in fact and d['valor']:
fact[d['campo']] = d['valor']
fepdf.factura = fact
# cargo el formato CSV por defecto (factura.csv)
fepdf.CargarFormato(conf_fact.get("formato", "factura.csv"))
# datos fijos:
fepdf.CUIT = cuit # CUIT del emisor para código de barras
for k, v in conf_pdf.items():
fepdf.AgregarDato(k, v)
fepdf.CrearPlantilla(papel=conf_fact.get("papel", "legal"),
orientacion=conf_fact.get("orientacion", "portrait"))
fepdf.ProcesarPlantilla(num_copias=int(conf_fact.get("copias", 1)),
lineas_max=int(conf_fact.get("lineas_max", 24)),
qty_pos=conf_fact.get("cant_pos") or 'izq')
salida = conf_fact.get("salida", "")
fact = fepdf.factura
if salida:
pass
elif 'pdf' in fact and fact['pdf']:
salida = fact['pdf']
else:
# genero el nombre de archivo según datos de factura
d = conf_fact.get('directorio', ".")
clave_subdir = conf_fact.get('subdirectorio','fecha_cbte')
if clave_subdir:
d = os.path.join(d, item[clave_subdir])
if not os.path.isdir(d):
os.mkdir(d)
fs = conf_fact.get('archivo','numero').split(",")
it = item.copy()
tipo_fact, letra_fact, numero_fact = fact['_fmt_fact']
it['tipo'] = tipo_fact.replace(" ", "_")
it['letra'] = letra_fact
it['numero'] = numero_fact
it['mes'] = item['fecha_cbte'][4:6]
it['año'] = item['fecha_cbte'][0:4]
fn = ''.join([str(it.get(ff,ff)) for ff in fs])
fn = fn.decode('latin1').encode('ascii', 'replace').replace('?','_')
salida = os.path.join(d, "%s.pdf" % fn)
fepdf.GenerarPDF(archivo=salida)
if mostrar:
fepdf.MostrarPDF(archivo=salida,imprimir='--imprimir' in sys.argv)
return salida
    def enviar_mail(self, item, archivo):
        """Email the invoice PDF for *item* as a MIME attachment via SMTP.

        NOTE(review): the *archivo* argument is immediately overwritten by a
        freshly generated PDF, so the caller's value is never used — confirm
        whether regenerating here is intentional.
        """
        archivo = self.generar_factura(item)
        if item['email']:
            msg = MIMEMultipart()
            # subject template: the literal token NUMERO is replaced by the invoice number
            msg['Subject'] = conf_mail['motivo'].replace("NUMERO",str(item['cbt_numero']))
            msg['From'] = conf_mail['remitente']
            msg['Reply-to'] = msg['From']
            msg['To'] = item['email']
            msg.preamble = 'Mensaje de multiples partes.\n'
            if not 'html' in conf_mail:
                # plain-text body only
                part = MIMEText(conf_mail['cuerpo'])
                msg.attach(part)
            else:
                # multipart/alternative: text body plus HTML body
                alt = MIMEMultipart('alternative')
                msg.attach(alt)
                text = MIMEText(conf_mail['cuerpo'])
                alt.attach(text)
                html = MIMEText(conf_mail['html'], 'html')
                alt.attach(html)
            # attach the generated PDF
            part = MIMEApplication(open(archivo,"rb").read())
            part.add_header('Content-Disposition', 'attachment', filename=os.path.basename(archivo))
            msg.attach(part)
            try:
                self.log("Enviando email: %s a %s" % (msg['Subject'], msg['To']))
                # lazily open a single SMTP connection and reuse it across calls
                if not self.smtp:
                    self.smtp = SMTP(conf_mail['servidor'], conf_mail.get('puerto', 25))
                    if conf_mail['usuario'] and conf_mail['clave']:
                        self.smtp.ehlo()
                        self.smtp.login(conf_mail['usuario'], conf_mail['clave'])
                self.smtp.sendmail(msg['From'], msg['To'], msg.as_string())
            except Exception,e:
                self.error(u'Excepción',unicode(e))
if __name__ == '__main__':
    # optional first argument: alternative configuration file path
    if len(sys.argv)>1 and not sys.argv[1].startswith("-"):
        CONFIG_FILE = sys.argv[1]
    config = SafeConfigParser()
    config.read(CONFIG_FILE)
    if not len(config.sections()):
        if os.path.exists(CONFIG_FILE):
            print "Error al cargar datos desde el archivo: ",CONFIG_FILE
        else:
            print "No se encuentra el archivo: ",CONFIG_FILE
        sys.exit(1)
    # certificate / key / CUIT used for AFIP authentication
    cert = config.get('WSAA','CERT')
    privatekey = config.get('WSAA','PRIVATEKEY')
    cuit = config.get('WSFEv1','CUIT')
    if config.has_option('WSFEv1','ENTRADA'):
        entrada = config.get('WSFEv1','ENTRADA')
    else:
        entrada = ""
    # fall back to the default CSV when the configured input file is missing
    if not os.path.exists(entrada):
        entrada = "facturas.csv"
    if config.has_option('WSFEv1','SALIDA'):
        salida = config.get('WSFEv1','SALIDA')
    else:
        salida = "resultado.csv"
    if config.has_section('FACTURA'):
        conf_fact = dict(config.items('FACTURA'))
    else:
        conf_fact = {}
    conf_pdf = dict(config.items('PDF'))
    conf_mail = dict(config.items('MAIL'))
    # webservice URLs: the config may only override them outside homologation mode
    if config.has_option('WSAA','URL') and not HOMO:
        wsaa_url = config.get('WSAA','URL')
    else:
        wsaa_url = wsaa.WSAAURL
    if config.has_option('WSFE','URL') and not HOMO:
        wsfe_url = config.get('WSFE','URL')
    else:
        wsfe_url = wsfe.WSFEURL
    if config.has_option('WSFEv1','URL') and not HOMO:
        wsfev1_url = config.get('WSFEv1','URL')
    else:
        wsfev1_url = wsfev1.WSDL
    # optional proxy settings, converted to proxy_* keyword arguments
    if config.has_section('PROXY'):
        proxy_dict = dict(("proxy_%s" % k,v) for k,v in config.items('PROXY'))
        proxy_dict['proxy_port'] = int(proxy_dict['proxy_port'])
    else:
        proxy_dict = {}
    # launch the GUI main loop
    app = model.Application(PyRece)
    app.MainLoop()
# ----- file boundary (dataset extraction artifact removed) -----
# To build the executable:
# python setup.py py2exe
#
"Installer builder for PyAfipWs (WSMTXCA)"
__author__ = "Mariano Reingart (mariano@nsis.com.ar)"
__copyright__ = "Copyright (C) 2010 Mariano Reingart"

from distutils.core import setup
import py2exe
import glob, sys

# modules py2exe's static analysis misses (email MIME machinery)
includes = ['email.generator', 'email.iterators', 'email.message', 'email.utils']
# don't pull in all this MFC stuff used by the makepy UI.
excludes = ["pywin", "pywin.dialogs", "pywin.dialogs.list", "win32ui"]
opts = {
    'py2exe': {
        'includes': includes,
        'optimize': 2,
        'excludes': excludes,
    }}

import wsmtx
from nsis import build_installer

data_files = [
    (".", ["wsfev1_wsdl.xml", "wsfev1_wsdl_homo.xml", "licencia.txt", 'rece.ini.dist']),
    ("cache", glob.glob("cache/*")),
    ]

setup(name="WSMTXCA",
      # suffix -homo (homologation/testing) or -full depending on build mode
      version=wsmtx.__version__ + (wsmtx.HOMO and '-homo' or '-full'),
      # BUGFIX: description had a dangling "%s" placeholder that was never
      # interpolated; fill it with the module version.
      description="Interfaz PyAfipWs WSMTXCA %s" % wsmtx.__version__,
      long_description=wsmtx.__doc__,
      author="Mariano Reingart",
      author_email="reingart@gmail.com",
      url="http://www.sistemasagiles.com.ar",
      license="GNU GPL v3",
      com_server=["wsmtx"],
      console=['wsmtx.py', 'wsaa.py', 'recem.py'],
      options=opts,
      data_files=data_files,
      cmdclass={"py2exe": build_installer},
      )
#!/usr/bin/python
# -*- coding: utf_8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"Módulo de Intefase para archivos de texto (exportación version 1)"
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2011 Mariano Reingart"
__license__ = "GPL 3.0"
__version__ = "1.25b"
import datetime
import os
import sys
import time
import traceback
from ConfigParser import SafeConfigParser
# revisar la instalación de pyafip.ws:
import wsaa, wsfexv1
from php import SimpleXMLElement, SoapClient, SoapFault, date
HOMO = wsfexv1.HOMO  # homologation (testing) mode flag, taken from the WS module
DEBUG = False  # set by the /debug command line flag
XML = False  # set by the /xml flag: dump SOAP request/response to files
CONFIG_FILE = "rece.ini"
LICENCIA = """
recex.py: Interfaz de texto para generar Facturas Electrónica Exportación
Copyright (C) 2010 Mariano Reingart reingart@gmail.com
Este progarma es software libre, se entrega ABSOLUTAMENTE SIN GARANTIA
y es bienvenido a redistribuirlo bajo la licencia GPLv3.
Para información adicional sobre garantía, soporte técnico comercial
e incorporación/distribución en programas propietarios ver PyAfipWs:
http://www.sistemasagiles.com.ar/trac/wiki/PyAfipWs
"""
# interchange file record layouts: each entry is (field name, width, type[, decimals])
N = 'Numerico'  # integer field
A = 'Alfanumerico'  # text field
I = 'Importe'  # amount field (implied decimal point)
ENCABEZADO = [
    ('tipo_reg', 1, N), # 0: header record
    ('fecha_cbte', 8, A),
    ('tipo_cbte', 2, N), ('punto_vta', 4, N),
    ('cbte_nro', 8, N),
    ('tipo_expo', 1, N), # 1: goods, 2: services, ...
    ('permiso_existente', 1, A), # S/N/
    ('pais_dst_cmp', 3, N), # 203
    ('nombre_cliente', 200, A), # 'Joao Da Silva'
    ('cuit_pais_cliente', 11, N), # 50000000016
    ('domicilio_cliente', 300, A), # 'Rua 76 km 34.5 Alagoas'
    ('id_impositivo', 50, A), # 'PJ54482221-l'
    ('imp_total', 15, I, 2),
    ('moneda_id', 3, A),
    ('moneda_ctz', 10, I, 6), # width 10, 6 decimals
    ('obs_comerciales', 4000, A),
    ('obs_generales', 1000, A),
    ('forma_pago', 50, A),
    ('incoterms', 3, A),
    ('incoterms_ds', 20, A),
    ('idioma_cbte', 1, A),
    ('cae', 14, N), ('fecha_vto', 8, A),
    ('resultado', 1, A),
    ('reproceso', 1, A),
    ('motivos_obs', 1000, A),
    ('id', 15, N),
    ('fch_venc_cae', 8, A),
    ('excepcion', 100, A),
    ('err_code', 100, A),
    ('err_msg', 1000, A),
    ]
DETALLE = [
    ('tipo_reg', 1, N), # 1: item detail record
    ('codigo', 50, A),
    ('qty', 12, I, 6),
    ('umed', 2, N),
    ('precio', 12, I, 6),
    ('importe', 13, I, 2),
    ('bonif', 12, I, 6),
    ('ds', 4000, A),
    ]
PERMISO = [
    ('tipo_reg', 1, N), # 2: export permit record
    ('id_permiso', 16, A),
    ('dst_merc', 3, N),
    ]
CMP_ASOC = [
    ('tipo_reg', 1, N), # 3: associated voucher record
    ('cbte_tipo', 3, N), ('cbte_punto_vta', 4, N),
    ('cbte_nro', 8, N), ('cbte_cuit', 11, N),
    ]
def leer(linea, formato):
    """Parse one fixed-width record *linea* according to *formato*.

    Returns a dict mapping each field name to its converted value
    (str for numeric fields, float for amounts, unicode for text).
    Raises ValueError with field name/position on any conversion error.
    """
    registro = {}
    pos = 1
    for campo in formato:
        nombre, ancho, tipo_campo = campo[0:3]
        decimales = len(campo) > 3 and campo[3] or 2
        bruto = linea[pos - 1:pos - 1 + ancho].strip()
        try:
            if tipo_campo == N:
                # plain integer: normalize leading zeros, default '0'
                bruto = str(int(bruto)) if bruto else '0'
            elif tipo_campo == I:
                if not bruto:
                    bruto = 0.00
                else:
                    try:
                        bruto = bruto.strip(" ")
                        # amounts carry no decimal point: the last *decimales*
                        # digits are the fractional part
                        bruto = float(("%%s.%%0%sd" % decimales) % (
                            int(bruto[:-decimales] or '0'),
                            int(bruto[-decimales:] or '0')))
                    except ValueError:
                        raise ValueError("Campo invalido: %s = '%s'" % (nombre, bruto))
            else:
                bruto = bruto.decode("ascii","ignore")
            registro[nombre] = bruto
            pos += ancho
        except Exception as e:
            raise ValueError("Error al leer campo %s pos %s val '%s': %s" % (
                nombre, pos, bruto, str(e)))
    return registro
def escribir(dic, formato):
    """Serialize *dic* as one fixed-width record (335 chars plus newline)
    following the field layout given in *formato*."""
    registro = " " * 335
    pos = 1
    for campo in formato:
        nombre, ancho, tipo_campo = campo[0:3]
        decimales = len(campo) > 3 and campo[3] or 2
        # some callers store keys capitalized; prefer that spelling if present
        if nombre.capitalize() in dic:
            nombre = nombre.capitalize()
        valor = dic.get(nombre, "")
        if isinstance(valor, unicode):
            valor = valor.encode("latin1", "replace")
        else:
            valor = str(valor)
        if tipo_campo == N and valor and valor != "NULL":
            valor = ("%%0%dd" % ancho) % int(valor)
        elif tipo_campo == I and valor:
            # amounts are written without the decimal point, zero padded
            valor = ("%%0%dd" % ancho) % (float(valor) * (10 ** decimales))
        else:
            valor = ("%%0%ds" % ancho) % valor
        # splice the formatted field into its fixed position
        registro = registro[:pos - 1] + valor + registro[pos - 1 + ancho:]
        pos += ancho
    return registro + "\n"
def autenticar(cert, privatekey, url):
    """Obtain the WSAA access ticket (TA) and return (token, sign).

    The ticket is cached in TA.xml and reused until it is 5 hours old.

    BUGFIX: the *url* parameter was silently ignored — the call used the
    module-global ``wsaa_url`` instead; it now honors the argument.
    """
    TA = "TA.xml"
    ttl = 60*60*5  # ticket lifetime: 5 hours
    if not os.path.exists(TA) or os.path.getmtime(TA)+(ttl)<time.time():
        import wsaa
        tra = wsaa.create_tra(service="wsfex",ttl=ttl)
        cms = wsaa.sign_tra(str(tra),str(cert),str(privatekey))
        ta_string = wsaa.call_wsaa(cms, url, trace=DEBUG)
        open(TA,"w").write(ta_string)
    ta_string=open(TA).read()
    ta = SimpleXMLElement(ta_string)
    token = str(ta.credentials.token)
    sign = str(ta.credentials.sign)
    return token, sign
def autorizar(ws, entrada, salida):
    """Read invoice records from the open file *entrada*, request the
    authorization code (CAE) through the *ws* webservice proxy, and write
    the result record(s) to the open file *salida*."""
    # recover the last transaction number
    ##id = wsfex.ultnro(client, token, sign, cuit)
    detalles = []
    permisos = []
    cbtasocs = []
    encabezado = {}
    # dispatch each line by its record-type digit (see leer()/record layouts)
    for linea in entrada:
        if str(linea[0])=='0':
            encabezado = leer(linea, ENCABEZADO)
        elif str(linea[0])=='1':
            detalle = leer(linea, DETALLE)
            detalles.append(detalle)
        elif str(linea[0])=='2':
            permiso = leer(linea, PERMISO)
            permisos.append(permiso)
        elif str(linea[0])=='3':
            cbtasoc = leer(linea, CMP_ASOC)
            cbtasocs.append(cbtasoc)
        else:
            print "Tipo de registro incorrecto:", linea[0]
    if not encabezado['id'].strip() or int(encabezado['id'])==0:
        # TODO: the id should be read from / stored in the file
        ##id += 1  # increment the transaction number
        # For now, the id is derived from type, POS and voucher number
        # NOTE(review): *i* is computed but never used — the next line
        # overwrites the id with GetLastID()+1; confirm which was intended.
        i = long(encabezado['cbte_nro'])
        i += (int(encabezado['cbte_nro'])*10**4 + int(encabezado['punto_vta']))*10**8
        encabezado['id'] = ws.GetLastID() + 1
    # build the invoice locally, then attach details/permits/associated vouchers
    ws.CrearFactura(**encabezado)
    for detalle in detalles:
        ws.AgregarItem(**detalle)
    for permiso in permisos:
        ws.AgregarPermiso(**permiso)
    for cbtasoc in cbtasocs:
        ws.AgregarCmpAsoc(**cbtasoc)
    if DEBUG:
        #print f.to_dict()
        print '\n'.join(["%s='%s'" % (k,str(v)) for k,v in encabezado.items()])
        print 'id:', encabezado['id']
    # in interactive debug mode ask for confirmation before calling AFIP
    if not DEBUG or not sys.stdout.isatty() or raw_input("Facturar?")=="S":
        ws.LanzarExcepcion = False
        cae = ws.Authorize(id=encabezado['id'])
        dic = ws.factura
        # merge the webservice outcome into the invoice dict before writing it
        dic.update({
            'cae': cae and str(cae) or '',
            'fch_venc_cae': ws.FchVencCAE and str(ws.FchVencCAE) or '',
            'resultado': ws.Resultado or '',
            'motivos_obs': ws.Obs or '',
            'err_code': str(ws.ErrCode),
            'err_msg': ws.ErrMsg or '',
            'reproceso': ws.Reproceso or '',
            })
        escribir_factura(dic, salida)
        print "ID:", encabezado['id'], "NRO:", dic['cbte_nro'], "Resultado:", dic['resultado'],
        print "CAE:", dic['cae'], "Obs:", dic['motivos_obs'].encode("ascii", "ignore"),
        print "Err:", dic['err_msg'].encode("ascii", "ignore"), "Reproceso:", dic['reproceso']
        if ws.Excepcion:
            print "Excepcion:", ws.Excepcion.encode("ascii", "ignore")
            print "Traceback:", ws.Traceback.encode("ascii", "ignore")
def escribir_factura(dic, archivo):
    "Write one invoice (header, item details, permits) to the open file *archivo*."
    # record type 0: header
    dic['tipo_reg'] = 0
    archivo.write(escribir(dic, ENCABEZADO))
    # record type 1: one line per item detail
    for detalle in dic.get('detalles', []):
        detalle['tipo_reg'] = 1
        archivo.write(escribir(detalle, DETALLE))
    # record type 2: export permits (optional)
    for permiso in dic.get('permisos', []):
        permiso['tipo_reg'] = 2
        archivo.write(escribir(permiso, PERMISO))
def depurar_xml(client):
    "Dump the client's last SOAP request/response to timestamped XML files for debugging."
    marca = time.strftime("%Y%m%d%H%M%S")
    for prefijo, contenido in (("request", client.xml_request),
                               ("response", client.xml_response)):
        volcado = open("%s-%s.xml" % (prefijo, marca), "w")
        volcado.write(contenido)
        volcado.close()
if __name__ == "__main__":
    # command-line help: print the license and available flags, then quit
    if '/ayuda' in sys.argv:
        print LICENCIA
        print
        print "Opciones: "
        print " /ayuda: este mensaje"
        print " /dummy: consulta estado de servidores"
        print " /prueba: genera y autoriza una factura de prueba (no usar en producción!)"
        print " /ult: consulta último número de comprobante"
        print " /debug: modo depuración (detalla y confirma las operaciones)"
        print " /formato: muestra el formato de los archivos de entrada/salida"
        print " /get: recupera datos de un comprobante autorizado previamente (verificación)"
        print " /xml: almacena los requerimientos y respuestas XML (depuración)"
        print
        print "Ver rece.ini para parámetros de configuración (URL, certificados, etc.)"
        sys.exit(0)
    # load certificates, CUIT and input/output file names from rece.ini
    config = SafeConfigParser()
    config.read(CONFIG_FILE)
    cert = config.get('WSAA','CERT')
    privatekey = config.get('WSAA','PRIVATEKEY')
    cuit = config.get('WSFEX','CUIT')
    entrada = config.get('WSFEXv1','ENTRADA')
    salida = config.get('WSFEXv1','SALIDA')
    # webservice URLs may only be overridden outside homologation mode
    if config.has_option('WSAA','URL') and not HOMO:
        wsaa_url = config.get('WSAA','URL')
    else:
        wsaa_url = wsaa.WSAAURL
    if config.has_option('WSFEXv1','URL') and not HOMO:
        wsfexv1_url = config.get('WSFEXv1','URL')
    else:
        wsfexv1_url = ""
    if '/debug'in sys.argv:
        DEBUG = True
    if '/xml'in sys.argv:
        XML = True
    if DEBUG:
        print "wsaa_url %s\nwsfexv1_url %s" % (wsaa_url, wsfexv1_url)
    try:
        ws = wsfexv1.WSFEXv1()
        ws.Conectar("", wsfexv1_url)
        ws.Cuit = cuit
        # /dummy: check the status of the AFIP servers and exit
        if '/dummy' in sys.argv:
            print "Consultando estado de servidores..."
            ws.Dummy()
            print "AppServerStatus", ws.AppServerStatus
            print "DbServerStatus", ws.DbServerStatus
            print "AuthServerStatus", ws.AuthServerStatus
            sys.exit(0)
        # /formato: dump the fixed-width record layouts and exit
        if '/formato' in sys.argv:
            print "Formato:"
            for msg, formato in [('Encabezado', ENCABEZADO), ('Detalle', DETALLE), ('Permiso', PERMISO), ('Comprobante Asociado', CMP_ASOC)]:
                comienzo = 1
                print "== %s ==" % msg
                for fmt in formato:
                    clave, longitud, tipo = fmt[0:3]
                    dec = len(fmt)>3 and fmt[3] or (tipo=='I' and '2' or '')
                    print " * Campo: %-20s Posición: %3d Longitud: %4d Tipo: %s Decimales: %s" % (
                        clave, comienzo, longitud, tipo, dec)
                    comienzo += longitud
            sys.exit(0)
        # TODO: the token should be cached in a file instead of authenticating each run
        token, sign = autenticar(cert, privatekey, wsaa_url)
        ws.Token = token
        ws.Sign = sign
        # /prueba: build a test invoice input file for the next invoice number
        if '/prueba' in sys.argv:
            f_entrada = open(entrada,"w")
            tipo_cbte = 19 # FC Expo (see parameter table)
            punto_vta = 7
            # fetch the last authorized number and add 1
            cbte_nro = int(ws.GetLastCMP(tipo_cbte, punto_vta)) + 1
            fecha_cbte = datetime.datetime.now().strftime("%Y%m%d")
            tipo_expo = 1 # export type (see parameter table)
            permiso_existente = "S"
            dst_cmp = 203 # destination country
            cliente = "Joao Da Silva"
            cuit_pais_cliente = "50000000016"
            domicilio_cliente = "Rua 76 km 34.5 Alagoas"
            id_impositivo = "PJ54482221-l"
            moneda_id = "012" # reales; "DOL" or "PES" (see parameter table)
            moneda_ctz = 0.5
            obs_comerciales = "Observaciones comerciales"
            obs = "Sin observaciones"
            forma_pago = "30 dias"
            incoterms = "FOB" # (see parameter table)
            incoterms_ds = "Flete a Bordo"
            idioma_cbte = 1 # (see parameter table)
            imp_total = "250.00"
            # build the invoice locally (no webservice call yet):
            ok = ws.CrearFactura(tipo_cbte, punto_vta, cbte_nro, fecha_cbte,
                imp_total, tipo_expo, permiso_existente, dst_cmp,
                cliente, cuit_pais_cliente, domicilio_cliente,
                id_impositivo, moneda_id, moneda_ctz,
                obs_comerciales, obs, forma_pago, incoterms,
                idioma_cbte, incoterms_ds)
            # add one line item:
            codigo = "PRO1"
            ds = "Producto Tipo 1 Exportacion MERCOSUR ISO 9001"
            qty = 2
            precio = "150.00"
            umed = 1 # see parameter table (units of measure)
            bonif = "50.00"
            imp_total = "250.00" # final total amount for the item
            # attach it to the invoice (still no webservice call):
            ok = ws.AgregarItem(codigo, ds, qty, umed, precio, imp_total, bonif)
            # add an export permit (see developer manual)
            id = "99999AAXX999999A"
            dst = 225 # destination country of the goods
            ok = ws.AgregarPermiso(id, dst)
            # add an associated voucher (credit/debit notes only)
            if tipo_cbte in (20,21):
                cbteasoc_tipo = 19
                cbteasoc_pto_vta = 2
                cbteasoc_nro = 1234
                cbteasoc_cuit = 20111111111
                ws.AgregarCmpAsoc(cbteasoc_tipo, cbteasoc_pto_vta, cbteasoc_nro, cbteasoc_cuit)
            dic = ws.factura
            dic['id'] = ws.GetLastID() + 1
            escribir_factura(dic, f_entrada)
            f_entrada.close()
        # /ult: query the last authorized voucher number for a type/POS
        if '/ult' in sys.argv:
            i = sys.argv.index("/ult")
            if i+2<len(sys.argv):
                tipo_cbte = int(sys.argv[i+1])
                punto_vta = int(sys.argv[i+2])
            else:
                tipo_cbte = int(raw_input("Tipo de comprobante: "))
                punto_vta = int(raw_input("Punto de venta: "))
            ult_cbte = ws.GetLastCMP(tipo_cbte, punto_vta)
            print "Ultimo numero: ", ult_cbte
            print ws.ErrMsg
            depurar_xml(ws.client)
            escribir_factura({'tipo_cbte': tipo_cbte,
                              'punto_vta': punto_vta,
                              'cbt_desde': ult_cbte,
                              'fecha_cbte': ws.FechaCbte,
                              'err_msg': ws.ErrMsg,
                              }, open(salida,"w"))
            sys.exit(0)
        # /get: retrieve a previously authorized voucher (verification)
        if '/get' in sys.argv:
            print "Recuperar comprobante:"
            i = sys.argv.index("/get")
            if i+3<len(sys.argv):
                tipo_cbte = int(sys.argv[i+1])
                punto_vta = int(sys.argv[i+2])
                cbte_nro = int(sys.argv[i+3])
            else:
                tipo_cbte = int(raw_input("Tipo de comprobante: "))
                punto_vta = int(raw_input("Punto de venta: "))
                cbte_nro = int(raw_input("Numero de comprobante: "))
            ws.GetCMP(tipo_cbte, punto_vta, cbte_nro)
            print "FechaCbte = ", ws.FechaCbte
            print "CbteNro = ", ws.CbteNro
            print "PuntoVenta = ", ws.PuntoVenta
            print "ImpTotal =", ws.ImpTotal
            print "CAE = ", ws.CAE
            print "Vencimiento = ", ws.Vencimiento
            print ws.ErrMsg
            depurar_xml(ws.client)
            escribir_factura({'tipo_cbte': tipo_cbte,
                              'punto_vta': ws.PuntoVenta,
                              'cbt_desde': ws.CbteNro,
                              'fecha_cbte': ws.FechaCbte,
                              'imp_total': ws.ImpTotal,
                              'cae': str(ws.CAE),
                              'fch_venc_cae': ws.Vencimiento,
                              'err_msg': ws.ErrMsg,
                              }, open(salida,"w"))
            sys.exit(0)
        # default mode: authorize the invoices read from *entrada*, writing
        # the results to *salida*
        f_entrada = f_salida = None
        try:
            f_entrada = open(entrada,"r")
            f_salida = open(salida,"w")
            try:
                autorizar(ws, f_entrada, f_salida)
            except:
                # on any failure force the XML dump below for diagnosis
                XML = True
                raise
        finally:
            if f_entrada is not None: f_entrada.close()
            if f_salida is not None: f_salida.close()
            if XML:
                depurar_xml(ws.client)
        sys.exit(0)
    except Exception, e:
        print unicode(e).encode("ascii","ignore")
        if DEBUG or True:
            # NOTE(review): "or True" makes this branch unconditional, so the
            # exception is always re-raised and exit code 5 is unreachable
            raise
        sys.exit(5)
# ----- file boundary (dataset extraction artifact removed) -----
#!/usr/bin/python
# -*- coding: latin-1 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
# Based on MultipartPostHandler.py (C) 02/2006 Will Holcomb <wholcomb@gmail.com>
# Ejemplos iniciales gracias a "Matias Gieco matigro@gmail.com"
"Módulo para analizar el formato de un remito electrónico (COT)"
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2010 Mariano Reingart"
__license__ = "LGPL 3.0"
import sys
# COT record types, keyed by the two leading characters of each line
registros = {
    '01': 'HEADER',
    '02': 'REMITO',
    '03': 'PRODUCTOS',
    '04': 'FOOTER',
    }
# field names (positional, pipe-separated) for each record type.
# NOTE(review): several labels look misspelled ('DESTINATARIO_DOCUEMNTO',
# 'DESTIANTARIO_CUIT', 'ORIGEN DOMICILIO_NUMBERO') — kept as-is since they
# are only used as printed labels; confirm against the ARBA COT spec.
formato = {
    '01': [
        'TIPO_REGISTRO',
        'CUIT_EMPRESA'
        ],
    '02': [
        'TIPO_REGISTRO',
        'FECHA_EMISION',
        'CODIGO_UNICO',
        'FECHA_SALIDA_TRANSPORTE',
        'HORA_SALIDA_TRANSPORTE',
        'SUJETO_GENERADOR',
        'DESTINATARIO_CONSUMIDOR_FINAL',
        'DESTINATARIO_TIPO_DOCUMENTO',
        'DESTINATARIO_DOCUEMNTO',
        'DESTIANTARIO_CUIT',
        'DESTINATARIO_RAZON_SOCIAL',
        'DESTINATARIO_TENEDOR',
        'DESTINO_DOMICILIO_CALLE',
        'DESTINO_DOMICILIO_NUMERO',
        'DESTINO_DOMICILIO_COMPLE',
        'DESTINO_DOMICILIO_PISO',
        'DESTINO_DOMICILIO_DTO',
        'DESTINO_DOMICILIO_BARRIO',
        'DESTINO_DOMICILIO_CODIGOP',
        'DESTINO_DOMICILIO_LOCALIDAD',
        'DESTINO_DOMICILIO_PROVINCIA',
        'PROPIO_DESTINO_DOMICILIO_CODIGO',
        'ENTREGA_DOMICILIO_ORIGEN',
        'ORIGEN_CUIT',
        'ORIGEN_RAZON_SOCIAL',
        'EMISOR_TENEDOR',
        'ORIGEN_DOMICILIO_CALLE',
        'ORIGEN DOMICILIO_NUMBERO',
        'ORIGEN_DOMICILIO_COMPLE',
        'ORIGEN_DOMICILIO_PISO',
        'ORIGEN_DOMICILIO_DTO',
        'ORIGEN_DOMICILIO_BARRIO',
        'ORIGEN_DOMICILIO_CODIGOP',
        'ORIGEN_DOMICILIO_LOCALIDAD',
        'ORIGEN_DOMICILIO_PROVINCIA',
        'TRANSPORTISTA_CUIT',
        'TIPO_RECORRIDO',
        'RECORRIDO_LOCALIDAD',
        'RECORRIDO_CALLE',
        'RECORRIDO_RUTA',
        'PATENTE_VEHICULO',
        'PATENTE_ACOPLADO',
        'PRODUCTO_NO_TERM_DEV',
        'IMPORTE',
        ],
    '03': [
        'TIPO_REGISTRO',
        'CODIGO_UNICO_PRODUCTO',
        'RENTAS_CODIGO_UNIDAD_MEDIDA',
        'CANTIDAD',
        'PROPIO_CODIGO_PRODUCTO',
        'PROPIO_DESCRIPCION_PRODUCTO',
        'PROPIO_DESCRIPCION_UNIDAD_MEDIDA',
        'CANTIDAD_AJUSTADA',
        ],
    '04': [
        'TIPO_REGISTRO',
        'CANTIDAD_TOTAL_REMITOS',
        ]
    }
f = open(sys.argv[1])
for l in f:
reg = l[0:2]
if reg in registros:
print "Registro: ", registros[reg]
campos = l.strip("\r").strip("\n").split("|")
for i, campo in enumerate(campos):
print " * %s: |%s|" % (formato[reg][i], campo, )
else:
print "registro incorrecto:", l
# ----- file boundary (dataset extraction artifact removed) -----
# To build the executable:
# python setup.py py2exe
#
"Installer builder for COT"
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2011 Mariano Reingart"
from distutils.core import setup
import py2exe
import glob, sys
# modules py2exe's static analysis misses (email MIME machinery)
includes=['email.generator', 'email.iterators', 'email.message', 'email.utils']
# don't pull in all this MFC stuff used by the makepy UI.
excludes=["pywin", "pywin.dialogs", "pywin.dialogs.list", "win32ui"]
opts = {
    'py2exe': {
    'includes':includes,
    'optimize':2,
    'excludes': excludes,
    # system DLLs that must not be bundled with the frozen app
    'dll_excludes': ["mswsock.dll", "powrprof.dll", "KERNELBASE.dll",
        "API-MS-Win-Core-LocalRegistry-L1-1-0.dll",
        "API-MS-Win-Core-ProcessThreads-L1-1-0.dll",
        "API-MS-Win-Security-Base-L1-1-0.dll"
        ],
    'skip_archive': True,
    }}
data_files = [
    (".", ["licencia.txt"]),
    ]
import cot
from nsis import build_installer, Target
setup(
    name="COT",
    # version is suffixed -homo/-full depending on homologation mode
    version=cot.__version__ + (cot.HOMO and '-homo' or '-full'),
    description="Interfaz PyAfipWs COT",
    long_description=cot.__doc__,
    author="Mariano Reingart",
    author_email="reingart@gmail.com",
    url="http://www.sistemasagiles.com.ar",
    license="GNU GPL v3",
    com_server = [Target(module=cot,modules="cot")],
    console=[Target(module=cot, script='cot.py', dest_base="cot_cli"),
        ],
    options=opts,
    data_files = data_files,
    cmdclass = {"py2exe": build_installer}
    )
# ----- file boundary (dataset extraction artifact removed) -----
#!/usr/bin/python
# -*- coding: latin-1 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"Manejo de XML simple"
__author__ = "Mariano Reingart (mariano@nsis.com.ar)"
__copyright__ = "Copyright (C) 2008/009 Mariano Reingart"
__license__ = "LGPL 3.0"
__version__ = "1.0"
import xml.dom.minidom
DEBUG = False  # set True to trace XML creation/search operations to stdout
class SimpleXMLElement(object):
    "Simple XML manipulation class (inspired by PHP's SimpleXML)"
    def __init__(self, text = None, elements = None, document = None, namespace = None, prefix=None):
        # Wrap either an XML string (*text*) or an existing DOM node list
        # (*elements* + *document*); *namespace*/*prefix* qualify created
        # and searched tags.
        self.__ns = namespace
        self.__prefix = prefix
        if text:
            try:
                self.__document = xml.dom.minidom.parseString(text)
            except:
                # parse failures are re-raised; optionally dump the offending text
                if DEBUG: print text
                raise
            self.__elements = [self.__document.documentElement]
        else:
            self.__elements = elements
            self.__document = document
    def addChild(self,tag,text=None,ns=True):
        "Append a child element (optionally with a text node) and return it wrapped."
        if not ns or not self.__ns:
            if DEBUG: print "adding %s ns %s %s" % (tag, self.__ns,ns)
            element = self.__document.createElement(tag)
        else:
            if DEBUG: print "adding %s ns %s %s" % (tag, self.__ns,ns)
            element = self.__document.createElementNS(self.__ns, "%s:%s" % (self.__prefix, tag))
        if text:
            if isinstance(text, unicode):
                element.appendChild(self.__document.createTextNode(text))
            else:
                element.appendChild(self.__document.createTextNode(str(text)))
        self.__element.appendChild(element)
        return SimpleXMLElement(
            elements=[element],
            document=self.__document,
            namespace=self.__ns,
            prefix=self.__prefix)
    def asXML(self,filename=None):
        # NOTE(review): *filename* is accepted but ignored — the XML is only
        # returned, never written to disk; confirm intended behavior.
        return self.__document.toxml('UTF-8')
    def __getattr__(self,tag):
        "Find child elements by tag name (namespace-aware when one was given)."
        try:
            if self.__ns:
                if DEBUG: print "searching %s by ns=%s" % (tag,self.__ns)
                elements = self.__elements[0].getElementsByTagNameNS(self.__ns, tag)
            # fall back to a plain tag search when no namespace or no match
            if not self.__ns or not elements:
                if DEBUG: print "searching %s " % (tag)
                elements = self.__elements[0].getElementsByTagName(tag)
            if not elements:
                if DEBUG: print self.__elements[0].toxml()
                raise AttributeError("Sin elementos")
            return SimpleXMLElement(
                elements=elements,
                document=self.__document,
                namespace=self.__ns,
                prefix=self.__prefix)
        except AttributeError, e:
            raise AttributeError("Tag not found: %s (%s)" % (tag, str(e)))
    def __iter__(self):
        "Iterate over xml tags"
        try:
            # wrap each matched DOM element individually
            for __element in self.__elements:
                yield SimpleXMLElement(
                    elements=[__element],
                    document=self.__document,
                    namespace=self.__ns,
                    prefix=self.__prefix)
        except:
            raise
    def __getitem__(self,item):
        "Return xml attribute"
        return getattr(self.__element, item)
    def __contains__( self, item):
        # membership test: true when a descendant with that tag name exists
        return self.__element.getElementsByTagName(item)
    def __unicode__(self):
        # text content of the first child node
        return self.__element.childNodes[0].data
    def __str__(self):
        # concatenated UTF-8 text of all direct text-node children
        if self.__element.childNodes:
            rc = ""
            for node in self.__element.childNodes:
                if node.nodeType == node.TEXT_NODE:
                    rc = rc + node.data.encode("utf8","ignore")
            return rc
        return ''
    def __repr__(self):
        return repr(self.__str__())
    def __int__(self):
        return int(self.__str__())
    def __float__(self):
        try:
            return float(self.__str__())
        except:
            raise IndexError(self.__element.toxml())
    # convenience alias for the first wrapped DOM element
    __element = property(lambda self: self.__elements[0])
if __name__ == "__main__":
span = SimpleXMLElement('<span><a href="google.com">google</a><prueba><i>1</i><float>1.5</float></prueba></span>')
print str(span.a)
print int(span.prueba.i)
print float(span.prueba.float)
span = SimpleXMLElement('<span><a href="google.com">google</a><a>yahoo</a><a>hotmail</a></span>')
for a in span.a:
print str(a)
span.addChild('a','altavista')
print span.asXML() | Python |
#!/usr/bin/python
"""muxer.py
Create a list of feeds and then mix them together into one super feed.
@author Philip Cadigan < phil@inkhorn.org >
@date 3/24/2009
"""
# ---------- CUSTOMIZE FOR YOUR SITE ----------
# for each feed you wish to add, simply put the URL in quotes followed by a comma below.
feed_list = [] # ex: ["http://feeds.feedburner.com/inkhorn/ZPyO","http://del.icio.us/rss/pcad",]

# the meta information for your feed.
meta = {}
meta['title'] = "" # ex: "The inkhorn.org feed"
meta['link'] = "" # ex: "http://inkhorn.org"
meta['feedLink'] = "" # ex: 'http://inkhorn.org/feed/'
meta['description'] = "" # ex: "philip cadigan's wonderful world of inkhornism"
# BUGFIX: this line originally called format_date(gmtime()) before the time
# import and before format_date() was defined, crashing with NameError on
# import; build the RFC 822 timestamp directly from the stdlib instead.
from time import gmtime, strftime
meta['pubDate'] = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())  # now!
meta['generator'] = "feedmux"
# the name of the file where your new feed will live
feed_file = "new.xml" # ex: /home/me/web/rss.xml
# ---------- for basic functionality you shouldn't need to touch this ----------
import feedparser
from operator import itemgetter
import os
import stat
import sys
import traceback
from time import gmtime, strftime
def muxer(site_list):
    """Merge the entries of every parsed feed in *site_list* into one list,
    newest first (ordered by 'updated_parsed')."""
    merged = [entry for site in site_list for entry in site.entries]
    merged.sort(key=itemgetter('updated_parsed'))
    merged.reverse()
    return merged
def rss2(entries, meta):
    """Render *entries* plus the channel *meta* dict as an RSS 2.0 document
    string; title/link/description values are CDATA-wrapped."""
    head = """<?xml version="1.0" encoding="UTF-8"?><rss version="2.0"
    xmlns:content="http://purl.org/rss/1.0/modules/content/"
    xmlns:wfw="http://wellformedweb.org/CommentAPI/"
    xmlns:dc="http://purl.org/dc/elements/1.1/"
    xmlns:atom="http://www.w3.org/2005/Atom"
    ><channel>"""
    foot = "</channel></rss>"
    # channel metadata block (note: rebinds *meta* from dict to string)
    meta = """<title>%s</title>
    <atom:link href="%s" rel="self" type="application/rss+xml" />
    <link>%s</link>
    <description>%s</description>
    <pubDate>%s</pubDate>
    <generator>%s</generator>
    <language>en</language>""" % (meta['title'], meta['feedLink'], meta['link'], meta['description'], meta['pubDate'], meta['generator'])
    body = ""
    for entry in entries:
        item = "<item>"
        item += "<title><![CDATA[%s]]></title>" % val_from_entry(entry, "title")
        item += "<link><![CDATA[%s]]></link>" % val_from_entry(entry, "link")
        item += "<pubDate>%s</pubDate>" % format_date(val_from_entry(entry, "created_parsed", "updated_parsed"))
        item += "<dc:creator>%s</dc:creator>" % val_from_entry(entry, "author")
        #print val_from_entry(entry, "tags")
        cats = get_cats(entry)
        if cats:
            item += "<category><![CDATA[%s]]></category>" % ",".join(cats)
        # prefer the pre-FeedBurner original link as the stable GUID
        item += '<guid isPermaLink="false"><![CDATA[%s]]></guid>' % val_from_entry(entry, "feedburner_origlink", "link")
        item += '<description><![CDATA[%s[...]]]></description>' % val_from_entry(entry, "summary")
        content = val_from_entry(entry, "content")
        if content:
            # feedparser content is a list; take the first part's value
            content = content[0].value
            item += '<content:encoded><![CDATA[%s]]></content:encoded>' % content
        item += "</item>"
        body += item
    return head + meta + body + foot
def get_cats(entry):
    """Collect category terms from a feed entry's tags, splitting each term
    on its scheme (when present) or on whitespace."""
    tags = val_from_entry(entry, "tags")
    if not tags:
        return []
    cats = []
    for tag in tags:
        if 'term' not in tag:
            continue
        scheme = tag['scheme'] if 'scheme' in tag else None
        if scheme != None:
            pieces = tag['term'].split(scheme)
        else:
            pieces = tag['term'].split()
        cats.extend(pieces)
    return cats
def val_from_entry(entry, *args):
    """Return the first attribute of *entry* found among *args*, or ""."""
    for name in args:
        try:
            return getattr(entry, name)
        except AttributeError:
            continue
    return ""
def format_date(date_ts):
    """Format a struct_time as an RFC 822 timestamp (always +0000)."""
    patron = "%a, %d %b %Y %H:%M:%S +0000"
    return strftime(patron, date_ts)
if __name__ == "__main__":
    try:
        # you shouldn't need to edit below this portion
        # fetch and parse every configured feed
        parsed_feeds = []
        for feed in feed_list:
            parsed_feeds.append(feedparser.parse(feed))
        new_entries = muxer(parsed_feeds)
        st = rss2(new_entries, meta)
        # write the merged feed and make it world-readable
        f = file(feed_file, "w")
        f.write(st.encode("utf-8"))
        f.close()
        os.chmod(feed_file, 0644)#stat.S_IREAD|stat.S_IWRITE|stat.S_IRGRP|stat.S_IROTH
    except Exception, e:
        # NOTE(review): traceback.print_exc() prints and returns None, so
        # this `print` emits an extra "None" line.
        print traceback.print_exc()
        sys.exit(1)
#!/usr/bin/python
"""muxer.py
Create a list of feeds and then mix them together into one super feed.
@author Philip Cadigan < phil@inkhorn.org >
@date 3/24/2009
"""
# ---------- CUSTOMIZE FOR YOUR SITE ----------
# for each feed you wish to add, simply put the URL in quotes followed by a comma below.
feed_list = [] # ex: ["http://feeds.feedburner.com/inkhorn/ZPyO","http://del.icio.us/rss/pcad",]
# the meta information for your feed.
meta = {}
meta['title'] = "" # ex: "The inkhorn.org feed"
meta['link'] = "" # ex: "http://inkhorn.org"
meta['feedLink'] = "" # ex: 'http://inkhorn.org/feed/'
meta['description'] = "" # ex: "philip cadigan's wonderful world of inkhornism"
# NOTE(review): format_date and gmtime are defined/imported further down
# this file — executed in this order the next line raises NameError;
# verify the intended file layout.
meta['pubDate'] = format_date(gmtime())# now!
meta['generator'] = "feedmux"
# the name of the file where your new feed will live
feed_file = "new.xml" # ex: /home/me/web/rss.xml
# ---------- for basic functionality you shouldn't need to touch this ----------
import feedparser
from operator import itemgetter
import os
import stat
import sys
import traceback
from time import gmtime, strftime
def muxer(site_list):
    """Merge the entries of several parsed feeds into one list, newest first."""
    merged = [entry for site in site_list for entry in site.entries]
    merged.sort(key=itemgetter('updated_parsed'))
    merged.reverse()
    return merged
def rss2(entries, meta):
    """Render *entries* as a single RSS 2.0 document string.

    *meta* supplies the channel-level fields (title, feedLink, link,
    description, pubDate, generator).  NOTE(review): the local name
    ``meta`` is rebound below, shadowing the dict parameter once the
    channel header has been built.
    """
    head = """<?xml version="1.0" encoding="UTF-8"?><rss version="2.0"
xmlns:content="http://purl.org/rss/1.0/modules/content/"
xmlns:wfw="http://wellformedweb.org/CommentAPI/"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:atom="http://www.w3.org/2005/Atom"
><channel>"""
    foot = "</channel></rss>"
    # Channel metadata block (shadows the `meta` parameter from here on).
    meta = """<title>%s</title>
<atom:link href="%s" rel="self" type="application/rss+xml" />
<link>%s</link>
<description>%s</description>
<pubDate>%s</pubDate>
<generator>%s</generator>
<language>en</language>""" % (meta['title'], meta['feedLink'], meta['link'], meta['description'], meta['pubDate'], meta['generator'])
    body = ""
    for entry in entries:
        # One <item> per entry; free-text fields are CDATA-wrapped verbatim.
        item = "<item>"
        item += "<title><![CDATA[%s]]></title>" % val_from_entry(entry, "title")
        item += "<link><![CDATA[%s]]></link>" % val_from_entry(entry, "link")
        item += "<pubDate>%s</pubDate>" % format_date(val_from_entry(entry, "created_parsed", "updated_parsed"))
        item += "<dc:creator>%s</dc:creator>" % val_from_entry(entry, "author")
        #print val_from_entry(entry, "tags")
        cats = get_cats(entry)
        if cats:
            item += "<category><![CDATA[%s]]></category>" % ",".join(cats)
        # Prefer the pre-FeedBurner original link for the GUID when present.
        item += '<guid isPermaLink="false"><![CDATA[%s]]></guid>' % val_from_entry(entry, "feedburner_origlink", "link")
        item += '<description><![CDATA[%s[...]]]></description>' % val_from_entry(entry, "summary")
        content = val_from_entry(entry, "content")
        if content:
            # feedparser exposes content as a list of content objects.
            content = content[0].value
            item += '<content:encoded><![CDATA[%s]]></content:encoded>' % content
        item += "</item>"
        body += item
    return head + meta + body + foot
def get_cats(entry):
    """Return a flat list of category strings for a feedparser entry.

    Each feedparser tag dict may carry a ``scheme`` prefix inside its
    ``term``; when a scheme is present the term is split on it, otherwise
    the term is split on whitespace.  Tags without a ``term`` key are
    skipped.  Returns [] when the entry has no tags at all.
    """
    tags = val_from_entry(entry, "tags")
    if not tags:
        return []
    bits = []
    for tag in tags:
        # dict.has_key() is deprecated (gone in Python 3); `in` works in both.
        if 'term' not in tag:
            continue
        if tag.get('scheme') is not None:
            tmp_bits = tag['term'].split(tag['scheme'])
        else:
            tmp_bits = tag['term'].split()
        bits.extend(tmp_bits)
    return bits
def val_from_entry(entry, *args):
    """Return the first attribute of *entry* named in *args*, or "" if
    none of the names exist on the entry."""
    sentinel = object()
    for name in args:
        value = getattr(entry, name, sentinel)
        if value is not sentinel:
            return value
    return ""
def format_date(date_ts):
    """Format a time 9-tuple/struct_time as an RFC 822 pubDate string.

    Example output: ``Mon, 19 Jan 2009 15:09:53 +0000``.  (The original
    docstring showed an ISO-8601 example, which is not what this strftime
    format produces.)  The hard-coded +0000 offset assumes *date_ts* is
    UTC, e.g. from gmtime()/feedparser's *_parsed fields.
    """
    return strftime("%a, %d %b %Y %H:%M:%S +0000", date_ts)
if __name__ == "__main__":
    try:
        # you shouldn't need to edit below this portion
        parsed_feeds = [feedparser.parse(feed) for feed in feed_list]
        new_entries = muxer(parsed_feeds)
        st = rss2(new_entries, meta)
        # open() instead of the file() builtin (removed in Python 3);
        # close the handle even if the write fails.
        f = open(feed_file, "w")
        try:
            f.write(st.encode("utf-8"))
        finally:
            f.close()
        # 0644: owner rw, group/other read — portable spelling via stat.
        os.chmod(feed_file, stat.S_IREAD | stat.S_IWRITE | stat.S_IRGRP | stat.S_IROTH)
    except Exception:
        # BUG FIX: traceback.print_exc() prints and returns None, so the
        # old `print traceback.print_exc()` also emitted a spurious "None".
        traceback.print_exc()
        sys.exit(1)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from django import forms
from django.utils.translation import ugettext as _
from feedjack.models import Tag, Subscriber, Post
# Search scopes offered by SearchForm's `w` field:
# (stored query-string value, translated label shown to the user).
SEARCH_CHOICES = (
    ("posts", _("Posts")),
    ("tags", _("Tags")),
    ("blogs", _("Blogs")),
    ("authors", _("Authors")),
)
class SearchForm(forms.Form):
    """Site search form: `w` selects what to search, `q` is the query text."""
    w = forms.ChoiceField(choices=SEARCH_CHOICES, label="")
    q = forms.CharField(max_length=100, label="")
| Python |
# -*- coding: utf-8 -*-
from django.conf import settings
from feedjack.models import Site, Link
from feedjack_extension.forms import SearchForm
def context(request):
    """Context processor: expose the current feedjack Site, its links and
    a (possibly bound) search form to every template."""
    is_search = request.method == "GET" and request.GET.get("search")
    search_form = SearchForm(request.GET) if is_search else SearchForm()
    return {
        "site": Site.objects.get(pk=settings.FEEDJACK_SITE_ID),
        "links": Link.objects.all(),
        "search_form": search_form,
    }
| Python |
# -*- coding: utf-8 -*-
import os
from datetime import datetime
from django.http import HttpResponse
from django.utils import feedgenerator
from django.utils.cache import patch_vary_headers
from django.core.urlresolvers import reverse
from django.conf import settings
from django.shortcuts import get_object_or_404, Http404
from django.template.defaultfilters import linebreaks, escape, capfirst
from django.utils.translation import ugettext_lazy as _
from atompub import atomformat
from feedjack.models import Post, Site, Subscriber, Tag, Feed
ITEMS_PER_FEED = getattr(settings, 'FJ_EXTENSION_ITEMS_PER_FEED', 50)
class BasePostFeed(atomformat.Feed):
    """Common item-level Atom behaviour for feedjack Posts.

    Subclasses supply the feed-level metadata (feed_id, feed_title,
    items, ...); this base maps a Post to its Atom item fields.
    """

    def __init__(self, *args, **kwargs):
        # BUG FIX: the original called super().__init__(args, kwargs),
        # passing the tuple and the dict as two positional arguments
        # instead of unpacking them.
        super(BasePostFeed, self).__init__(*args, **kwargs)
        self.site = Site.objects.get(pk=settings.FEEDJACK_SITE_ID)

    def item_id(self, post):
        return post.guid

    def item_title(self, post):
        return post.title

    def item_updated(self, post):
        return post.date_modified

    def item_published(self, post):
        return post.date_created

    def item_content(self, post):
        # Escape then add <p>/<br> markup; served with type="html".
        return {"type": "html", }, linebreaks(escape(post.content))

    def item_links(self, post):
        return [{"href": reverse("post_show", args=(post.pk,))}]

    def item_authors(self, post):
        return [{"name": post.author}]
class PostFeed(BasePostFeed):
    """Atom feed of all posts aggregated for the current site."""

    def feed_id(self):
        return reverse("posts_list")

    def feed_title(self):
        return _("Posts in %s") % self.site.name

    def feed_subtitle(self):
        return _("All posts")

    def feed_updated(self):
        qs = Post.objects.filter(feed__subscriber__site=self.site)
        # We return an arbitrary date if there are no results, because there
        # must be a feed_updated field as per the Atom specifications, however
        # there is no real data to go by, and an arbitrary date can be static.
        if qs.count() == 0:
            return datetime(year=2008, month=7, day=1)
        return qs.latest('date_created').date_created

    def feed_links(self):
        return ({'href': reverse('posts_list')},)

    def items(self):
        # Newest first, capped by FJ_EXTENSION_ITEMS_PER_FEED.
        posts_list = Post.objects.filter(feed__subscriber__site=self.site
            ).order_by("-date_created")[:ITEMS_PER_FEED]
        return posts_list
class SubscriberFeed(BasePostFeed):
    """Atom feed of every post belonging to one active subscriber."""

    def get_object(self, params):
        # params[0] is the subscriber pk taken from the URL tail.
        return get_object_or_404(Subscriber, pk=params[0], is_active=True)

    def feed_id(self, subscriber):
        return reverse("subscriber_show", args=(subscriber.pk, ))

    def feed_title(self, subscriber):
        return _("Posts by %s - %s") % (subscriber.name, self.site.name)

    def feed_updated(self, subscriber):
        qs = Post.objects.filter(feed__subscriber=subscriber).distinct()
        # We return an arbitrary date if there are no results, because there
        # must be a feed_updated field as per the Atom specifications, however
        # there is no real data to go by, and an arbitrary date can be static.
        if qs.count() == 0:
            return datetime(year=2008, month=7, day=1)
        return qs.latest('date_created').date_created

    def feed_links(self, subscriber):
        return ({'href': reverse("subscriber_show", args=(subscriber.pk, ))},)

    def items(self, subscriber):
        # Newest first, capped by FJ_EXTENSION_ITEMS_PER_FEED.
        return Post.objects.filter(feed__subscriber=subscriber,
            ).distinct().order_by("-date_created")[:ITEMS_PER_FEED]
class BlogFeed(BasePostFeed):
    """Atom feed of the posts from a single source feed ("blog")."""

    def get_object(self, params):
        # params[0] is the Feed pk taken from the URL tail.
        return get_object_or_404(Feed, pk=params[0], is_active=True)

    def feed_id(self, feed):
        return reverse("feed_show", args=(feed.pk, ))

    def feed_title(self, feed):
        return _("Posts in %s - %s") % (feed.title, self.site.name)

    def feed_subtitle(self, feed):
        return "%s - %s" % (feed.tagline, feed.link)

    def feed_updated(self, feed):
        qs = Post.objects.filter(feed=feed,
            feed__subscriber__site=self.site).distinct()
        # We return an arbitrary date if there are no results, because there
        # must be a feed_updated field as per the Atom specifications, however
        # there is no real data to go by, and an arbitrary date can be static.
        if qs.count() == 0:
            return datetime(year=2008, month=7, day=1)
        return qs.latest('date_created').date_created

    def feed_links(self, feed):
        return ({'href': reverse("feed_show", args=(feed.pk, ))},)

    def items(self, feed):
        # Newest first, capped by FJ_EXTENSION_ITEMS_PER_FEED.
        return Post.objects.filter(feed=feed,
            feed__subscriber__site=self.site).distinct(
            ).order_by("-date_created")[:ITEMS_PER_FEED]
class TagFeed(BasePostFeed):
    """Atom feed of the site's posts carrying one tag (looked up by name)."""

    def get_object(self, params):
        # params[0] is the tag *name* (not pk) taken from the URL tail.
        return get_object_or_404(Tag, name=params[0])

    def feed_id(self, tag):
        return reverse("tag_show", args=(tag.pk, ))

    def feed_title(self, tag):
        return _("Posts under %s tag - %s") % (tag, self.site.name)

    def feed_updated(self, tag):
        qs = Post.objects.filter(tags__name=tag,
            feed__subscriber__site=self.site).distinct()
        # We return an arbitrary date if there are no results, because there
        # must be a feed_updated field as per the Atom specifications, however
        # there is no real data to go by, and an arbitrary date can be static.
        if qs.count() == 0:
            return datetime(year=2008, month=7, day=1)
        return qs.latest('date_created').date_created

    def feed_links(self, tag):
        return ({'href': reverse("tag_show", args=(tag.pk, ))},)

    def items(self, tag):
        # Newest first, capped by FJ_EXTENSION_ITEMS_PER_FEED.
        return Post.objects.filter(tags__name=tag, feed__subscriber__site=self.site
            ).distinct().order_by("-date_created")[:ITEMS_PER_FEED]
class SubscriberTagFeed(BasePostFeed):
    """Atom feed of one subscriber's posts restricted to a single tag.

    Instantiated by SubscriberFeedChooser as cls(url, request, tag=tag).
    """

    def __init__(self, *args, **kwargs):
        # BUG FIX: pop our extra `tag` kwarg before delegating so it is not
        # forwarded to the base class, and unpack args/kwargs (the original
        # passed the tuple and the dict as two positional arguments).
        self.tag = kwargs.pop("tag")
        super(SubscriberTagFeed, self).__init__(*args, **kwargs)

    def get_object(self, params):
        return get_object_or_404(Subscriber, pk=params[0], is_active=True)

    def feed_id(self, subscriber):
        return reverse("by_tag_subscriber_show", args=(subscriber.pk, self.tag))

    def feed_title(self, subscriber):
        return _("Posts by %s under %s tag - %s")\
            % (subscriber.name, self.tag, self.site.name)

    def feed_updated(self, subscriber):
        qs = Post.objects.filter(feed__subscriber=subscriber,
            tags__name=self.tag).distinct()
        # Arbitrary-but-static fallback: Atom requires a feed_updated value
        # even when there are no posts to derive one from.
        if qs.count() == 0:
            return datetime(year=2008, month=7, day=1)
        return qs.latest('date_created').date_created

    def feed_links(self, subscriber):
        return ({'href': reverse("by_tag_subscriber_show", args=(subscriber.pk, self.tag))},)

    def items(self, subscriber):
        return Post.objects.filter(
            feed__subscriber=subscriber, tags__name=self.tag
        ).distinct().order_by("-date_created")[:ITEMS_PER_FEED]
class SubscriberFeedChooser:
    """Dispatch /feeds/subscribers/... URL tails to the plain subscriber
    feed or the tag-filtered one, based on the trailing path parameters."""

    def __init__(self, *args, **kwargs):
        # Called as cls(url, request) — presumably by the atompub URL glue.
        self.url = args[0]
        self.request = args[1]

    def get_feed(self, params):
        parts = params.split("/")
        if len(parts) == 1:
            # "<subscriber_id>" -> all posts by that subscriber.
            return SubscriberFeed(self.url, self.request).get_feed(params)
        # Beyond that, only "<subscriber_id>/tags/<tag>" is accepted.
        # (Http404("...") replaces the deprecated `raise Http404, "..."`.)
        if len(parts) != 3 or parts[1] != "tags":
            raise Http404("Feed does not exist")
        return SubscriberTagFeed(self.url, self.request, tag=parts[2]).get_feed(params)
def rss_feed(request, tag=None, subscriber_id=None):
    """RSS 2.0 feed of the site's posts, optionally filtered by tag name
    and/or subscriber id.

    Mirrors the Atom feed classes above, but built directly with Django's
    feedgenerator.
    """
    site = get_object_or_404(Site, pk=settings.FEEDJACK_SITE_ID)
    params_dict = {"feed__subscriber__site": site}
    pretitle = ""
    title = "%s (RSS Feed)" % site.title
    if tag:
        params_dict.update({"tags__name": tag})
        pretitle = "%s %s " % (tag, _("in"))
    if subscriber_id:
        params_dict.update({"feed__subscriber": subscriber_id})
        subscriber = Subscriber.objects.get(pk=subscriber_id)
        pretitle = "%s %s " % (subscriber.name, _("in"))
    # Consistency fix: reuse the module-level ITEMS_PER_FEED constant
    # instead of duplicating the settings try/except AttributeError here.
    posts_count = ITEMS_PER_FEED
    object_list = Post.objects.filter(**params_dict).distinct()[:posts_count]
    feed = feedgenerator.Rss201rev2Feed(title=pretitle + title,
        link=site.url, description=site.description,
        feed_url=os.path.join(site.url, reverse("default_feed")))
    for post in object_list:
        feed.add_item(
            title='%s: %s' % (post.feed.name, post.title),
            link=reverse("post_show", args=(post.pk,)),
            description=post.content,
            author_email=post.author_email,
            author_name=post.author,
            pubdate=post.date_modified,
            unique_id=post.link,
            # BUG FIX: loop variable renamed from `tag` — in Python 2 a
            # list comprehension leaks its variable and clobbered the
            # `tag` argument on each iteration.
            categories=[t.name for t in post.tags.all()]
        )
    response = HttpResponse(mimetype=feed.mime_type)
    # per host caching
    patch_vary_headers(response, ['Host'])
    feed.write(response, 'utf-8')
    return response
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from django import forms
from django.utils.translation import ugettext as _
from feedjack.models import Tag, Subscriber, Post
# Search scopes offered by SearchForm's `w` field:
# (stored query-string value, translated label shown to the user).
SEARCH_CHOICES = (
    ("posts", _("Posts")),
    ("tags", _("Tags")),
    ("blogs", _("Blogs")),
    ("authors", _("Authors")),
)
class SearchForm(forms.Form):
    """Site search form: `w` selects what to search, `q` is the query text."""
    w = forms.ChoiceField(choices=SEARCH_CHOICES, label="")
    q = forms.CharField(max_length=100, label="")
| Python |
# -*- coding: utf-8 -*-
"""
Based on Feedjack urls.py by Gustavo Picón
urls.py
"""
from django.conf.urls.defaults import patterns, url
# Browsing views (subscribers / feeds / tags / posts / search).
# More specific patterns (.../tags/...) must precede the generic ones.
urlpatterns = patterns('feedjack_extension.views',
    url(r'^subscribers/(?P<subscriber_id>\d+)/tags/(?P<tag>.*)/$', "subscriber_show", name="by_tag_subscriber_show"),
    url(r'^subscribers/(?P<subscriber_id>\d+)/$', "subscriber_show", name="subscriber_show"),
    url(r'^subscribers/$', "subscribers_list", name="subscribers_list"),
    url(r'^feeds/(?P<feed_id>\d+)/tags/(?P<tag>.*)/$', "feed_show", name="by_tag_feed_show"),
    url(r'^feeds/(?P<feed_id>\d+)/$', "feed_show", name="feed_show"),
    url(r'^feeds/$', "feeds_list", name="feeds_list"),
    url(r'^tags/(?P<tag>.*)/feeds/$', "tag_feeds_list", name="tag_feeds_list"),
    url(r'^tags/(?P<tag>.*)/subscribers/$', "tag_subscribers_list", name="tag_subscribers_list"),
    url(r'^tags/(?P<tag>.*)/$', "tag_show", name="tag_show"),
    url(r'^tags/$', "tags_cloud", name="tags_cloud"),
    url(r'^opml/$', "opml", name="opml"),
    url(r'^foaf/$', "foaf", name="foaf"),
    url(r'^posts/(?P<post_id>\d+)/$', "post_show", name="post_show"),
    url(r'^posts/$', "posts_list", name="posts_list"),
    url(r'^search/$', "search", name="search"),
)
# RSS endpoints, all served by feedjack_extension.feeds.rss_feed.
urlpatterns += patterns('feedjack_extension.feeds',
    url(r'^posts/rss20.xml$', "rss_feed", name="default_feed"),
    url(r'^posts/feeds/rss/$', "rss_feed", name="rss_feed"),
    url(r'^feeds/rss/tags/(?P<tag>.*)/$', "rss_feed", name="tag_rss_feed"),
    url(r'^feeds/rss/subscribers/(?P<subscriber_id>\d+)/$', "rss_feed", name="subscriber_rss_feed"),
    url(r'^feeds/rss/subscribers/(?P<subscriber_id>\d+)/tags/(?P<tag>.*)/$', "rss_feed", name="tag_subscriber_rss_feed"),
)
#url(r'^posts/feed/$', "atom_feed", name="atom_feed"),
#url(r'^posts/feed/$', "atom_feed", name="default_feed"),
#url(r'^posts/feeds/atom/$', "atom_feed", name="atom_feed"),
#url(r'^feeds/tags/(?P<tag>.*)/$', "atom_feed", name="tag_feed"),
#url(r'^feeds/atom/tags/(?P<tag>.*)/$', "atom_feed", name="tag_atom_feed"),
#url(r'^feeds/subscribers/(?P<subscriber_id>\d+)/$', "atom_feed", name="subscriber_feed"),
#url(r'^feeds/atom/subscribers/(?P<subscriber_id>\d+)/$', "atom_feed", name="subscriber_atom_feed"),
#url(r'^feeds/subscribers/(?P<subscriber_id>\d+)/tag/(?P<tag>.*)/$', "atom_feed", name="subscriber_tag_feed"),
#url(r'^feeds/atom/subscribers/(?P<subscriber_id>\d+)/tag/(?P<tag>.*)/$', "atom_feed", name="tag_subscriber_atom_feed"),
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from django import template
from django.db.models import Count, Max, F
from django.conf import settings
from feedjack.models import Tag, Subscriber, Feed
register = template.Library()
@register.simple_tag
def normalize_number(number, max_values, levels=16, base_size=14):
    """Map a count onto a font size for the tag clouds.

    Returns base_size + (number / max_values) * levels, i.e. a value in
    [base_size, base_size + levels] for number in [0, max_values].
    `base_size` generalizes the previously hard-coded minimum of 14
    (default unchanged, so existing templates behave identically).
    """
    return base_size + ((float(number) / float(max_values)) * levels)
@register.inclusion_tag('tags/blocks/cloud.html')
def tags_cloud(min_posts_count=1):
    """Render the global tag cloud (tags with more than min_posts_count posts)."""
    max_posts = Tag.objects.annotate(count=Count("post"))
    max_posts = max_posts.filter(
        count__gt=min_posts_count, name__isnull=False)
    max_posts = max_posts.aggregate(Max("count"))
    # Largest per-tag post count; the template scales font sizes with it.
    max_posts_count = max_posts["count__max"]
    tags_cloud = Tag.objects.annotate(count=Count("post"))
    tags_cloud = tags_cloud.filter(name__isnull=False,
        count__gt=min_posts_count).order_by("name")
    return {"tags_cloud": tags_cloud, "max_posts_count": max_posts_count}
@register.inclusion_tag('tags/blocks/subscribers_cloud.html')
def tags_cloud_for_subscriber(subscriber=None, min_posts_count=0):
    """Tag cloud restricted to one subscriber's posts."""
    max_posts = Tag.objects.annotate(count=Count("post"))
    max_posts = max_posts.filter(name__isnull=False,
        count__gt=min_posts_count, post__feed__subscriber=subscriber)
    max_posts = max_posts.aggregate(Max("count"))
    # Largest per-tag post count; the template scales font sizes with it.
    max_posts_count = max_posts["count__max"]
    tags_cloud = Tag.objects.annotate(count=Count("post"))
    tags_cloud = tags_cloud.filter(name__isnull=False,
        count__gt=min_posts_count, post__feed__subscriber=subscriber
        ).order_by("name")
    return {"subscriber": subscriber, "tags_cloud": tags_cloud,
        "max_posts_count": max_posts_count}
@register.inclusion_tag('tags/blocks/feeds_cloud.html')
def tags_cloud_for_feed(feed=None, min_posts_count=0):
    """Tag cloud restricted to one source feed's posts."""
    max_posts = Tag.objects.annotate(count=Count("post"))
    max_posts = max_posts.filter(name__isnull=False,
        count__gt=min_posts_count, post__feed=feed)
    max_posts = max_posts.aggregate(Max("count"))
    # Largest per-tag post count; the template scales font sizes with it.
    max_posts_count = max_posts["count__max"]
    tags_cloud = Tag.objects.annotate(count=Count("post"))
    tags_cloud = tags_cloud.filter(name__isnull=False,
        count__gt=min_posts_count, post__feed=feed
        ).order_by("name")
    return {"feed": feed, "tags_cloud": tags_cloud,
        "max_posts_count": max_posts_count}
@register.inclusion_tag('subscribers/blocks/list_for_tag.html')
def subscribers_about(tag):
    """List the site's subscribers that have at least one post tagged *tag*."""
    qs = Subscriber.objects.filter(
        site=settings.FEEDJACK_SITE_ID,
        feed__post__tags__name=tag,
    ).distinct()
    return {"subscribers": qs, "tag": tag}
@register.inclusion_tag('feeds/blocks/list_for_tag.html')
def feeds_about(tag):
    """List the site's source feeds that have at least one post tagged *tag*."""
    qs = Feed.objects.filter(
        subscriber__site=settings.FEEDJACK_SITE_ID,
        post__tags__name=tag,
    ).distinct()
    return {"feeds_list": qs, "tag": tag}
@register.inclusion_tag("tags/blocks/related_list.html")
def related_tags_for(tag, count=20):
    """Tags co-occurring with *tag* on the same posts, most frequent first."""
    related_tags = Tag.objects.filter(post__tags=tag)\
        .annotate(Count("post")).exclude(name=tag.name)\
        .distinct().order_by("-post__count", "name")[:count]
    # Replaced the error-prone `x and y or z` idiom with a conditional
    # expression (same result here: post counts are non-negative ints).
    max_posts_count = related_tags[0].post__count if related_tags else 0
    return {"related_tags": related_tags, "max_posts_count": max_posts_count}
@register.simple_tag
def tags_count_for(instance):
    """Number of distinct tags attached to a Feed or a Subscriber (else 0)."""
    if isinstance(instance, Feed):
        qs = Tag.objects.filter(post__feed=instance)
    elif isinstance(instance, Subscriber):
        qs = Tag.objects.filter(post__feed__subscriber=instance)
    else:
        return 0
    return qs.distinct().count()
@register.inclusion_tag("feeds/blocks/related_feeds.html")
def related_feeds_for(feed, count=10):
    """Up to *count* other feeds sharing at least one tag with *feed*."""
    feed_tags = Tag.objects.filter(post__feed=feed
        ).distinct()
    related_feeds = []
    if feed_tags:
        related_feeds = Feed.objects.filter(post__tags__in=feed_tags
            ).exclude(pk=feed.pk).distinct()[:count]
    return {"related_feeds": related_feeds}
@register.inclusion_tag("posts/details.html")
def post_details(post):
    """Render the shared post-details template for a single post."""
    return dict(post=post)
#@register.simple_tag
#def year_cloud_for(instance):
#if isinstance(instance, Feed):
#return Post.objects.values("date_modified").distinct().annotate(Count("pk")).order_by("date_modified")
#elif isinstance(instance, Subscriber):
#return []
#else:
#return [] | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from django import template
from django.db.models import Count, Max, F
from django.conf import settings
from feedjack.models import Tag, Subscriber, Feed
register = template.Library()
@register.simple_tag
def normalize_number(number, max_values, levels=16, base_size=14):
    """Map a count onto a font size for the tag clouds.

    Returns base_size + (number / max_values) * levels, i.e. a value in
    [base_size, base_size + levels] for number in [0, max_values].
    `base_size` generalizes the previously hard-coded minimum of 14
    (default unchanged, so existing templates behave identically).
    """
    return base_size + ((float(number) / float(max_values)) * levels)
@register.inclusion_tag('tags/blocks/cloud.html')
def tags_cloud(min_posts_count=1):
    """Render the global tag cloud (tags with more than min_posts_count posts)."""
    max_posts = Tag.objects.annotate(count=Count("post"))
    max_posts = max_posts.filter(
        count__gt=min_posts_count, name__isnull=False)
    max_posts = max_posts.aggregate(Max("count"))
    # Largest per-tag post count; the template scales font sizes with it.
    max_posts_count = max_posts["count__max"]
    tags_cloud = Tag.objects.annotate(count=Count("post"))
    tags_cloud = tags_cloud.filter(name__isnull=False,
        count__gt=min_posts_count).order_by("name")
    return {"tags_cloud": tags_cloud, "max_posts_count": max_posts_count}
@register.inclusion_tag('tags/blocks/subscribers_cloud.html')
def tags_cloud_for_subscriber(subscriber=None, min_posts_count=0):
    """Tag cloud restricted to one subscriber's posts."""
    max_posts = Tag.objects.annotate(count=Count("post"))
    max_posts = max_posts.filter(name__isnull=False,
        count__gt=min_posts_count, post__feed__subscriber=subscriber)
    max_posts = max_posts.aggregate(Max("count"))
    # Largest per-tag post count; the template scales font sizes with it.
    max_posts_count = max_posts["count__max"]
    tags_cloud = Tag.objects.annotate(count=Count("post"))
    tags_cloud = tags_cloud.filter(name__isnull=False,
        count__gt=min_posts_count, post__feed__subscriber=subscriber
        ).order_by("name")
    return {"subscriber": subscriber, "tags_cloud": tags_cloud,
        "max_posts_count": max_posts_count}
@register.inclusion_tag('tags/blocks/feeds_cloud.html')
def tags_cloud_for_feed(feed=None, min_posts_count=0):
    """Tag cloud restricted to one source feed's posts."""
    max_posts = Tag.objects.annotate(count=Count("post"))
    max_posts = max_posts.filter(name__isnull=False,
        count__gt=min_posts_count, post__feed=feed)
    max_posts = max_posts.aggregate(Max("count"))
    # Largest per-tag post count; the template scales font sizes with it.
    max_posts_count = max_posts["count__max"]
    tags_cloud = Tag.objects.annotate(count=Count("post"))
    tags_cloud = tags_cloud.filter(name__isnull=False,
        count__gt=min_posts_count, post__feed=feed
        ).order_by("name")
    return {"feed": feed, "tags_cloud": tags_cloud,
        "max_posts_count": max_posts_count}
@register.inclusion_tag('subscribers/blocks/list_for_tag.html')
def subscribers_about(tag):
    """List the site's subscribers that have at least one post tagged *tag*."""
    qs = Subscriber.objects.filter(
        site=settings.FEEDJACK_SITE_ID,
        feed__post__tags__name=tag,
    ).distinct()
    return {"subscribers": qs, "tag": tag}
@register.inclusion_tag('feeds/blocks/list_for_tag.html')
def feeds_about(tag):
    """List the site's source feeds that have at least one post tagged *tag*."""
    qs = Feed.objects.filter(
        subscriber__site=settings.FEEDJACK_SITE_ID,
        post__tags__name=tag,
    ).distinct()
    return {"feeds_list": qs, "tag": tag}
@register.inclusion_tag("tags/blocks/related_list.html")
def related_tags_for(tag, count=20):
    """Tags co-occurring with *tag* on the same posts, most frequent first."""
    related_tags = Tag.objects.filter(post__tags=tag)\
        .annotate(Count("post")).exclude(name=tag.name)\
        .distinct().order_by("-post__count", "name")[:count]
    # Replaced the error-prone `x and y or z` idiom with a conditional
    # expression (same result here: post counts are non-negative ints).
    max_posts_count = related_tags[0].post__count if related_tags else 0
    return {"related_tags": related_tags, "max_posts_count": max_posts_count}
@register.simple_tag
def tags_count_for(instance):
    """Number of distinct tags attached to a Feed or a Subscriber (else 0)."""
    if isinstance(instance, Feed):
        qs = Tag.objects.filter(post__feed=instance)
    elif isinstance(instance, Subscriber):
        qs = Tag.objects.filter(post__feed__subscriber=instance)
    else:
        return 0
    return qs.distinct().count()
@register.inclusion_tag("feeds/blocks/related_feeds.html")
def related_feeds_for(feed, count=10):
    """Up to *count* other feeds sharing at least one tag with *feed*."""
    feed_tags = Tag.objects.filter(post__feed=feed
        ).distinct()
    related_feeds = []
    if feed_tags:
        related_feeds = Feed.objects.filter(post__tags__in=feed_tags
            ).exclude(pk=feed.pk).distinct()[:count]
    return {"related_feeds": related_feeds}
@register.inclusion_tag("posts/details.html")
def post_details(post):
    """Render the shared post-details template for a single post."""
    return dict(post=post)
#@register.simple_tag
#def year_cloud_for(instance):
#if isinstance(instance, Feed):
#return Post.objects.values("date_modified").distinct().annotate(Count("pk")).order_by("date_modified")
#elif isinstance(instance, Subscriber):
#return []
#else:
#return [] | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import feedparser
from django.core.management.base import BaseCommand
from django.conf import settings
from feedjack.models import Feed, Subscriber
class Command(BaseCommand):
def handle(self, *args, **options):
if not len(args):
print "You must provide the feed url as parameter"
exit(0)
url = args[0]
document = feedparser.parse(url)
try:
title = document.feed.title
except AttributeError:
title = "--"
try:
author_name = document.feed.author_detail.name
if not author_name:
author_name = document.feed.author
except AttributeError:
author_name = None
try:
feed = Feed(feed_url=url, name=title, shortname=title)
feed.save()
except:
print "That feed is already saved."
exit(0)
subscriber = Subscriber()
subscriber.site_id = settings.FEEDJACK_SITE_ID
subscriber.feed = feed
subscriber.name = author_name
subscriber.shortname = author_name
subscriber.save()
print "done" | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import feedparser
from django.core.management.base import BaseCommand
from django.conf import settings
from feedjack.models import Feed, Subscriber
class Command(BaseCommand):
def handle(self, *args, **options):
if not len(args):
print "You must provide the feed url as parameter"
exit(0)
url = args[0]
document = feedparser.parse(url)
try:
title = document.feed.title
except AttributeError:
title = "--"
try:
author_name = document.feed.author_detail.name
if not author_name:
author_name = document.feed.author
except AttributeError:
author_name = None
try:
feed = Feed(feed_url=url, name=title, shortname=title)
feed.save()
except:
print "That feed is already saved."
exit(0)
subscriber = Subscriber()
subscriber.site_id = settings.FEEDJACK_SITE_ID
subscriber.feed = feed
subscriber.name = author_name
subscriber.shortname = author_name
subscriber.save()
print "done" | Python |
# -*- coding: utf-8 -*-
from django.utils import feedgenerator
from django.utils.cache import patch_vary_headers
from django.shortcuts import render_to_response, get_object_or_404
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.template import Context, loader, RequestContext
from django.conf import settings
from django.db.models import Count, Max
from django.utils.translation import ugettext_lazy as _
from feedjack.models import Site, Feed, Subscriber, Post, Tag
from feedjack_extension.forms import SearchForm
def subscribers_list(request):
    """List every subscriber of the current feedjack site."""
    qs = Subscriber.objects.filter(site=settings.FEEDJACK_SITE_ID)
    return render_to_response(
        "subscribers/list.html",
        {"subscribers_list": qs},
        context_instance=RequestContext(request))
def subscriber_show(request, subscriber_id, tag=None):
    """Show one subscriber's posts, optionally filtered by a tag name."""
    subscriber = get_object_or_404(Subscriber, pk=subscriber_id)
    filters = {
        "feed__subscriber": subscriber,
        "feed__subscriber__site": settings.FEEDJACK_SITE_ID,
    }
    if tag:
        # Rebinds `tag` from the name string to the Tag instance.
        tag = get_object_or_404(Tag, name=tag)
        filters["tags"] = tag
    posts = Post.objects.filter(**filters).order_by("-date_created")
    return render_to_response(
        "subscribers/show.html",
        {"subscriber": subscriber, "posts": posts, "tag": tag},
        context_instance=RequestContext(request))
def feeds_list(request):
    """List every source feed aggregated for the current site."""
    qs = Feed.objects.filter(
        subscriber__site=settings.FEEDJACK_SITE_ID)
    return render_to_response(
        "feeds/list.html", {"feeds_list": qs},
        context_instance=RequestContext(request))
def feed_show(request, feed_id, tag=None):
    """Show one source feed's posts, optionally filtered by a tag name."""
    feed = get_object_or_404(Feed, pk=feed_id)
    filters = {
        "feed": feed,
        "feed__subscriber__site": settings.FEEDJACK_SITE_ID,
    }
    if tag:
        # Rebinds `tag` from the name string to the Tag instance.
        tag = get_object_or_404(Tag, name=tag)
        filters["tags"] = tag
    posts = Post.objects.filter(**filters).order_by("-date_created")
    return render_to_response(
        "feeds/show.html",
        {"feed": feed, "posts": posts, "tag": tag},
        context_instance=RequestContext(request))
def posts_list(request):
    """List all of the site's posts, newest first."""
    qs = Post.objects.filter(
        feed__subscriber__site=settings.FEEDJACK_SITE_ID
    ).distinct().order_by("-date_created")
    return render_to_response(
        "posts/list.html", {"posts": qs},
        context_instance=RequestContext(request))
def post_show(request, post_id):
    """Show a single post."""
    post = get_object_or_404(Post, pk=post_id)
    return render_to_response(
        "posts/show.html", {"post": post},
        context_instance=RequestContext(request))
def tag_show(request, tag):
    """Show all of the site's posts carrying the given tag name."""
    tag = get_object_or_404(Tag, name=tag)
    qs = Post.objects.filter(
        tags=tag,
        feed__subscriber__site=settings.FEEDJACK_SITE_ID,
    ).distinct().order_by("-date_created")
    return render_to_response(
        "tags/show.html", {"posts": qs, "tag": tag},
        context_instance=RequestContext(request))
def tag_subscribers_list(request, tag):
    """List subscribers that have posts under the given tag name."""
    tag = get_object_or_404(Tag, name=tag)
    qs = Subscriber.objects.filter(
        site=settings.FEEDJACK_SITE_ID,
        feed__post__tags__name=tag,
    ).distinct()
    return render_to_response(
        "subscribers/list_for_tag.html",
        {"subscribers": qs, "tag": tag},
        context_instance=RequestContext(request))
def tag_feeds_list(request, tag):
    """List source feeds that have posts under the given tag name."""
    tag = get_object_or_404(Tag, name=tag)
    qs = Feed.objects.filter(
        post__tags__name=tag,
        subscriber__site=settings.FEEDJACK_SITE_ID,
    ).distinct()
    return render_to_response(
        "feeds/list_for_tag.html",
        {"feeds_list": qs, "tag": tag},
        context_instance=RequestContext(request))
def tags_cloud(request, min_posts_count=1):
    """Render the tag-cloud page for tags with more than min_posts_count posts."""
    annotated = Tag.objects.annotate(count=Count("post"))
    # Largest per-tag post count among the shown tags (scales font sizes).
    max_posts_count = annotated.filter(
        count__gt=min_posts_count, name__isnull=False
    ).aggregate(Max("count"))["count__max"]
    cloud = annotated.filter(
        count__gt=min_posts_count, name__isnull=False
    ).order_by("name")
    return render_to_response(
        "tags/cloud.html",
        {"tags_cloud": cloud, "max_posts_count": max_posts_count},
        context_instance=RequestContext(request))
def foaf(request):
    """Serve the FOAF description of the site's aggregated feeds as XML."""
    qs = Feed.objects.filter(
        subscriber__site=settings.FEEDJACK_SITE_ID).distinct()
    return render_to_response(
        "microformats/foaf.xml", {"feeds": qs},
        context_instance=RequestContext(request), mimetype="text/xml")
def opml(request):
    """Serve the OPML outline of the site's aggregated feeds as XML."""
    qs = Feed.objects.filter(
        subscriber__site=settings.FEEDJACK_SITE_ID).distinct()
    return render_to_response(
        "microformats/opml.xml", {"feeds": qs},
        context_instance=RequestContext(request), mimetype="text/xml")
def search(request):
    """Dispatch the site search: `w` picks the domain (posts/tags/blogs/
    authors) and `q` the case-insensitive substring to look for.

    Missing or invalid search parameters fall back to the posts list.
    """
    # The search submit button contributes search=go; only then bind the form.
    if request.method == "GET" and request.GET.get("search") == "go":
        search_form = SearchForm(request.GET)
        if search_form.is_valid():
            query = search_form.cleaned_data["q"]
            if search_form.cleaned_data["w"] == "posts":
                params_dict = {"title__icontains": query,
                    "feed__subscriber__site": settings.FEEDJACK_SITE_ID}
                posts = Post.objects.filter(**params_dict
                    ).distinct().order_by("-date_created")
                return render_to_response("posts/list.html", {"posts": posts},
                    context_instance=RequestContext(request))
            elif search_form.cleaned_data["w"] == "tags":
                params_dict = {"name__icontains": query,
                    "post__feed__subscriber__site": settings.FEEDJACK_SITE_ID}
                tags_list = Tag.objects.filter(**params_dict
                    ).distinct().order_by("name")
                return render_to_response("tags/list.html",
                    {"tags_list": tags_list},
                    context_instance=RequestContext(request))
            elif search_form.cleaned_data["w"] == "blogs":
                params_dict = {"name__icontains": query,
                    "subscriber__site": settings.FEEDJACK_SITE_ID}
                feeds_list = Feed.objects.filter(**params_dict
                    ).distinct().order_by("name")
                return render_to_response("feeds/list.html",
                    {"feeds_list": feeds_list},
                    context_instance=RequestContext(request))
            elif search_form.cleaned_data["w"] == "authors":
                params_dict = {"name__icontains": query,
                    "feed__subscriber__site": settings.FEEDJACK_SITE_ID}
                subscribers_list = Subscriber.objects.filter(**params_dict
                    ).distinct().order_by("name")
                return render_to_response("subscribers/list.html",
                    {"subscribers_list": subscribers_list},
                    context_instance=RequestContext(request))
            else:
                # Unreachable while SEARCH_CHOICES matches the branches above.
                return HttpResponseRedirect(reverse("posts_list"))
        else:
            return HttpResponseRedirect(reverse("posts_list"))
    else:
        return HttpResponseRedirect(reverse("posts_list"))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from django import forms
from django.utils.translation import ugettext as _
from feedjack.models import Tag, Subscriber, Post
# Search scopes offered by SearchForm's `w` field:
# (stored query-string value, translated label shown to the user).
SEARCH_CHOICES = (
    ("posts", _("Posts")),
    ("tags", _("Tags")),
    ("blogs", _("Blogs")),
    ("authors", _("Authors")),
)
class SearchForm(forms.Form):
    """Site search form: `w` selects what to search, `q` is the query text."""
    w = forms.ChoiceField(choices=SEARCH_CHOICES, label="")
    q = forms.CharField(max_length=100, label="")
| Python |
# -*- coding: utf-8 -*-
from django.conf import settings
from feedjack.models import Site, Link
from feedjack_extension.forms import SearchForm
def context(request):
    """Template context processor adding the current feedjack Site, all
    Links and a (possibly bound) SearchForm to every template context.

    NOTE(review): the search view checks ``search == "go"`` while this
    only checks truthiness of ``search`` -- confirm the mismatch is
    intentional.
    """
    if request.method == "GET" and request.GET.get("search"):
        search_form = SearchForm(request.GET)
    else:
        search_form = SearchForm()
    return {"site": Site.objects.get(pk=settings.FEEDJACK_SITE_ID),
        "links": Link.objects.all(),
        "search_form": search_form}
| Python |
# -*- coding: utf-8 -*-
import os
from datetime import datetime
from django.http import HttpResponse
from django.utils import feedgenerator
from django.utils.cache import patch_vary_headers
from django.core.urlresolvers import reverse
from django.conf import settings
from django.shortcuts import get_object_or_404, Http404
from django.template.defaultfilters import linebreaks, escape, capfirst
from django.utils.translation import ugettext_lazy as _
from atompub import atomformat
from feedjack.models import Post, Site, Subscriber, Tag, Feed
# Max number of entries emitted per Atom feed; overridable in settings.
ITEMS_PER_FEED = getattr(settings, 'FJ_EXTENSION_ITEMS_PER_FEED', 50)
class BasePostFeed(atomformat.Feed):
    """Base Atom feed: common item-level mappings for Post objects.

    Subclasses provide the feed-level metadata (id, title, items...);
    this class maps a Post to its Atom entry fields and caches the
    current feedjack Site.
    """
    def __init__(self, *args, **kwargs):
        # BUG FIX: previously the args tuple and kwargs dict were passed
        # as two positional arguments; forward them properly instead.
        super(BasePostFeed, self).__init__(*args, **kwargs)
        self.site = Site.objects.get(pk=settings.FEEDJACK_SITE_ID)

    def item_id(self, post):
        return post.guid

    def item_title(self, post):
        return post.title

    def item_updated(self, post):
        return post.date_modified

    def item_published(self, post):
        return post.date_created

    def item_content(self, post):
        # Content is escaped then paragraph-formatted, served as HTML.
        return {"type" : "html", }, linebreaks(escape(post.content))

    def item_links(self, post):
        return [{"href" : reverse("post_show", args=( post.pk,))}]

    def item_authors(self, post):
        return [{"name" : post.author}]
class PostFeed(BasePostFeed):
    """Atom feed with the latest posts of the whole site."""
    def feed_id(self):
        return reverse("posts_list")

    def feed_title(self):
        return _("Posts in %s") % self.site.name

    def feed_subtitle(self):
        return _("All posts")

    def feed_updated(self):
        qs = Post.objects.filter(feed__subscriber__site=self.site)
        # We return an arbitrary date if there are no results, because there
        # must be a feed_updated field as per the Atom specifications, however
        # there is no real data to go by, and an arbitrary date can be static.
        if qs.count() == 0:
            return datetime(year=2008, month=7, day=1)
        return qs.latest('date_created').date_created

    def feed_links(self):
        return ({'href': reverse('posts_list')},)

    def items(self):
        # Newest first, capped at ITEMS_PER_FEED entries.
        posts_list = Post.objects.filter(feed__subscriber__site=self.site
            ).order_by("-date_created")[:ITEMS_PER_FEED]
        return posts_list
class SubscriberFeed(BasePostFeed):
    """Atom feed of one (active) subscriber's posts."""
    def get_object(self, params):
        # params[0] is the subscriber primary key from the URL.
        return get_object_or_404(Subscriber, pk=params[0], is_active=True)

    def feed_id(self, subscriber):
        return reverse("subscriber_show", args=(subscriber.pk, ))

    def feed_title(self, subscriber):
        return _("Posts by %s - %s") % (subscriber.name, self.site.name)

    def feed_updated(self, subscriber):
        qs = Post.objects.filter(feed__subscriber=subscriber).distinct()
        # We return an arbitrary date if there are no results, because there
        # must be a feed_updated field as per the Atom specifications, however
        # there is no real data to go by, and an arbitrary date can be static.
        if qs.count() == 0:
            return datetime(year=2008, month=7, day=1)
        return qs.latest('date_created').date_created

    def feed_links(self, subscriber):
        return ({'href': reverse("subscriber_show", args=(subscriber.pk, ))},)

    def items(self, subscriber):
        return Post.objects.filter(feed__subscriber=subscriber,
            ).distinct().order_by("-date_created")[:ITEMS_PER_FEED]
class BlogFeed(BasePostFeed):
    """Atom feed re-publishing the posts of one aggregated source feed."""
    def get_object(self, params):
        # params[0] is the Feed primary key from the URL.
        return get_object_or_404(Feed, pk=params[0], is_active=True)

    def feed_id(self, feed):
        return reverse("feed_show", args=(feed.pk, ))

    def feed_title(self, feed):
        return _("Posts in %s - %s") % (feed.title, self.site.name)

    def feed_subtitle(self, feed):
        return "%s - %s" % (feed.tagline, feed.link)

    def feed_updated(self, feed):
        qs = Post.objects.filter(feed=feed,
            feed__subscriber__site=self.site).distinct()
        # We return an arbitrary date if there are no results, because there
        # must be a feed_updated field as per the Atom specifications, however
        # there is no real data to go by, and an arbitrary date can be static.
        if qs.count() == 0:
            return datetime(year=2008, month=7, day=1)
        return qs.latest('date_created').date_created

    def feed_links(self, feed):
        return ({'href': reverse("feed_show", args=(feed.pk, ))},)

    def items(self, feed):
        return Post.objects.filter(feed=feed,
            feed__subscriber__site=self.site).distinct(
            ).order_by("-date_created")[:ITEMS_PER_FEED]
class TagFeed(BasePostFeed):
    """Atom feed of the site's posts carrying a given tag."""
    def get_object(self, params):
        # params[0] is the tag *name* from the URL, not its pk.
        return get_object_or_404(Tag, name=params[0])

    def feed_id(self, tag):
        return reverse("tag_show", args=(tag.pk, ))

    def feed_title(self, tag):
        return _("Posts under %s tag - %s") % (tag, self.site.name)

    def feed_updated(self, tag):
        # tags__name=tag -- a Tag object is passed where a string is
        # expected; presumably its unicode form is the name. TODO confirm.
        qs = Post.objects.filter(tags__name=tag,
            feed__subscriber__site=self.site).distinct()
        # We return an arbitrary date if there are no results, because there
        # must be a feed_updated field as per the Atom specifications, however
        # there is no real data to go by, and an arbitrary date can be static.
        if qs.count() == 0:
            return datetime(year=2008, month=7, day=1)
        return qs.latest('date_created').date_created

    def feed_links(self, tag):
        return ({'href': reverse("tag_show", args=(tag.pk, ))},)

    def items(self, tag):
        return Post.objects.filter(tags__name=tag, feed__subscriber__site=self.site
            ).distinct().order_by("-date_created")[:ITEMS_PER_FEED]
class SubscriberTagFeed(BasePostFeed):
    """Atom feed of one subscriber's posts restricted to a single tag.

    The tag name is given as the ``tag`` keyword argument at
    construction time (see SubscriberFeedChooser).
    """
    def __init__(self, *args, **kwargs):
        # BUG FIX: pop the extra ``tag`` kwarg before forwarding, and
        # forward with *args/**kwargs -- previously the args tuple and
        # kwargs dict were passed as two positional arguments.
        self.tag = kwargs.pop("tag")
        super(SubscriberTagFeed, self).__init__(*args, **kwargs)

    def get_object(self, params):
        return get_object_or_404(Subscriber, pk=params[0], is_active=True)

    def feed_id(self, subscriber):
        return reverse("by_tag_subscriber_show", args=(subscriber.pk, self.tag))

    def feed_title(self, subscriber):
        return _("Posts by %s under %s tag - %s")\
            % (subscriber.name, self.tag, self.site.name)

    def feed_updated(self, subscriber):
        qs = Post.objects.filter(feed__subscriber=subscriber,
            tags__name=self.tag).distinct()
        # We return an arbitrary date if there are no results, because there
        # must be a feed_updated field as per the Atom specifications, however
        # there is no real data to go by, and an arbitrary date can be static.
        if qs.count() == 0:
            return datetime(year=2008, month=7, day=1)
        return qs.latest('date_created').date_created

    def feed_links(self, subscriber):
        return ({'href': reverse("by_tag_subscriber_show", args=(subscriber.pk, self.tag))},)

    def items(self, subscriber):
        return Post.objects.filter(
            feed__subscriber=subscriber, tags__name=self.tag
            ).distinct().order_by("-date_created")[:ITEMS_PER_FEED]
class SubscriberFeedChooser:
    """Dispatches subscriber feed URL params to the right Atom feed class.

    ``params`` of the form "<id>" go to SubscriberFeed, while
    "<id>/tags/<tag>" go to SubscriberTagFeed; any other shape is a 404.
    """
    def __init__(self, *args, **kwargs):
        # Positional calling convention of the feed framework: (url, request).
        self.request = args[1]
        self.url = args[0]

    def get_feed(self, params):
        check_params = params.split("/")
        if len(check_params) == 1:
            return SubscriberFeed(self.url, self.request).get_feed(params)
        else:
            if not len(check_params) == 3:
                raise Http404, "Feed does not exist"
            if not check_params[1] == "tags":
                raise Http404, "Feed does not exist"
            tag = check_params[2]
            return SubscriberTagFeed(self.url, self.request, tag=tag).get_feed(params)
def rss_feed(request, tag=None, subscriber_id=None):
    """RSS 2.0 feed of the site's posts, optionally filtered.

    ``tag`` and ``subscriber_id`` come from the URLconf and may be
    combined; each narrows the post queryset and prefixes the title.
    """
    site = get_object_or_404(Site, pk=settings.FEEDJACK_SITE_ID)
    params_dict = {"feed__subscriber__site": site}
    pretitle = ""
    title = "%s (RSS Feed)" % site.title
    if tag:
        params_dict.update({"tags__name": tag})
        pretitle = "%s %s " % (tag, _("in"))
    if subscriber_id:
        params_dict.update({"feed__subscriber": subscriber_id})
        subscriber = Subscriber.objects.get(pk=subscriber_id)
        pretitle = "%s %s " % (subscriber.name, _("in"))
    posts_count = getattr(settings, "FJ_EXTENSION_ITEMS_PER_FEED", 50)
    object_list = Post.objects.filter(**params_dict).distinct()[:posts_count]
    # BUG FIX: os.path.join(site.url, reverse(...)) discarded site.url
    # entirely, because reverse() returns an absolute path and
    # os.path.join drops everything before an absolute component.
    feed = feedgenerator.Rss201rev2Feed(title=pretitle + title,
        link=site.url, description=site.description,
        feed_url=site.url.rstrip("/") + reverse("default_feed"))
    for post in object_list:
        feed.add_item(
            title='%s: %s' % (post.feed.name, post.title),
            link=reverse("post_show", args=(post.pk,)),
            description=post.content,
            author_email=post.author_email,
            author_name=post.author,
            pubdate=post.date_modified,
            unique_id=post.link,
            # BUG FIX: comprehension variable renamed so it no longer
            # shadows the ``tag`` parameter (Py2 list comps leak their var).
            categories=[t.name for t in post.tags.all()]
        )
    response = HttpResponse(mimetype=feed.mime_type)
    # per host caching
    patch_vary_headers(response, ['Host'])
    feed.write(response, 'utf-8')
    return response
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from django import forms
from django.utils.translation import ugettext as _
from feedjack.models import Tag, Subscriber, Post
# (value, label) pairs for SearchForm.w -- the value selects which model
# the search view queries; labels are translated for display.
SEARCH_CHOICES = (
    ("posts", _("Posts")),
    ("tags", _("Tags")),
    ("blogs", _("Blogs")),
    ("authors", _("Authors")),
)
class SearchForm(forms.Form):
    """Two-field search form: ``w`` picks the target model, ``q`` is the query."""
    w = forms.ChoiceField(choices=SEARCH_CHOICES, label="")
    q = forms.CharField(max_length=100, label="")
| Python |
# -*- coding: utf-8 -*-
"""
Based on Feedjack urls.py by Gustavo Picón
urls.py
"""
from django.conf.urls.defaults import patterns, url
# Page views. Order matters: the more specific tag/subscriber routes
# must precede the generic ones.
urlpatterns = patterns('feedjack_extension.views',
    url(r'^subscribers/(?P<subscriber_id>\d+)/tags/(?P<tag>.*)/$', "subscriber_show", name="by_tag_subscriber_show"),
    url(r'^subscribers/(?P<subscriber_id>\d+)/$', "subscriber_show", name="subscriber_show"),
    url(r'^subscribers/$', "subscribers_list", name="subscribers_list"),
    url(r'^feeds/(?P<feed_id>\d+)/tags/(?P<tag>.*)/$', "feed_show", name="by_tag_feed_show"),
    url(r'^feeds/(?P<feed_id>\d+)/$', "feed_show", name="feed_show"),
    url(r'^feeds/$', "feeds_list", name="feeds_list"),
    url(r'^tags/(?P<tag>.*)/feeds/$', "tag_feeds_list", name="tag_feeds_list"),
    url(r'^tags/(?P<tag>.*)/subscribers/$', "tag_subscribers_list", name="tag_subscribers_list"),
    url(r'^tags/(?P<tag>.*)/$', "tag_show", name="tag_show"),
    url(r'^tags/$', "tags_cloud", name="tags_cloud"),
    url(r'^opml/$', "opml", name="opml"),
    url(r'^foaf/$', "foaf", name="foaf"),
    url(r'^posts/(?P<post_id>\d+)/$', "post_show", name="post_show"),
    url(r'^posts/$', "posts_list", name="posts_list"),
    url(r'^search/$', "search", name="search"),
)
# RSS feed views; the same rss_feed view serves all variants via its
# optional tag/subscriber_id arguments.
urlpatterns += patterns('feedjack_extension.feeds',
    url(r'^posts/rss20.xml$', "rss_feed", name="default_feed"),
    url(r'^posts/feeds/rss/$', "rss_feed", name="rss_feed"),
    url(r'^feeds/rss/tags/(?P<tag>.*)/$', "rss_feed", name="tag_rss_feed"),
    url(r'^feeds/rss/subscribers/(?P<subscriber_id>\d+)/$', "rss_feed", name="subscriber_rss_feed"),
    url(r'^feeds/rss/subscribers/(?P<subscriber_id>\d+)/tags/(?P<tag>.*)/$', "rss_feed", name="tag_subscriber_rss_feed"),
)
#url(r'^posts/feed/$', "atom_feed", name="atom_feed"),
#url(r'^posts/feed/$', "atom_feed", name="default_feed"),
#url(r'^posts/feeds/atom/$', "atom_feed", name="atom_feed"),
#url(r'^feeds/tags/(?P<tag>.*)/$', "atom_feed", name="tag_feed"),
#url(r'^feeds/atom/tags/(?P<tag>.*)/$', "atom_feed", name="tag_atom_feed"),
#url(r'^feeds/subscribers/(?P<subscriber_id>\d+)/$', "atom_feed", name="subscriber_feed"),
#url(r'^feeds/atom/subscribers/(?P<subscriber_id>\d+)/$', "atom_feed", name="subscriber_atom_feed"),
#url(r'^feeds/subscribers/(?P<subscriber_id>\d+)/tag/(?P<tag>.*)/$', "atom_feed", name="subscriber_tag_feed"),
#url(r'^feeds/atom/subscribers/(?P<subscriber_id>\d+)/tag/(?P<tag>.*)/$', "atom_feed", name="tag_subscriber_atom_feed"),
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from django import template
from django.db.models import Count, Max, F
from django.conf import settings
from feedjack.models import Tag, Subscriber, Feed
register = template.Library()
@register.simple_tag
def normalize_number(number, max_values, levels=16):
    """Scale *number* against *max_values* into a font size in [14, 14+levels].

    Used by the tag-cloud templates to size tags by post count.
    """
    # BUG FIX: max_values is None when the Max() aggregate found no rows
    # (and 0 would divide by zero); fall back to the base size.
    if not max_values:
        return 14
    return 14 + ((float(number) / float(max_values)) * levels)
@register.inclusion_tag('tags/blocks/cloud.html')
def tags_cloud(min_posts_count=1):
    """Render the site-wide tag cloud for tags on more than min_posts_count posts."""
    # Largest per-tag post count; the template scales font sizes from it.
    # NOTE(review): count__max is None when no tag qualifies.
    max_posts = Tag.objects.annotate(count=Count("post"))
    max_posts = max_posts.filter(
        count__gt=min_posts_count, name__isnull=False)
    max_posts = max_posts.aggregate(Max("count"))
    max_posts_count = max_posts["count__max"]
    tags_cloud = Tag.objects.annotate(count=Count("post"))
    tags_cloud = tags_cloud.filter(name__isnull=False,
        count__gt=min_posts_count).order_by("name")
    return {"tags_cloud": tags_cloud, "max_posts_count": max_posts_count}
@register.inclusion_tag('tags/blocks/subscribers_cloud.html')
def tags_cloud_for_subscriber(subscriber=None, min_posts_count=0):
    """Tag cloud restricted to posts of one subscriber."""
    max_posts = Tag.objects.annotate(count=Count("post"))
    max_posts = max_posts.filter(name__isnull=False,
        count__gt=min_posts_count, post__feed__subscriber=subscriber)
    max_posts = max_posts.aggregate(Max("count"))
    max_posts_count = max_posts["count__max"]
    tags_cloud = Tag.objects.annotate(count=Count("post"))
    tags_cloud = tags_cloud.filter(name__isnull=False,
        count__gt=min_posts_count, post__feed__subscriber=subscriber
        ).order_by("name")
    return {"subscriber": subscriber, "tags_cloud": tags_cloud,
        "max_posts_count": max_posts_count}
@register.inclusion_tag('tags/blocks/feeds_cloud.html')
def tags_cloud_for_feed(feed=None, min_posts_count=0):
    """Tag cloud restricted to posts of one source feed."""
    max_posts = Tag.objects.annotate(count=Count("post"))
    max_posts = max_posts.filter(name__isnull=False,
        count__gt=min_posts_count, post__feed=feed)
    max_posts = max_posts.aggregate(Max("count"))
    max_posts_count = max_posts["count__max"]
    tags_cloud = Tag.objects.annotate(count=Count("post"))
    tags_cloud = tags_cloud.filter(name__isnull=False,
        count__gt=min_posts_count, post__feed=feed
        ).order_by("name")
    return {"feed": feed, "tags_cloud": tags_cloud,
        "max_posts_count": max_posts_count}
@register.inclusion_tag('subscribers/blocks/list_for_tag.html')
def subscribers_about(tag):
    """List the site's subscribers who have at least one post with *tag*."""
    subscribers = Subscriber.objects.filter(site=settings.FEEDJACK_SITE_ID,
        feed__post__tags__name=tag).distinct()
    return {"subscribers": subscribers, "tag": tag}
@register.inclusion_tag('feeds/blocks/list_for_tag.html')
def feeds_about(tag):
    """List the site's source feeds that have at least one post with *tag*."""
    feeds_list = Feed.objects.filter(
        subscriber__site=settings.FEEDJACK_SITE_ID,
        post__tags__name=tag).distinct()
    return {"feeds_list": feeds_list, "tag": tag}
@register.inclusion_tag("tags/blocks/related_list.html")
def related_tags_for(tag, count=20):
    """Up to *count* tags that co-occur with *tag* on the same posts,
    ordered by how often they co-occur."""
    related_tags = Tag.objects.filter(post__tags=tag)\
        .annotate(Count("post")).exclude(name=tag.name)\
        .distinct().order_by("-post__count", "name")[:count]
    # and/or idiom: 0 when related_tags is empty, else the top count.
    max_posts_count = related_tags and related_tags[0].post__count or 0
    return {"related_tags": related_tags, "max_posts_count": max_posts_count}
@register.simple_tag
def tags_count_for(instance):
    """Number of distinct tags used on a Feed's or a Subscriber's posts.

    Returns 0 for any other instance type.
    """
    if isinstance(instance, Feed):
        qs = Tag.objects.filter(post__feed=instance)
    elif isinstance(instance, Subscriber):
        qs = Tag.objects.filter(post__feed__subscriber=instance)
    else:
        return 0
    return qs.distinct().count()
@register.inclusion_tag("feeds/blocks/related_feeds.html")
def related_feeds_for(feed, count=10):
    """Up to *count* other feeds whose posts share a tag with *feed*'s posts."""
    feed_tags = Tag.objects.filter(post__feed=feed
        ).distinct()
    related_feeds = []
    if feed_tags:
        related_feeds = Feed.objects.filter(post__tags__in=feed_tags
            ).exclude(pk=feed.pk).distinct()[:count]
    return {"related_feeds": related_feeds}
@register.inclusion_tag("posts/details.html")
def post_details(post):
    """Render the standard detail snippet for a single post."""
    return dict(post=post)
#@register.simple_tag
#def year_cloud_for(instance):
#if isinstance(instance, Feed):
#return Post.objects.values("date_modified").distinct().annotate(Count("pk")).order_by("date_modified")
#elif isinstance(instance, Subscriber):
#return []
#else:
#return [] | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from django import template
from django.db.models import Count, Max, F
from django.conf import settings
from feedjack.models import Tag, Subscriber, Feed
register = template.Library()
@register.simple_tag
def normalize_number(number, max_values, levels=16):
    """Scale *number* against *max_values* into a font size in [14, 14+levels].

    Used by the tag-cloud templates to size tags by post count.
    """
    # BUG FIX: max_values is None when the Max() aggregate found no rows
    # (and 0 would divide by zero); fall back to the base size.
    if not max_values:
        return 14
    return 14 + ((float(number) / float(max_values)) * levels)
@register.inclusion_tag('tags/blocks/cloud.html')
def tags_cloud(min_posts_count=1):
    """Render the site-wide tag cloud for tags on more than min_posts_count posts."""
    # Largest per-tag post count; the template scales font sizes from it.
    # NOTE(review): count__max is None when no tag qualifies.
    max_posts = Tag.objects.annotate(count=Count("post"))
    max_posts = max_posts.filter(
        count__gt=min_posts_count, name__isnull=False)
    max_posts = max_posts.aggregate(Max("count"))
    max_posts_count = max_posts["count__max"]
    tags_cloud = Tag.objects.annotate(count=Count("post"))
    tags_cloud = tags_cloud.filter(name__isnull=False,
        count__gt=min_posts_count).order_by("name")
    return {"tags_cloud": tags_cloud, "max_posts_count": max_posts_count}
@register.inclusion_tag('tags/blocks/subscribers_cloud.html')
def tags_cloud_for_subscriber(subscriber=None, min_posts_count=0):
    """Tag cloud restricted to posts of one subscriber."""
    max_posts = Tag.objects.annotate(count=Count("post"))
    max_posts = max_posts.filter(name__isnull=False,
        count__gt=min_posts_count, post__feed__subscriber=subscriber)
    max_posts = max_posts.aggregate(Max("count"))
    max_posts_count = max_posts["count__max"]
    tags_cloud = Tag.objects.annotate(count=Count("post"))
    tags_cloud = tags_cloud.filter(name__isnull=False,
        count__gt=min_posts_count, post__feed__subscriber=subscriber
        ).order_by("name")
    return {"subscriber": subscriber, "tags_cloud": tags_cloud,
        "max_posts_count": max_posts_count}
@register.inclusion_tag('tags/blocks/feeds_cloud.html')
def tags_cloud_for_feed(feed=None, min_posts_count=0):
    """Tag cloud restricted to posts of one source feed."""
    max_posts = Tag.objects.annotate(count=Count("post"))
    max_posts = max_posts.filter(name__isnull=False,
        count__gt=min_posts_count, post__feed=feed)
    max_posts = max_posts.aggregate(Max("count"))
    max_posts_count = max_posts["count__max"]
    tags_cloud = Tag.objects.annotate(count=Count("post"))
    tags_cloud = tags_cloud.filter(name__isnull=False,
        count__gt=min_posts_count, post__feed=feed
        ).order_by("name")
    return {"feed": feed, "tags_cloud": tags_cloud,
        "max_posts_count": max_posts_count}
@register.inclusion_tag('subscribers/blocks/list_for_tag.html')
def subscribers_about(tag):
    """List the site's subscribers who have at least one post with *tag*."""
    subscribers = Subscriber.objects.filter(site=settings.FEEDJACK_SITE_ID,
        feed__post__tags__name=tag).distinct()
    return {"subscribers": subscribers, "tag": tag}
@register.inclusion_tag('feeds/blocks/list_for_tag.html')
def feeds_about(tag):
    """List the site's source feeds that have at least one post with *tag*."""
    feeds_list = Feed.objects.filter(
        subscriber__site=settings.FEEDJACK_SITE_ID,
        post__tags__name=tag).distinct()
    return {"feeds_list": feeds_list, "tag": tag}
@register.inclusion_tag("tags/blocks/related_list.html")
def related_tags_for(tag, count=20):
    """Up to *count* tags that co-occur with *tag* on the same posts,
    ordered by how often they co-occur."""
    related_tags = Tag.objects.filter(post__tags=tag)\
        .annotate(Count("post")).exclude(name=tag.name)\
        .distinct().order_by("-post__count", "name")[:count]
    # and/or idiom: 0 when related_tags is empty, else the top count.
    max_posts_count = related_tags and related_tags[0].post__count or 0
    return {"related_tags": related_tags, "max_posts_count": max_posts_count}
@register.simple_tag
def tags_count_for(instance):
    """Number of distinct tags used on a Feed's or a Subscriber's posts.

    Returns 0 for any other instance type.
    """
    if isinstance(instance, Feed):
        qs = Tag.objects.filter(post__feed=instance)
    elif isinstance(instance, Subscriber):
        qs = Tag.objects.filter(post__feed__subscriber=instance)
    else:
        return 0
    return qs.distinct().count()
@register.inclusion_tag("feeds/blocks/related_feeds.html")
def related_feeds_for(feed, count=10):
    """Up to *count* other feeds whose posts share a tag with *feed*'s posts."""
    feed_tags = Tag.objects.filter(post__feed=feed
        ).distinct()
    related_feeds = []
    if feed_tags:
        related_feeds = Feed.objects.filter(post__tags__in=feed_tags
            ).exclude(pk=feed.pk).distinct()[:count]
    return {"related_feeds": related_feeds}
@register.inclusion_tag("posts/details.html")
def post_details(post):
    """Render the standard detail snippet for a single post."""
    return dict(post=post)
#@register.simple_tag
#def year_cloud_for(instance):
#if isinstance(instance, Feed):
#return Post.objects.values("date_modified").distinct().annotate(Count("pk")).order_by("date_modified")
#elif isinstance(instance, Subscriber):
#return []
#else:
#return [] | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import feedparser
from django.core.management.base import BaseCommand
from django.conf import settings
from feedjack.models import Feed, Subscriber
class Command(BaseCommand):
def handle(self, *args, **options):
if not len(args):
print "You must provide the feed url as parameter"
exit(0)
url = args[0]
document = feedparser.parse(url)
try:
title = document.feed.title
except AttributeError:
title = "--"
try:
author_name = document.feed.author_detail.name
if not author_name:
author_name = document.feed.author
except AttributeError:
author_name = None
try:
feed = Feed(feed_url=url, name=title, shortname=title)
feed.save()
except:
print "That feed is already saved."
exit(0)
subscriber = Subscriber()
subscriber.site_id = settings.FEEDJACK_SITE_ID
subscriber.feed = feed
subscriber.name = author_name
subscriber.shortname = author_name
subscriber.save()
print "done" | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import feedparser
from django.core.management.base import BaseCommand
from django.conf import settings
from feedjack.models import Feed, Subscriber
class Command(BaseCommand):
def handle(self, *args, **options):
if not len(args):
print "You must provide the feed url as parameter"
exit(0)
url = args[0]
document = feedparser.parse(url)
try:
title = document.feed.title
except AttributeError:
title = "--"
try:
author_name = document.feed.author_detail.name
if not author_name:
author_name = document.feed.author
except AttributeError:
author_name = None
try:
feed = Feed(feed_url=url, name=title, shortname=title)
feed.save()
except:
print "That feed is already saved."
exit(0)
subscriber = Subscriber()
subscriber.site_id = settings.FEEDJACK_SITE_ID
subscriber.feed = feed
subscriber.name = author_name
subscriber.shortname = author_name
subscriber.save()
print "done" | Python |
# -*- coding: utf-8 -*-
from django.utils import feedgenerator
from django.utils.cache import patch_vary_headers
from django.shortcuts import render_to_response, get_object_or_404
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.template import Context, loader, RequestContext
from django.conf import settings
from django.db.models import Count, Max
from django.utils.translation import ugettext_lazy as _
from feedjack.models import Site, Feed, Subscriber, Post, Tag
from feedjack_extension.forms import SearchForm
def subscribers_list(request):
    """List every subscriber of the configured feedjack site."""
    subscribers = Subscriber.objects.filter(site=settings.FEEDJACK_SITE_ID)
    return render_to_response("subscribers/list.html",
        {"subscribers_list": subscribers},
        context_instance=RequestContext(request))
def subscriber_show(request, subscriber_id, tag=None):
    """Show one subscriber's posts, optionally restricted to a single tag."""
    subscriber = get_object_or_404(Subscriber, pk=subscriber_id)
    params_dict = {"feed__subscriber": subscriber,
        "feed__subscriber__site": settings.FEEDJACK_SITE_ID}
    if tag:
        # ``tag`` arrives as a name string and is rebound to the Tag object.
        tag = get_object_or_404(Tag, name=tag)
        params_dict.update({"tags": tag})
    posts = Post.objects.filter(**params_dict).order_by("-date_created")
    return render_to_response("subscribers/show.html",
        {"subscriber": subscriber, "posts": posts, "tag": tag},
        context_instance=RequestContext(request))
def feeds_list(request):
    """List every source feed aggregated for the configured site."""
    feeds_list = Feed.objects.filter(
        subscriber__site=settings.FEEDJACK_SITE_ID)
    return render_to_response("feeds/list.html", {"feeds_list": feeds_list},
        context_instance=RequestContext(request))
def feed_show(request, feed_id, tag=None):
    """Show one source feed's posts, optionally restricted to a single tag."""
    feed = get_object_or_404(Feed, pk=feed_id)
    params_dict = {"feed": feed,
        "feed__subscriber__site": settings.FEEDJACK_SITE_ID}
    if tag:
        # ``tag`` arrives as a name string and is rebound to the Tag object.
        tag = get_object_or_404(Tag, name=tag)
        params_dict.update({"tags": tag})
    posts = Post.objects.filter(**params_dict).order_by("-date_created")
    return render_to_response("feeds/show.html",
        {"feed": feed, "posts": posts, "tag": tag},
        context_instance=RequestContext(request))
def posts_list(request):
    """List the site's posts, newest first (the aggregator's front page)."""
    posts = Post.objects.filter(feed__subscriber__site=settings.FEEDJACK_SITE_ID
        ).distinct().order_by("-date_created")
    return render_to_response("posts/list.html", {"posts": posts},
        context_instance=RequestContext(request))
def post_show(request, post_id):
    """Show a single post by primary key (404 when missing)."""
    post = get_object_or_404(Post, pk=post_id)
    return render_to_response("posts/show.html", {"post": post},
        context_instance=RequestContext(request))
def tag_show(request, tag):
    """List the site's posts carrying the tag named *tag*, newest first."""
    tag = get_object_or_404(Tag, name=tag)
    posts = Post.objects.filter(tags=tag,
        feed__subscriber__site=settings.FEEDJACK_SITE_ID
        ).distinct().order_by("-date_created")
    return render_to_response("tags/show.html", {"posts": posts, "tag": tag},
        context_instance=RequestContext(request))
def tag_subscribers_list(request, tag):
    """List subscribers having at least one post with the tag named *tag*."""
    tag = get_object_or_404(Tag, name=tag)
    subscribers = Subscriber.objects.filter(site=settings.FEEDJACK_SITE_ID,
        feed__post__tags__name=tag).distinct()
    return render_to_response("subscribers/list_for_tag.html",
        {"subscribers": subscribers, "tag": tag},
        context_instance=RequestContext(request))
def tag_feeds_list(request, tag):
    """List source feeds having at least one post with the tag named *tag*."""
    tag = get_object_or_404(Tag, name=tag)
    feeds_list = Feed.objects.filter(post__tags__name=tag,
        subscriber__site=settings.FEEDJACK_SITE_ID).distinct()
    return render_to_response("feeds/list_for_tag.html",
        {"feeds_list": feeds_list, "tag": tag},
        context_instance=RequestContext(request))
def tags_cloud(request, min_posts_count=1):
    """Render the tag-cloud page for tags on more than min_posts_count posts."""
    # Largest per-tag post count, used by the template for font scaling.
    # NOTE(review): count__max is None when no tag qualifies -- the
    # normalize_number template tag should guard against that.
    max_posts_count = Tag.objects.annotate(count=Count("post")
        ).filter(count__gt=min_posts_count, name__isnull=False).aggregate(Max("count"))
    max_posts_count = max_posts_count["count__max"]
    tags_cloud = Tag.objects.annotate(count=Count("post")
        ).filter(count__gt=min_posts_count, name__isnull=False).order_by("name")
    return render_to_response("tags/cloud.html",
        {"tags_cloud": tags_cloud, "max_posts_count": max_posts_count},
        context_instance=RequestContext(request))
def foaf(request):
    """Serve the site's feeds as a FOAF XML document."""
    feeds = Feed.objects.filter(
        subscriber__site=settings.FEEDJACK_SITE_ID).distinct()
    return render_to_response("microformats/foaf.xml", {"feeds": feeds},
        context_instance=RequestContext(request), mimetype="text/xml")
def opml(request):
    """Serve the site's feeds as an OPML subscription list."""
    feeds = Feed.objects.filter(
        subscriber__site=settings.FEEDJACK_SITE_ID).distinct()
    return render_to_response("microformats/opml.xml", {"feeds": feeds},
        context_instance=RequestContext(request), mimetype="text/xml")
def search(request):
    """Site search over posts, tags, feeds ("blogs") or subscribers ("authors").

    Expects a GET request carrying ``search=go`` plus the SearchForm
    fields ``w`` (what to search) and ``q`` (the query).  Any invalid
    or non-search request redirects back to the posts list.
    """
    # Guard clauses replace the original deeply nested if/else pyramid
    # that repeated the same redirect three times.
    if request.method != "GET" or request.GET.get("search") != "go":
        return HttpResponseRedirect(reverse("posts_list"))
    search_form = SearchForm(request.GET)
    if not search_form.is_valid():
        return HttpResponseRedirect(reverse("posts_list"))
    query = search_form.cleaned_data["q"]
    where = search_form.cleaned_data["w"]
    if where == "posts":
        posts = Post.objects.filter(title__icontains=query,
            feed__subscriber__site=settings.FEEDJACK_SITE_ID
            ).distinct().order_by("-date_created")
        return render_to_response("posts/list.html", {"posts": posts},
            context_instance=RequestContext(request))
    if where == "tags":
        tags_list = Tag.objects.filter(name__icontains=query,
            post__feed__subscriber__site=settings.FEEDJACK_SITE_ID
            ).distinct().order_by("name")
        return render_to_response("tags/list.html",
            {"tags_list": tags_list},
            context_instance=RequestContext(request))
    if where == "blogs":
        feeds_list = Feed.objects.filter(name__icontains=query,
            subscriber__site=settings.FEEDJACK_SITE_ID
            ).distinct().order_by("name")
        return render_to_response("feeds/list.html",
            {"feeds_list": feeds_list},
            context_instance=RequestContext(request))
    if where == "authors":
        # NOTE(review): the lookup is feed__subscriber__site on Subscriber,
        # unlike subscribers_list which uses site= -- confirm it is correct.
        subscribers_list = Subscriber.objects.filter(name__icontains=query,
            feed__subscriber__site=settings.FEEDJACK_SITE_ID
            ).distinct().order_by("name")
        return render_to_response("subscribers/list.html",
            {"subscribers_list": subscribers_list},
            context_instance=RequestContext(request))
    return HttpResponseRedirect(reverse("posts_list"))
#!/usr/bin/env python
# Standard pre-Django-1.4 manage.py bootstrap script.
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)

if __name__ == "__main__":
    execute_manager(settings)
| Python |
# Django settings for djangoblogs project.
import deseb
import os

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS

# Pre-Django-1.2 single-database configuration style.
DATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'ado_mssql'.
DATABASE_NAME = 'sample.db' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
# NOTE(review): 'America/BuenosAires' is not a valid tz database name --
# the Olson identifier is 'America/Argentina/Buenos_Aires'; confirm and fix.
TIME_ZONE = 'America/BuenosAires'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

SITE_ROOT = os.path.dirname(os.path.realpath(__file__))

# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = 'http://localhost:8000/media/'

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/admin/media/'

# Make this unique, and don't share it with anybody.
SECRET_KEY = 'jxjd5h&^5qqy%2gv7fok@7hgu(vjak-zfa8n8jcgzts0l%-%-*'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
#     'django.template.loaders.eggs.load_template_source',
)

MIDDLEWARE_CLASSES = (
    # The cache middleware pair must wrap the stack: Update first, Fetch last.
    'django.middleware.cache.UpdateCacheMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.middleware.doc.XViewMiddleware',
    'pagination.middleware.PaginationMiddleware',
    'django.middleware.cache.FetchFromCacheMiddleware',
)

ROOT_URLCONF = 'urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(SITE_ROOT, "templates"),
)

TEMPLATE_CONTEXT_PROCESSORS = (
    "django.core.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "django.core.context_processors.request",
    # Adds site, links and search_form to every template context.
    "feedjack_extension.context_processors.context",
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.admin',
    'pagination',
    'feedjack',
    'feedjack_extension',
)

SESSION_COOKIE_NAME = "sampleid"

# tagging app settings
MAX_TAG_LENGTH = 255 # default: 50
FORCE_LOWERCASE_TAGS = True # default: True

STATIC_MEDIA_ROOT = os.path.join(SITE_ROOT, "media")

# Site row in feedjack's own Site table (not django.contrib.sites).
FEEDJACK_SITE_ID = 1

CACHE_BACKEND = "locmem:///"
CACHE_MIDDLEWARE_SECONDS = 3600

# Entries per generated Atom/RSS feed (read by feedjack_extension.feeds).
FJ_EXTENSION_ITEMS_PER_FEED = 20
| Python |
#!/usr/bin/env python
# Standard pre-Django-1.4 manage.py bootstrap script.
from django.core.management import execute_manager
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)

if __name__ == "__main__":
    execute_manager(settings)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""URLconf for the project: admin, static media, and the feedjack
extension, with the site root redirecting to the post list."""
from django.views.generic.simple import redirect_to
from django.conf.urls.defaults import *
from django.conf import settings
from django.contrib import admin

# Collect every installed app's admin registrations.
admin.autodiscover()

urlpatterns = patterns(
    '',
    # Fake index view: the bare root just bounces to the post list.
    (r'^$', redirect_to, {'url': '/posts/'}),
    # All remaining URLs are delegated to the feedjack extension.
    (r'^', include('feedjack_extension.urls')),
    # Admin site (pre-1.1 style dispatch through admin.site.root).
    (r'^admin/(.*)', admin.site.root),
    # Serve CSS and other media when running under the dev server.
    (r'^media/(?P<path>.*)$', 'django.views.static.serve',
     {'document_root': settings.STATIC_MEDIA_ROOT}),
)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""URLconf for the project: admin, static media, and the feedjack
extension, with the site root redirecting to the post list."""
from django.views.generic.simple import redirect_to
from django.conf.urls.defaults import *
from django.conf import settings
from django.contrib import admin

# Collect every installed app's admin registrations.
admin.autodiscover()

urlpatterns = patterns(
    '',
    # Fake index view: the bare root just bounces to the post list.
    (r'^$', redirect_to, {'url': '/posts/'}),
    # All remaining URLs are delegated to the feedjack extension.
    (r'^', include('feedjack_extension.urls')),
    # Admin site (pre-1.1 style dispatch through admin.site.root).
    (r'^admin/(.*)', admin.site.root),
    # Serve CSS and other media when running under the dev server.
    (r'^media/(?P<path>.*)$', 'django.views.static.serve',
     {'document_root': settings.STATIC_MEDIA_ROOT}),
)
| Python |
# -*- coding: utf-8 -*-
"""
feedjack
Gustavo Picón
fjcache.py
"""
import md5
from django.core.cache import cache
from django.conf import settings
# Cache-key type discriminators consumed by getkey() below.
T_HOST = 1  # the single, global hostcache entry (see hostcache_get/set)
T_ITEM = 2  # a per-site cached item, hashed by its item key
T_META = 3  # per-site list of item keys, used by cache_set/cache_delsite
def str2md5(key):
    """Return the hexadecimal MD5 digest of *key*.

    The key is UTF-8 encoded first so unicode cache keys hash
    consistently.
    """
    # hashlib supersedes the `md5` module, which was deprecated in
    # Python 2.5 and removed in Python 3; the digest is identical.
    import hashlib
    return hashlib.md5(key.encode('utf-8')).hexdigest()
def getkey(stype, site_id=None, key=None):
    """Build the cache key for the given key type.

    T_HOST keys are global; T_ITEM and T_META keys are scoped to a
    site, and T_ITEM keys additionally embed an md5 of the item key.
    """
    prefix = '%s.feedjack' % settings.CACHE_MIDDLEWARE_KEY_PREFIX
    if stype == T_HOST:
        return prefix + '.hostcache'
    if stype == T_ITEM:
        return '%s.%d.item.%s' % (prefix, site_id, str2md5(key))
    if stype == T_META:
        return '%s.%d.meta' % (prefix, site_id)
    # NOTE(review): an unrecognized stype falls through and returns
    # None, matching the original behavior.
def hostcache_get():
    """Fetch the global hostcache dictionary (None on a cache miss)."""
    hostkey = getkey(T_HOST)
    return cache.get(hostkey)
def hostcache_set(value):
    """Store *value* as the global hostcache dictionary."""
    hostkey = getkey(T_HOST)
    cache.set(hostkey, value)
def cache_get(site_id, key):
    """Return the cached item for *key* scoped to *site_id* (None on miss)."""
    item_key = getkey(T_ITEM, site_id, key)
    return cache.get(item_key)
def cache_set(site, key, data):
    """Store *data* for *site* under *key*.

    Every item key stored for a site is also recorded in that site's
    long-lived meta key, so cache_delsite() can purge them all later.
    """
    item_key = getkey(T_ITEM, site.id, key)
    meta_key = getkey(T_META, site.id)
    one_year = 365 * 24 * 60 * 60  # keep the tracking list effectively forever

    tracked = cache.get(meta_key)
    if not tracked:
        # First item for this site: start the tracking list.
        tracked = [item_key]
        cache.set(meta_key, [item_key], one_year)
    elif item_key not in tracked:
        # New item key: append it and rewrite the tracking list.
        tracked.append(item_key)
        cache.set(meta_key, tracked, one_year)

    # The item itself expires on the site's own schedule.
    cache.set(item_key, data, site.cache_duration)
def cache_delsite(site_id):
    """Delete every cached item tracked for *site_id*, then the tracker.

    A no-op when the site has no tracking list in the cache.
    """
    meta_key = getkey(T_META, site_id)
    tracked = cache.get(meta_key)
    if not tracked:
        return
    for item_key in tracked:
        cache.delete(item_key)
    cache.delete(meta_key)
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.