repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
viaregio/cartridge | cartridge/shop/migrations/0005_auto__add_field_product_rating_count__add_field_product_rating_average.py | 2 | 21083 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Apply the migration: add the rating aggregate columns to shop_product."""
    # Adding field 'Product.rating_count'
    db.add_column('shop_product', 'rating_count', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
    # Adding field 'Product.rating_average'
    db.add_column('shop_product', 'rating_average', self.gf('django.db.models.fields.FloatField')(default=0), keep_default=False)
def backwards(self, orm):
    """Reverse the migration: drop the rating aggregate columns again."""
    # Deleting field 'Product.rating_count'
    db.delete_column('shop_product', 'rating_count')
    # Deleting field 'Product.rating_average'
    db.delete_column('shop_product', 'rating_average')
models = {
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'generic.assignedkeyword': {
'Meta': {'object_name': 'AssignedKeyword'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': "orm['generic.Keyword']"}),
'object_pk': ('django.db.models.fields.TextField', [], {})
},
'generic.keyword': {
'Meta': {'object_name': 'Keyword'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'generic.rating': {
'Meta': {'object_name': 'Rating'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'value': ('django.db.models.fields.IntegerField', [], {})
},
'pages.page': {
'Meta': {'object_name': 'Page'},
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_footer': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.AssignedKeyword']"}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['pages.Page']"}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'})
},
'shop.cart': {
'Meta': {'object_name': 'Cart'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'})
},
'shop.cartitem': {
'Meta': {'object_name': 'CartItem'},
'cart': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['shop.Cart']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20'}),
'total_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'shop.category': {
'Meta': {'object_name': 'Category', '_ormbases': ['pages.Page']},
'combined': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'content': ('mezzanine.core.fields.HtmlField', [], {}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'product_options'", 'blank': 'True', 'to': "orm['shop.ProductOption']"}),
'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['pages.Page']", 'unique': 'True', 'primary_key': 'True'}),
'price_max': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'price_min': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'sale': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shop.Sale']", 'null': 'True', 'blank': 'True'})
},
'shop.discountcode': {
'Meta': {'object_name': 'DiscountCode'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'discountcode_related'", 'blank': 'True', 'to': "orm['shop.Category']"}),
'code': ('cartridge.shop.fields.DiscountCodeField', [], {'unique': 'True', 'max_length': '20'}),
'discount_deduct': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_exact': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '4', 'decimal_places': '2', 'blank': 'True'}),
'free_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_purchase': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'valid_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'valid_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'shop.order': {
'Meta': {'object_name': 'Order'},
'additional_instructions': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'billing_detail_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'billing_detail_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'billing_detail_postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'billing_detail_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_street': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'discount_code': ('cartridge.shop.fields.DiscountCodeField', [], {'max_length': '20', 'blank': 'True'}),
'discount_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'shipping_detail_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'shipping_detail_postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'shipping_detail_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_street': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'shipping_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'user_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'shop.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['shop.Order']"}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20'}),
'total_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
},
'shop.product': {
'Meta': {'object_name': 'Product'},
'available': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'products'", 'blank': 'True', 'to': "orm['shop.Category']"}),
'content': ('mezzanine.core.fields.HtmlField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.AssignedKeyword']"}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'rating': ('mezzanine.generic.fields.RatingField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.Rating']"}),
'rating_average': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'rating_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_products_rel_+'", 'blank': 'True', 'to': "orm['shop.Product']"}),
'sale_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sale_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'sale_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'sale_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'upsell_products': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'upsell_products_rel_+'", 'blank': 'True', 'to': "orm['shop.Product']"})
},
'shop.productaction': {
'Meta': {'unique_together': "(('product', 'timestamp'),)", 'object_name': 'ProductAction'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actions'", 'to': "orm['shop.Product']"}),
'timestamp': ('django.db.models.fields.IntegerField', [], {}),
'total_cart': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'total_purchase': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'shop.productimage': {
'Meta': {'object_name': 'ProductImage'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['shop.Product']"})
},
'shop.productoption': {
'Meta': {'object_name': 'ProductOption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {})
},
'shop.productvariation': {
'Meta': {'object_name': 'ProductVariation'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shop.ProductImage']", 'null': 'True', 'blank': 'True'}),
'num_in_stock': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'option1': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
'option2': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'variations'", 'to': "orm['shop.Product']"}),
'sale_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sale_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'sale_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'sale_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sku': ('cartridge.shop.fields.SKUField', [], {'unique': 'True', 'max_length': '20'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
},
'shop.sale': {
'Meta': {'object_name': 'Sale'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'sale_related'", 'blank': 'True', 'to': "orm['shop.Category']"}),
'discount_deduct': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_exact': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '4', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'valid_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'valid_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'sites.site': {
'Meta': {'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['shop']
| bsd-2-clause |
yhat/ggplot | ggplot/scales/scale_fill_funfetti.py | 1 | 1106 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .scale import scale
from copy import deepcopy
class scale_fill_funfetti(scale):
    """
    Make your plots look like funfetti

    Parameters
    ----------
    type: string
        One of confetti or sprinkles (defaults to sprinkles)

    Examples
    --------
    >>> from ggplot import *
    >>> p = ggplot(aes(x='carat', fill='clarity'), data=diamonds)
    >>> p += geom_bar()
    >>> print(p + scale_fill_funfetti())
    """
    VALID_SCALES = ['type', 'palette']

    def __radd__(self, gg):
        # Palette definitions, keyed by the user-facing ``type`` value.
        sprinkles_palette = [
            "#F8909F",
            "#C5DE9C",
            "#8BF3EF",
            "#F9AA50",
            "#EDE5D9"
        ]
        confetti_palette = [
            "#a864fd",
            "#29cdff",
            "#78ff44",
            "#ff718d",
            "#fdff6a"
        ]
        palettes = {"confetti": confetti_palette, "sprinkles": sprinkles_palette}
        # Unknown (or unset) types silently fall back to sprinkles.
        gg.manual_fill_list = palettes.get(self.type, sprinkles_palette)
        return gg
| bsd-2-clause |
rplevka/robottelo | tests/foreman/cli/test_logging.py | 1 | 9907 | """CLI tests for logging.
:Requirement: Logging
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: Logging
:Assignee: shwsingh
:TestType: Functional
:CaseImportance: Medium
:Upstream: No
"""
import re
import pytest
from fauxfactory import gen_string
from nailgun import entities
from robottelo import manifests
from robottelo import ssh
from robottelo.cli.subscription import Subscription
from robottelo.logging import logger
from robottelo.ssh import upload_file
def line_count(file, connection=None):
    """Get number of lines in a file.

    :param file: path of the file on the remote host.
    :param connection: an open ssh connection; when omitted a new one is
        obtained from ``ssh.get_connection()``.
    :return: the line count as a string (the stripped ``wc -l`` output).
    """
    conn = connection or ssh.get_connection()
    wc_result = conn.run(f'wc -l < {file}', output_format='plain')
    return wc_result.stdout.strip('\n')
def cut_lines(start_line, end_line, source_file, out_file, connection=None):
    """Given start and end line numbers, cut lines from source file
    and put them in out file.

    :return: the result of running the remote ``sed`` command.
    """
    conn = connection or ssh.get_connection()
    sed_command = 'sed -n "{0},{1} p" {2} < {2} > {3}'.format(
        start_line, end_line, source_file, out_file
    )
    return conn.run(sed_command)
@pytest.mark.tier4
def test_positive_logging_from_foreman_core():
    """Check that GET command to Hosts API is logged and has request ID.

    :id: 0785260d-cb81-4351-a7cb-d7841335e2de

    :expectedresults: line of log with GET has request ID

    :CaseImportance: Medium
    """
    GET_line_found = False
    source_log = '/var/log/foreman/production.log'
    test_logfile = '/var/tmp/logfile_from_foreman_core'
    with ssh.get_connection() as connection:
        # get the number of lines in the source log before the test
        # NOTE(review): line_count returns the count as a string; sed in
        # cut_lines accepts it as-is, so no int conversion is needed here.
        line_count_start = line_count(source_log, connection)
        # hammer command for this test
        result = connection.run('hammer host list')
        assert result.return_code == 0, "BASH command error?"
        # get the number of lines in the source log after the test
        line_count_end = line_count(source_log, connection)
        # get the log lines of interest, put them in test_logfile
        cut_lines(line_count_start, line_count_end, source_log, test_logfile, connection)
    # use same location on remote and local for log file extract
    ssh.download_file(test_logfile)
    # search the log file extract for the line with GET to host API
    with open(test_logfile) as logfile:
        for line in logfile:
            if re.search(r'Started GET \"\/api/hosts\?page=1', line):
                logger.info('Found the line with GET to hosts API')
                GET_line_found = True
                # Confirm the request ID was logged in the line with GET
                # (format: "[I|app|xxxxxxxx]" with an 8-char hex-ish ID).
                match = re.search(r'\[I\|app\|\w{8}\]', line)
                assert match, "Request ID not found"
                logger.info("Request ID found for logging from foreman core")
                break
    assert GET_line_found, "The GET command to list hosts was not found in logs."
@pytest.mark.tier4
def test_positive_logging_from_foreman_proxy():
    """Check PUT to Smart Proxy API to refresh the features is logged and has request ID.

    The request ID captured from production.log must also appear in the
    foreman-proxy proxy.log for the same operation.

    :id: 0ecd8406-6cf1-4520-b8b6-8a164a1e60c2

    :expectedresults: line of log with PUT has request ID

    :CaseImportance: Medium
    """
    PUT_line_found = False
    request_id = None
    source_log_1 = '/var/log/foreman/production.log'
    test_logfile_1 = '/var/tmp/logfile_1_from_proxy'
    source_log_2 = '/var/log/foreman-proxy/proxy.log'
    test_logfile_2 = '/var/tmp/logfile_2_from_proxy'
    with ssh.get_connection() as connection:
        # get the number of lines in the source logs before the test
        line_count_start_1 = line_count(source_log_1, connection)
        line_count_start_2 = line_count(source_log_2, connection)
        # hammer command for this test
        result = connection.run('hammer proxy refresh-features --id 1')
        assert result.return_code == 0, "BASH command error?"
        # get the number of lines in the source logs after the test
        line_count_end_1 = line_count(source_log_1, connection)
        line_count_end_2 = line_count(source_log_2, connection)
        # get the log lines of interest, put them in the test log files
        cut_lines(line_count_start_1, line_count_end_1, source_log_1, test_logfile_1, connection)
        cut_lines(line_count_start_2, line_count_end_2, source_log_2, test_logfile_2, connection)
    # use same location on remote and local for log file extracts
    ssh.download_file(test_logfile_1)
    ssh.download_file(test_logfile_2)
    # search the production.log extract for the line with PUT to proxy API
    with open(test_logfile_1) as logfile:
        for line in logfile:
            if re.search(r'Started PUT \"\/api\/smart_proxies\/1\/refresh', line):
                logger.info('Found the line with PUT to foreman proxy API')
                PUT_line_found = True
                # Capture the request ID from the "[I|app|xxxxxxxx]" tag.
                # (Previously a bare `\w{8}` search could match any 8-char
                # run earlier in the line, e.g. part of the timestamp.)
                match = re.search(r'\[I\|app\|(\w{8})\]', line)
                assert match, "Request ID not found"
                logger.info("Request ID found for logging from foreman proxy")
                request_id = match.group(1)
                break
    assert PUT_line_found, "The PUT command to refresh proxies was not found in logs."
    # search the whole proxy.log extract for the same request ID.
    # (Previously only the first line was inspected, and `str.find`'s -1
    # "not found" result is truthy, so the assertion could never fail.)
    with open(test_logfile_2) as logfile:
        request_id_in_proxy_log = any(request_id in line for line in logfile)
    assert request_id_in_proxy_log, "Request ID not found in proxy.log"
    logger.info("Request ID also found in proxy.log")
@pytest.mark.tier4
def test_positive_logging_from_candlepin(module_org):
    """Check logging after manifest upload.

    :id: 8c06e501-52d7-4baf-903e-7de9caffb066

    :expectedresults: line of logs with POST has request ID

    :CaseImportance: Medium
    """
    POST_line_found = False
    source_log = '/var/log/candlepin/candlepin.log'
    test_logfile = '/var/tmp/logfile_from_candlepin'
    # regex for a version 4 UUID (8-4-4-12 format)
    regex = r"\b[0-9a-f]{8}\b-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-\b[0-9a-f]{12}\b"
    with ssh.get_connection() as connection:
        # get the number of lines in the source log before the test
        line_count_start = line_count(source_log, connection)
        # command for this test
        with manifests.clone() as manifest:
            upload_file(manifest.content, manifest.filename)
        Subscription.upload({'file': manifest.filename, 'organization-id': module_org.id})
        # get the number of lines in the source log after the test
        line_count_end = line_count(source_log, connection)
        # get the log lines of interest, put them in test_logfile
        cut_lines(line_count_start, line_count_end, source_log, test_logfile, connection)
    # use same location on remote and local for log file extract
    ssh.download_file(test_logfile)
    # search the log file extract for the line with POST to candlepin API.
    # Interpolate the org name into the pattern (previously `.format` was
    # mistakenly applied to `line`, so the literal "{0}" stayed in the regex
    # and the org name was never checked); escape it since org names may
    # contain regex metacharacters.
    post_pattern = r'verb=POST, uri=/candlepin/owners/{0}'.format(re.escape(module_org.name))
    with open(test_logfile) as logfile:
        for line in logfile:
            if re.search(post_pattern, line):
                logger.info('Found the line with POST to candlepin API')
                POST_line_found = True
                # Confirm the request ID (a v4 UUID) was logged with the POST
                match = re.search(regex, line)
                assert match, "Request ID not found"
                logger.info("Request ID found for logging from candlepin")
                break
    assert POST_line_found, "The POST command to candlepin was not found in logs."
@pytest.mark.tier4
def test_positive_logging_from_dynflow(module_org):
    """Check POST to repositories API is logged while enabling a repo \
    and it has the request ID.

    :id: 2d1a5f64-0b1c-4f95-ad20-881134717c4c

    :expectedresults: line of log with POST has request ID

    :CaseImportance: Medium
    """
    POST_line_found = False
    source_log = '/var/log/foreman/production.log'
    test_logfile = '/var/tmp/logfile_dynflow'
    product = entities.Product(organization=module_org).create()
    repo_name = gen_string('alpha')
    with ssh.get_connection() as connection:
        # get the number of lines in the source log before the test
        line_count_start = line_count(source_log, connection)
        # command for this test: creating the repo triggers the dynflow task
        new_repo = entities.Repository(name=repo_name, product=product).create()
        logger.info(f'Created Repo {new_repo.name} for dynflow log test')
        # get the number of lines in the source log after the test
        line_count_end = line_count(source_log, connection)
        # get the log lines of interest, put them in test_logfile
        cut_lines(line_count_start, line_count_end, source_log, test_logfile, connection)
    # use same location on remote and local for log file extract
    ssh.download_file(test_logfile)
    # search the log file extract for the line with POST to repositories API
    with open(test_logfile) as logfile:
        for line in logfile:
            if re.search(r'Started POST \"/katello\/api\/v2\/repositories', line):
                logger.info('Found the line with POST to repositories API.')
                POST_line_found = True
                # Confirm the request ID was logged in the line with POST
                match = re.search(r'\[I\|app\|\w{8}\]', line)
                assert match, "Request ID not found"
                logger.info("Request ID found for logging from dynflow ")
                # stop at the first match, consistent with the sibling tests
                # (previously the loop kept scanning to no effect)
                break
    assert POST_line_found, "The POST command to enable a repo was not found in logs."
| gpl-3.0 |
chrismattmann/girder | plugins/celery_jobs/server/__init__.py | 2 | 3005 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import celery
from girder import events
from girder.models.model_base import ValidationException
from girder.utility.model_importer import ModelImporter
from girder.plugins.jobs.constants import JobStatus
from .constants import PluginSettings
def validateSettings(event):
    """Validate this plugin's settings when they are saved.

    Empty values raise a ValidationException; the celery user ID is also
    checked to reference an existing user. Each handled setting consumes
    the event via preventDefault().stopPropagation().
    """
    key = event.info['key']
    value = event.info['value']

    if key == PluginSettings.BROKER_URL:
        if not value:
            raise ValidationException(
                'Celery broker URL must not be empty.', 'value')
        event.preventDefault().stopPropagation()

    if key == PluginSettings.APP_MAIN:
        if not value:
            raise ValidationException(
                'Celery app main name must not be empty.', 'value')
        event.preventDefault().stopPropagation()

    if key == PluginSettings.CELERY_USER_ID:
        if not value:
            raise ValidationException(
                'Celery user ID must not be empty.', 'value')
        # Raises if no user with this ID exists (exc=True).
        ModelImporter.model('user').load(value, force=True, exc=True)
        event.preventDefault().stopPropagation()
def getCeleryUser():
    """
    Return the celery user specified as a system setting.

    :raises Exception: if the setting is missing, or if it references a
        user that no longer exists.
    """
    userId = ModelImporter.model('setting').get(PluginSettings.CELERY_USER_ID)
    if not userId:
        raise Exception('No celery user ID setting present.')

    celeryUser = ModelImporter.model('user').load(userId, force=True)
    if not celeryUser:
        raise Exception('Celery user does not exist ({}).'.format(userId))

    return celeryUser
def schedule(event):
    """Handle the ``jobs.schedule`` event for celery-handled jobs.

    Jobs whose handler is ``'celery'`` are marked QUEUED, saved, and sent
    to the configured celery broker; the event is consumed so no other
    scheduler picks the job up. All other jobs are ignored.
    """
    job = event.info
    # Bail out early so we don't read settings and build a Celery app for
    # every scheduled job that this handler doesn't own (the original
    # constructed the app unconditionally).
    if job['handler'] != 'celery':
        return

    settingModel = ModelImporter.model('setting')
    broker = settingModel.get(PluginSettings.BROKER_URL)
    appMain = settingModel.get(PluginSettings.APP_MAIN, 'girder_celery')
    celeryapp = celery.Celery(main=appMain, broker=broker)

    job['status'] = JobStatus.QUEUED
    ModelImporter.model('job', 'jobs').save(job)
    event.stopPropagation()
    celeryapp.send_task(
        job['type'], job['args'], job['kwargs'])
def load(info):
    """Plugin entry point: register this plugin's event handlers."""
    bindings = (
        ('model.setting.validate', validateSettings),
        ('jobs.schedule', schedule),
    )
    for eventName, handler in bindings:
        events.bind(eventName, 'celery_jobs', handler)
| apache-2.0 |
pjuu/pjuu | pjuu/lib/indexes.py | 1 | 1615 | # -*- coding: utf8 -*-
"""Creates the MongoDB indexes. This can be run each time the app is deployed
it will have no effect on indexes which are already there.
:license: AGPL v3, see LICENSE for more details
:copyright: 2014-2021 Joe Doherty
"""
# 3rd party imports
import pymongo
# Pjuu imports
from pjuu import mongo as m
from pjuu.lib import keys as k
def ensure_indexes():
    """Create all the MongoDB indexes Pjuu relies on.

    Safe to run on every deploy: ``create_index`` is a no-op for an index
    that already exists with the same options.  (``ensure_index`` was
    deprecated in PyMongo 3.0 and removed in 4.0, so ``create_index`` is
    used here.)

    Please note that _id fields are already indexed by MongoDB. If you need
    to look up a document by a field that is not in here, ensure you need
    it! If it is a one off like how many users are banned it doesn't need
    to be here. If you look up by a key all the time (new feature) it will
    probably need to be indexed.
    """
    # User indexes
    # User name and e-mail address have to be unique across the database
    m.db.users.create_index(
        [('username', pymongo.DESCENDING)],
        unique=True
    )
    m.db.users.create_index(
        [('email', pymongo.DESCENDING)],
        unique=True
    )
    # Set TTL indexes for newly created users (24 hour TTL)
    m.db.users.create_index(
        [('ttl', pymongo.DESCENDING)],
        expireAfterSeconds=k.EXPIRE_24HRS
    )
    # Post indexes
    # Allow us to see all posts made by a user
    m.db.posts.create_index(
        [('user_id', pymongo.DESCENDING)]
    )
    # Allow us to find all replies on a post
    m.db.posts.create_index(
        [('reply_to', pymongo.DESCENDING)]
    )
    # Index hash tags within posts
    m.db.posts.create_index(
        [('hashtags.hashtag', pymongo.DESCENDING)]
    )
| agpl-3.0 |
google-research/rl-reliability-metrics | rl_reliability_metrics/analysis/plot_training_curves_test.py | 1 | 2747 | # coding=utf-8
# Copyright 2019 The Authors of RL Reliability Metrics.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for plot_training_curves."""
from absl.testing import parameterized
import numpy as np
from rl_reliability_metrics.analysis import plot_training_curves as ptc
import unittest
class PlotTrainingCurvesTest(parameterized.TestCase, unittest.TestCase):
  """Tests for the windowing/aggregation helpers in plot_training_curves."""

  @parameterized.parameters(
      (None, [0, 1, 2, 3], [[5, 6, 7, 8], [-1, -2, -3, -4]]),
      (1, [0.5, 1.5, 2.5, 3.5], [[5, 6, 7, 8], [-1, -2, -3, -4]]),
      (3, [1.5, 4.5], [[6, 8], [-2, -4]]),
  )
  def test_window_means(self, window_size, expected_timesteps,
                        expected_window_means):
    # curves has shape [n_runs, 2, n_points]; axis 1 holds
    # (timesteps, values) pairs per run.
    curves = np.array([[[0, 1, 2, 3], [5, 6, 7, 8]],
                       [[0, 1, 2, 3], [-1, -2, -3, -4]]])
    timesteps, window_means = ptc.compute_window_means(curves, window_size)
    np.testing.assert_array_equal(timesteps, expected_timesteps)
    np.testing.assert_array_equal(window_means, expected_window_means)

  def test_compute_means(self):
    # Mean is taken across runs (axis 0), per timestep.
    window_means = [[1, 2, 3, 4], [5, 6, 7, 8]]
    means = ptc.compute_means(window_means)
    np.testing.assert_array_equal(means, [3, 4, 5, 6])

  def test_compute_medians(self):
    # Median across runs, per timestep.
    window_means = [[1, 2, 3, 4], [5, 6, 7, 8], [5, 6, 7, 8]]
    means = ptc.compute_medians(window_means)
    np.testing.assert_array_equal(means, [5, 6, 7, 8])

  @parameterized.parameters(
      ([[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]]),)
  def test_compute_boot_ci(self, window_means, expected):
    # All-zero input gives a degenerate (zero-width) bootstrap CI.
    window_means = np.array(window_means)
    cis = ptc.compute_boot_ci(window_means)
    np.testing.assert_allclose(cis[0], expected[0])
    np.testing.assert_allclose(cis[1], expected[1])

  @parameterized.parameters(
      ([[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]]),
      ([[0, -2], [1, 4]], [[0.1, -1.4], [0.80, 2.8]]),
  )
  def test_compute_percentiles(self, window_means, expected):
    window_means = np.array(window_means)
    percentiles = ptc.compute_percentiles(
        window_means, lower_thresh=10, upper_thresh=80)
    np.testing.assert_allclose(percentiles[0], expected[0])
    np.testing.assert_allclose(percentiles[1], expected[1])
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
  unittest.main()
| apache-2.0 |
drglove/SickRage | lib/profilehooks.py | 143 | 24752 | """
Profiling hooks
This module contains a couple of decorators (`profile` and `coverage`) that
can be used to wrap functions and/or methods to produce profiles and line
coverage reports. There's a third convenient decorator (`timecall`) that
measures the duration of function execution without the extra profiling
overhead.
Usage example (Python 2.4 or newer)::
from profilehooks import profile, coverage
@profile # or @coverage
def fn(n):
if n < 2: return 1
else: return n * fn(n-1)
print fn(42)
Usage example (Python 2.3 or older)::
from profilehooks import profile, coverage
def fn(n):
if n < 2: return 1
else: return n * fn(n-1)
# Now wrap that function in a decorator
fn = profile(fn) # or coverage(fn)
print fn(42)
Reports for all thusly decorated functions will be printed to sys.stdout
on program termination. You can alternatively request for immediate
reports for each call by passing immediate=True to the profile decorator.
There's also a @timecall decorator for printing the time to sys.stderr
every time a function is called, when you just want to get a rough measure
instead of a detailed (but costly) profile.
Caveats
A thread on python-dev convinced me that hotshot produces bogus numbers.
See http://mail.python.org/pipermail/python-dev/2005-November/058264.html
I don't know what will happen if a decorated function will try to call
another decorated function. All decorators probably need to explicitly
support nested profiling (currently TraceFuncCoverage is the only one
that supports this, while HotShotFuncProfile has support for recursive
functions.)
Profiling with hotshot creates temporary files (*.prof for profiling,
*.cprof for coverage) in the current directory. These files are not
cleaned up. Exception: when you specify a filename to the profile
decorator (to store the pstats.Stats object for later inspection),
the temporary file will be the filename you specified with '.raw'
appended at the end.
Coverage analysis with hotshot seems to miss some executions resulting
in lower line counts and some lines erroneously marked as never
executed.  For this reason coverage analysis now uses trace.py which is
slower, but more accurate.
Copyright (c) 2004--2008 Marius Gedminas <marius@pov.lt>
Copyright (c) 2007 Hanno Schlichting
Copyright (c) 2008 Florian Schulze
Released under the MIT licence since December 2006:
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
(Previously it was distributed under the GNU General Public Licence.)
"""
# $Id: profilehooks.py 29 2010-08-13 16:29:20Z mg $
# Module metadata.
__author__ = "Marius Gedminas (marius@gedmin.as)"
__copyright__ = "Copyright 2004-2009 Marius Gedminas"
__license__ = "MIT"
__version__ = "1.4"
__date__ = "2009-03-31"
import atexit
import inspect
import sys
import re
# For profiling
from profile import Profile
import pstats
# For hotshot profiling (inaccurate!)
try:
import hotshot
import hotshot.stats
except ImportError:
hotshot = None
# For trace.py coverage
import trace
# For hotshot coverage (inaccurate!; uses undocumented APIs; might break)
if hotshot is not None:
import _hotshot
import hotshot.log
# For cProfile profiling (best)
try:
import cProfile
except ImportError:
cProfile = None
# For timecall
import time
# Registry of available profiler implementations, keyed by the name that
# can be passed as the `profiler` argument to @profile.  Populated below
# depending on which profiling modules could be imported.
AVAILABLE_PROFILERS = {}
def profile(fn=None, skip=0, filename=None, immediate=False, dirs=False,
            sort=None, entries=40,
            profiler=('cProfile', 'profile', 'hotshot')):
    """Mark `fn` for profiling.

    If `skip` is > 0, first `skip` calls to `fn` will not be profiled.

    If `immediate` is False, profiling results will be printed to
    sys.stdout on program termination.  Otherwise results will be printed
    after each call.

    If `dirs` is False only the name of the file will be printed.
    Otherwise the full path is used.

    `sort` can be a list of sort keys (defaulting to ['cumulative',
    'time', 'calls']).  The following ones are recognized::

        'calls'      -- call count
        'cumulative' -- cumulative time
        'file'       -- file name
        'line'       -- line number
        'module'     -- file name
        'name'       -- function name
        'nfl'        -- name/file/line
        'pcalls'     -- call count
        'stdname'    -- standard name
        'time'       -- internal time

    `entries` limits the output to the first N entries.

    `profiler` can be used to select the preferred profiler, or specify a
    sequence of them, in order of preference.  The default is
    ('cProfile', 'profile', 'hotshot').

    If `filename` is specified, the profile stats will be stored in the
    named file.  You can load them with pstats.Stats(filename).

    Usage::

        def fn(...):
            ...
        fn = profile(fn, skip=1)

    If you are using Python 2.4, you should be able to use the decorator
    syntax::

        @profile(skip=3)
        def fn(...):
            ...

    or just ::

        @profile
        def fn(...):
            ...
    """
    if fn is None: # @profile() syntax -- we are a decorator maker
        def decorator(fn):
            return profile(fn, skip=skip, filename=filename,
                           immediate=immediate, dirs=dirs,
                           sort=sort, entries=entries,
                           profiler=profiler)
        return decorator
    # @profile syntax -- we are a decorator.
    if isinstance(profiler, str):
        profiler = [profiler]
    # Pick the first requested profiler whose implementation was importable
    # (see the AVAILABLE_PROFILERS registrations further down this module).
    for p in profiler:
        if p in AVAILABLE_PROFILERS:
            profiler_class = AVAILABLE_PROFILERS[p]
            break
    else:
        raise ValueError('only these profilers are available: %s'
                         % ', '.join(AVAILABLE_PROFILERS))
    fp = profiler_class(fn, skip=skip, filename=filename,
                        immediate=immediate, dirs=dirs,
                        sort=sort, entries=entries)
    # fp = HotShotFuncProfile(fn, skip=skip, filename=filename, ...)
    # or HotShotFuncProfile
    # We cannot return fp or fp.__call__ directly as that would break method
    # definitions, instead we need to return a plain function.
    def new_fn(*args, **kw):
        return fp(*args, **kw)
    # Function metadata is copied by hand because this module supports
    # Pythons that predate functools.wraps.
    new_fn.__doc__ = fn.__doc__
    new_fn.__name__ = fn.__name__
    new_fn.__dict__ = fn.__dict__
    new_fn.__module__ = fn.__module__
    return new_fn
def coverage(fn):
    """Mark `fn` for line coverage analysis.

    Results will be printed to sys.stdout on program termination.

    Usage::

        def fn(...):
            ...
        fn = coverage(fn)

    If you are using Python 2.4, you should be able to use the decorator
    syntax::

        @coverage
        def fn(...):
            ...
    """
    tracker = TraceFuncCoverage(fn)  # or HotShotFuncCoverage
    # A plain function must be returned (not tracker or tracker.__call__),
    # otherwise decorating a method definition would break.
    def new_fn(*args, **kw):
        return tracker(*args, **kw)
    # Copy metadata by hand; functools.wraps is avoided for very old Pythons.
    new_fn.__doc__ = fn.__doc__
    new_fn.__name__ = fn.__name__
    new_fn.__dict__ = fn.__dict__
    new_fn.__module__ = fn.__module__
    return new_fn
def coverage_with_hotshot(fn):
    """Mark `fn` for line coverage analysis.

    Uses the 'hotshot' module for fast coverage analysis.

    BUG: Produces inaccurate results.

    See the docstring of `coverage` for usage examples.
    """
    fp = HotShotFuncCoverage(fn)
    # We cannot return fp or fp.__call__ directly as that would break method
    # definitions, instead we need to return a plain function.
    def new_fn(*args, **kw):
        return fp(*args, **kw)
    # Copy metadata by hand; functools.wraps is avoided for very old Pythons.
    new_fn.__doc__ = fn.__doc__
    new_fn.__name__ = fn.__name__
    new_fn.__dict__ = fn.__dict__
    new_fn.__module__ = fn.__module__
    return new_fn
class FuncProfile(object):
    """Profiler for a function (uses profile).

    NOTE: this module is Python 2 code (print statements, fn.func_code).
    """
    # This flag is shared between all instances so that nested calls to
    # profiled functions do not try to start a second profiler.
    in_profiler = False
    # Profiler class to use; overridden by CProfileFuncProfile below.
    Profile = Profile
    def __init__(self, fn, skip=0, filename=None, immediate=False, dirs=False,
                 sort=None, entries=40):
        """Creates a profiler for a function.

        Every profiler has its own log file (the name of which is derived
        from the function name).

        FuncProfile registers an atexit handler that prints profiling
        information to sys.stderr when the program terminates.
        """
        self.fn = fn
        self.skip = skip
        self.filename = filename
        self.immediate = immediate
        self.dirs = dirs
        self.sort = sort or ('cumulative', 'time', 'calls')
        if isinstance(self.sort, str):
            self.sort = (self.sort, )
        self.entries = entries
        self.reset_stats()
        atexit.register(self.atexit)
    def __call__(self, *args, **kw):
        """Profile a single call to the function."""
        self.ncalls += 1
        if self.skip > 0:
            self.skip -= 1
            self.skipped += 1
            return self.fn(*args, **kw)
        if FuncProfile.in_profiler:
            # handle recursive calls
            return self.fn(*args, **kw)
        # You cannot reuse the same profiler for many calls and accumulate
        # stats that way. :-/
        profiler = self.Profile()
        try:
            FuncProfile.in_profiler = True
            return profiler.runcall(self.fn, *args, **kw)
        finally:
            FuncProfile.in_profiler = False
            self.stats.add(profiler)
            if self.immediate:
                self.print_stats()
                self.reset_stats()
    def print_stats(self):
        """Print profile information to sys.stdout."""
        funcname = self.fn.__name__
        filename = self.fn.func_code.co_filename
        lineno = self.fn.func_code.co_firstlineno
        print
        print "*** PROFILER RESULTS ***"
        print "%s (%s:%s)" % (funcname, filename, lineno)
        print "function called %d times" % self.ncalls,
        if self.skipped:
            print "(%d calls not profiled)" % self.skipped
        else:
            print
        print
        stats = self.stats
        if self.filename:
            # Dump before strip_dirs so the saved stats keep full paths.
            stats.dump_stats(self.filename)
        if not self.dirs:
            stats.strip_dirs()
        stats.sort_stats(*self.sort)
        stats.print_stats(self.entries)
    def reset_stats(self):
        """Reset accumulated profiler statistics."""
        # Note: not using self.Profile, since pstats.Stats() fails then
        self.stats = pstats.Stats(Profile())
        self.ncalls = 0
        self.skipped = 0
    def atexit(self):
        """Stop profiling and print profile information to sys.stdout.

        This function is registered as an atexit hook.
        """
        if not self.immediate:
            self.print_stats()
# The pure-Python profiler is always available.
AVAILABLE_PROFILERS['profile'] = FuncProfile
if cProfile is not None:
    class CProfileFuncProfile(FuncProfile):
        """Profiler for a function (uses cProfile)."""
        # Same machinery as FuncProfile, just a faster profiler class.
        Profile = cProfile.Profile
    AVAILABLE_PROFILERS['cProfile'] = CProfileFuncProfile
if hotshot is not None:
    class HotShotFuncProfile(object):
        """Profiler for a function (uses hotshot)."""
        # This flag is shared between all instances so recursive/nested
        # profiled calls don't re-enter the profiler.
        in_profiler = False
        def __init__(self, fn, skip=0, filename=None):
            """Creates a profiler for a function.

            Every profiler has its own log file (the name of which is derived
            from the function name).

            HotShotFuncProfile registers an atexit handler that prints
            profiling information to sys.stderr when the program terminates.

            The log file is not removed and remains there to clutter the
            current working directory.
            """
            self.fn = fn
            self.filename = filename
            if self.filename:
                self.logfilename = filename + ".raw"
            else:
                self.logfilename = fn.__name__ + ".prof"
            self.profiler = hotshot.Profile(self.logfilename)
            self.ncalls = 0
            self.skip = skip
            self.skipped = 0
            atexit.register(self.atexit)
        def __call__(self, *args, **kw):
            """Profile a single call to the function."""
            self.ncalls += 1
            if self.skip > 0:
                self.skip -= 1
                self.skipped += 1
                return self.fn(*args, **kw)
            if HotShotFuncProfile.in_profiler:
                # handle recursive calls
                return self.fn(*args, **kw)
            try:
                HotShotFuncProfile.in_profiler = True
                return self.profiler.runcall(self.fn, *args, **kw)
            finally:
                HotShotFuncProfile.in_profiler = False
        def atexit(self):
            """Stop profiling and print profile information to sys.stderr.

            This function is registered as an atexit hook.
            """
            self.profiler.close()
            funcname = self.fn.__name__
            filename = self.fn.func_code.co_filename
            lineno = self.fn.func_code.co_firstlineno
            print
            print "*** PROFILER RESULTS ***"
            print "%s (%s:%s)" % (funcname, filename, lineno)
            print "function called %d times" % self.ncalls,
            if self.skipped:
                print "(%d calls not profiled)" % self.skipped
            else:
                print
            print
            stats = hotshot.stats.load(self.logfilename)
            # hotshot.stats.load takes ages, and the .prof file eats megabytes, but
            # a saved stats object is small and fast
            if self.filename:
                stats.dump_stats(self.filename)
            # it is best to save before strip_dirs
            stats.strip_dirs()
            stats.sort_stats('cumulative', 'time', 'calls')
            stats.print_stats(40)
    AVAILABLE_PROFILERS['hotshot'] = HotShotFuncProfile
class HotShotFuncCoverage:
    """Coverage analysis for a function (uses _hotshot).

    HotShot coverage is reportedly faster than trace.py, but it appears to
    have problems with exceptions; also line counts in coverage reports
    are generally lower from line counts produced by TraceFuncCoverage.
    Is this my bug, or is it a problem with _hotshot?
    """
    def __init__(self, fn):
        """Creates a profiler for a function.

        Every profiler has its own log file (the name of which is derived
        from the function name).

        HotShotFuncCoverage registers an atexit handler that prints
        profiling information to sys.stderr when the program terminates.

        The log file is not removed and remains there to clutter the
        current working directory.
        """
        self.fn = fn
        self.logfilename = fn.__name__ + ".cprof"
        # Undocumented low-level hotshot coverage API.
        self.profiler = _hotshot.coverage(self.logfilename)
        self.ncalls = 0
        atexit.register(self.atexit)
    def __call__(self, *args, **kw):
        """Profile a single call to the function."""
        self.ncalls += 1
        # Note: _hotshot's runcall takes args/kw as two positional values,
        # unlike the usual runcall(fn, *args, **kw) convention.
        return self.profiler.runcall(self.fn, args, kw)
    def atexit(self):
        """Stop profiling and print profile information to sys.stderr.

        This function is registered as an atexit hook.
        """
        self.profiler.close()
        funcname = self.fn.__name__
        filename = self.fn.func_code.co_filename
        lineno = self.fn.func_code.co_firstlineno
        print
        print "*** COVERAGE RESULTS ***"
        print "%s (%s:%s)" % (funcname, filename, lineno)
        print "function called %d times" % self.ncalls
        print
        fs = FuncSource(self.fn)
        reader = hotshot.log.LogReader(self.logfilename)
        for what, (filename, lineno, funcname), tdelta in reader:
            if filename != fs.filename:
                continue
            if what == hotshot.log.LINE:
                fs.mark(lineno)
            if what == hotshot.log.ENTER:
                # hotshot gives us the line number of the function definition
                # and never gives us a LINE event for the first statement in
                # a function, so if we didn't perform this mapping, the first
                # statement would be marked as never executed
                if lineno == fs.firstlineno:
                    lineno = fs.firstcodelineno
                fs.mark(lineno)
        reader.close()
        print fs
class TraceFuncCoverage:
    """Coverage analysis for a function (uses trace module).

    HotShot coverage analysis is reportedly faster, but it appears to have
    problems with exceptions.
    """
    # Shared between all instances so that nested calls work
    tracer = trace.Trace(count=True, trace=False,
                         ignoredirs=[sys.prefix, sys.exec_prefix])
    # This flag is also shared between all instances
    tracing = False
    def __init__(self, fn):
        """Creates a profiler for a function.

        Every profiler has its own log file (the name of which is derived
        from the function name).

        TraceFuncCoverage registers an atexit handler that prints
        profiling information to sys.stderr when the program terminates.

        The log file is not removed and remains there to clutter the
        current working directory.
        """
        self.fn = fn
        self.logfilename = fn.__name__ + ".cprof"
        self.ncalls = 0
        atexit.register(self.atexit)
    def __call__(self, *args, **kw):
        """Profile a single call to the function."""
        self.ncalls += 1
        if TraceFuncCoverage.tracing:
            # Already tracing an outer call; just run the function.
            return self.fn(*args, **kw)
        try:
            TraceFuncCoverage.tracing = True
            return self.tracer.runfunc(self.fn, *args, **kw)
        finally:
            TraceFuncCoverage.tracing = False
    def atexit(self):
        """Stop profiling and print profile information to sys.stderr.

        This function is registered as an atexit hook.
        """
        funcname = self.fn.__name__
        filename = self.fn.func_code.co_filename
        lineno = self.fn.func_code.co_firstlineno
        print
        print "*** COVERAGE RESULTS ***"
        print "%s (%s:%s)" % (funcname, filename, lineno)
        print "function called %d times" % self.ncalls
        print
        fs = FuncSource(self.fn)
        # Transfer the tracer's per-line hit counts onto the annotator.
        for (filename, lineno), count in self.tracer.counts.items():
            if filename != fs.filename:
                continue
            fs.mark(lineno, count)
        print fs
        never_executed = fs.count_never_executed()
        if never_executed:
            print "%d lines were not executed." % never_executed
class FuncSource:
    """Source code annotator for a function.

    Collects per-line execution counts (via mark()) and renders the
    function's source with a count prefix per executable line.
    """
    # Lines matching this are not flagged as "never executed": a bare
    # `finally:` gets no trace event of its own.
    blank_rx = re.compile(r"^\s*finally:\s*(#.*)?$")
    def __init__(self, fn):
        self.fn = fn
        self.filename = inspect.getsourcefile(fn)
        self.source, self.firstlineno = inspect.getsourcelines(fn)
        # lineno -> execution count for executable lines.
        self.sourcelines = {}
        self.firstcodelineno = self.firstlineno
        self.find_source_lines()
    def find_source_lines(self):
        """Mark all executable source lines in fn as executed 0 times."""
        strs = trace.find_strings(self.filename)
        lines = trace.find_lines_from_code(self.fn.func_code, strs)
        # Python 2 only: sys.maxint does not exist on Python 3.
        self.firstcodelineno = sys.maxint
        for lineno in lines:
            self.firstcodelineno = min(self.firstcodelineno, lineno)
            self.sourcelines.setdefault(lineno, 0)
        if self.firstcodelineno == sys.maxint:
            # No executable lines found; fall back to the def line.
            self.firstcodelineno = self.firstlineno
    def mark(self, lineno, count=1):
        """Mark a given source line as executed count times.

        Multiple calls to mark for the same lineno add up.
        """
        self.sourcelines[lineno] = self.sourcelines.get(lineno, 0) + count
    def count_never_executed(self):
        """Count statements that were never executed."""
        lineno = self.firstlineno
        counter = 0
        for line in self.source:
            if self.sourcelines.get(lineno) == 0:
                if not self.blank_rx.match(line):
                    counter += 1
            lineno += 1
        return counter
    def __str__(self):
        """Return annotated source code for the function."""
        lines = []
        lineno = self.firstlineno
        for line in self.source:
            counter = self.sourcelines.get(lineno)
            if counter is None:
                # Not an executable line (comment, blank, continuation).
                prefix = ' ' * 7
            elif counter == 0:
                if self.blank_rx.match(line):
                    prefix = ' ' * 7
                else:
                    # ">>>>>> " marks executable lines that never ran.
                    prefix = '>' * 6 + ' '
            else:
                prefix = '%5d: ' % counter
            lines.append(prefix + line)
            lineno += 1
        return ''.join(lines)
def timecall(fn=None, immediate=True, timer=time.time):
    """Wrap `fn` and print its execution time.

    Example::

        @timecall
        def somefunc(x, y):
            time.sleep(x * y)
        somefunc(2, 3)

    will print the time taken by somefunc on every call.  If you want just
    a summary at program termination, use

        @timecall(immediate=False)

    You can also choose a timing method other than the default
    ``time.time()``, e.g.:

        @timecall(timer=time.clock)
    """
    if fn is None:
        # Invoked as @timecall(...): build a decorator that closes over the
        # keyword arguments.
        def decorator(func):
            return timecall(func, immediate=immediate, timer=timer)
        return decorator
    # Invoked as plain @timecall.
    ft = FuncTimer(fn, immediate=immediate, timer=timer)
    # A plain function must be returned (not ft or ft.__call__), otherwise
    # decorating a method definition would break.
    def new_fn(*args, **kw):
        return ft(*args, **kw)
    # Copy metadata by hand; functools.wraps is avoided for very old Pythons.
    new_fn.__doc__ = fn.__doc__
    new_fn.__name__ = fn.__name__
    new_fn.__dict__ = fn.__dict__
    new_fn.__module__ = fn.__module__
    return new_fn
class FuncTimer(object):
    """Callable wrapper that times each invocation of a function.

    With immediate=True a per-call duration is printed to stderr; otherwise
    a summary is printed at interpreter exit.
    """
    def __init__(self, fn, immediate, timer):
        self.fn = fn
        self.ncalls = 0
        self.totaltime = 0
        self.immediate = immediate
        self.timer = timer
        if not immediate:
            atexit.register(self.atexit)
    def __call__(self, *args, **kw):
        """Time a single call to the function."""
        fn = self.fn
        timer = self.timer
        self.ncalls += 1
        try:
            start = timer()
            return fn(*args, **kw)
        finally:
            # Timing happens in `finally` so exceptions are still counted.
            duration = timer() - start
            self.totaltime += duration
            if self.immediate:
                funcname = fn.__name__
                filename = fn.func_code.co_filename
                lineno = fn.func_code.co_firstlineno
                print >> sys.stderr, "\n %s (%s:%s):\n %.3f seconds\n" % (
                    funcname, filename, lineno, duration)
    def atexit(self):
        """Print the call-count/total-time summary; registered via atexit."""
        if not self.ncalls:
            # Never called: nothing to report (also avoids division by zero).
            return
        funcname = self.fn.__name__
        filename = self.fn.func_code.co_filename
        lineno = self.fn.func_code.co_firstlineno
        print ("\n %s (%s:%s):\n"
               " %d calls, %.3f seconds (%.3f seconds per call)\n" % (
                   funcname, filename, lineno, self.ncalls,
                   self.totaltime, self.totaltime / self.ncalls))
| gpl-3.0 |
rain1988/you-get | src/you_get/extractors/iqiyi.py | 6 | 5273 | #!/usr/bin/env python
__all__ = ['iqiyi_download']
from ..common import *
from uuid import uuid4
from random import random,randint
import json
from math import floor
from zlib import decompress
import hashlib
'''
Changelog:
-> http://www.iqiyi.com/common/flashplayer/20150810/MainPlayer_5_2_26_c3_3_7_1.swf
http://www.iqiyi.com/common/flashplayer/20150811/MainPlayer_5_2_26_c3_3_7_2.swf
http://www.iqiyi.com/common/flashplayer/20150820/MainPlayer_5_2_27_2_c3_3_7_3.swf
some small changes in Zombie.bite function
-> http://www.iqiyi.com/common/flashplayer/20150805/MainPlayer_5_2_26_c3_3_7.swf
former key still works until 20150809
In Zombie, kcuf = [13, 3, 0, 15, 8, 2, 11, 7, 10, 1, 12, 9, 14, 6, 4, 5], which is constructed in LogManager, CoreManager, impls.pub.setting, impls.pub.statistics and StageVideoManager;
this creates an array of ['2', 'd', 'f', 'e', '0', 'c', '5', '3', '8', 'b', '9', '6', 'a', '7', '4', '1']
-> http://www.iqiyi.com/common/flashplayer/20150710/MainPlayer_5_2_25_c3_3_5_1.swf
-> http://www.iqiyi.com/common/flashplayer/20150703/MainPlayer_5_2_24_1_c3_3_3.swf
SingletonClass.ekam
-> http://www.iqiyi.com/common/flashplayer/20150618/MainPlayer_5_2_24_1_c3_3_2.swf
In this version Z7elzzup.cexe,just use node.js to run this code(with some modification) and get innerkey.
'''
'''
com.qiyi.player.core.model.def.DefinitonEnum
bid meaning for quality
0 none
1 standard
2 high
3 super
4 suprt-high
5 fullhd
10 4k
96 topspeed
'''
def mix(tvid):
    """Build the (tm, sc, src) auth triple used when querying VMS.

    tm is a random token, sc is the md5 hexdigest of a fixed salt
    concatenated with tm and the tvid, and src is a fixed magic string.
    """
    salt = '3cba91f1453145438ac5e4f5983bc086'
    tm = str(randint(2000, 4000))
    src = 'eknas'
    # md5(salt + tm + tvid), hex-encoded.
    sc = hashlib.md5((salt + tm + tvid).encode('utf-8')).hexdigest()
    return tm, sc, src
def getVRSXORCode(arg1, arg2):
    """XOR ``arg1`` with a key selected by ``arg2`` modulo 3.

    The key schedule (103, 121, 72) comes from the iQIYI flash player.
    """
    keys = (103, 121, 72)
    return arg1 ^ keys[arg2 % 3]


def getVrsEncodeCode(vlink):
    """Decode an obfuscated '-'-separated hex video link into a string."""
    parts = vlink.split("-")
    n = len(parts)
    # Walk the hex parts front-to-back while the index counts down,
    # XOR-decode each byte, then reverse the accumulated string.
    decoded = [chr(getVRSXORCode(int(parts[n - i - 1], 16), i))
               for i in range(n - 1, -1, -1)]
    return ''.join(decoded)[::-1]
def getVMS(tvid,vid,uid):
    """Fetch and return the parsed VMS JSON metadata for a video.

    tvid/vid identify the video; uid is a randomly generated client id.
    Builds the signed query string expected by cache.video.qiyi.com.
    """
    #tm ->the flash run time for md5 usage
    #um -> vip 1 normal 0
    #authkey -> for password protected video ,replace '' with your password
    #puid user.passportid may empty?
    #TODO: support password protected video
    # NOTE(review): `src` returned by mix() is unused here -- presumably a
    # leftover from an earlier API version; confirm before removing.
    tm,sc,src = mix(tvid)
    vmsreq='http://cache.video.qiyi.com/vms?key=fvip&src=1702633101b340d8917a69cf8a4b8c7' +\
        "&tvId="+tvid+"&vid="+vid+"&vinfo=1&tm="+tm+\
        "&enc="+sc+\
        "&qyid="+uid+"&tn="+str(random()) +"&um=1" +\
        "&authkey="+hashlib.new('md5',bytes(''+str(tm)+tvid,'utf-8')).hexdigest()
    return json.loads(get_content(vmsreq))
def getDispathKey(rid):
    """Compute the time-bucketed dispatch key inserted into the CDN URL.

    (Function name kept as-is: 'Dispath' is a typo, but renaming would
    break callers.)
    """
    tp=")(*&^flash@#$%a" #magic from swf
    # Fetch server time so the key falls in the same 10-minute bucket the
    # server will validate against.
    time=json.loads(get_content("http://data.video.qiyi.com/t?tn="+str(random())))["t"]
    t=str(int(floor(int(time)/(10*60.0))))
    return hashlib.new("md5",bytes(t+tp+rid,"utf-8")).hexdigest()
def iqiyi_download(url, output_dir = '.', merge = True, info_only = False, **kwargs):
    """Download (or just print info for) an iQIYI video page URL.

    Scrapes tvid/videoid from the page, fetches VMS metadata, selects the
    highest non-4K quality, resolves the per-segment CDN URLs, and hands
    them to download_urls.
    """
    gen_uid=uuid4().hex
    html = get_html(url)
    # tvid/videoid live either in player data attributes or the query string.
    tvid = r1(r'data-player-tvid="([^"]+)"', html) or r1(r'tvid=([^&]+)', url)
    videoid = r1(r'data-player-videoid="([^"]+)"', html) or r1(r'vid=([^&]+)', url)
    assert tvid
    assert videoid
    info = getVMS(tvid, videoid, gen_uid)
    assert info["code"] == "A000000"
    title = info["data"]["vi"]["vn"]
    # data.vp = json.data.vp
    # data.vi = json.data.vi
    # data.f4v = json.data.f4v
    # if movieIsMember data.vp = json.data.np
    #for highest qualities
    #for http://www.iqiyi.com/v_19rrmmz5yw.html not vp -> np
    try:
        if info["data"]['vp']["tkl"]=='' :
            raise ValueError
    except:
        log.e("[Error] Do not support for iQIYI VIP video.")
        exit(-1)
    # Pick the highest bid <= 10 (bids above 10 are 4K/topspeed variants,
    # see the bid table in the module comment).
    bid=0
    for i in info["data"]["vp"]["tkl"][0]["vs"]:
        if int(i["bid"])<=10 and int(i["bid"])>=bid:
            bid=int(i["bid"])
            video_links=i["fs"] #now in i["flvs"] not in i["fs"]
            if not i["fs"][0]["l"].startswith("/"):
                tmp = getVrsEncodeCode(i["fs"][0]["l"])
                if tmp.endswith('mp4'):
                    video_links = i["flvs"]
    urls=[]
    size=0
    for i in video_links:
        vlink=i["l"]
        if not vlink.startswith("/"):
            #vlink is encode
            vlink=getVrsEncodeCode(vlink)
        # Key derived from the segment file name ties the URL to a
        # 10-minute validity window.
        key=getDispathKey(vlink.split("/")[-1].split(".")[0])
        size+=i["b"]
        baseurl=info["data"]["vp"]["du"].split("/")
        baseurl.insert(-1,key)
        url="/".join(baseurl)+vlink+'?su='+gen_uid+'&qyid='+uuid4().hex+'&client=&z=&bt=&ct=&tn='+str(randint(10000,20000))
        urls.append(json.loads(get_content(url))["l"])
    #download should be complete in 10 minutes
    #because the url is generated before start downloading
    #and the key may be expired after 10 minutes
    print_info(site_info, title, 'flv', size)
    if not info_only:
        download_urls(urls, title, 'flv', size, output_dir = output_dir, merge = merge)
# Extractor registration: names consumed by you-get's common machinery.
site_info = "iQIYI.com"
download = iqiyi_download
download_playlist = playlist_not_supported('iqiyi')
| mit |
erlimar/prototypeguide | src/lib/jinja2/testsuite/lexnparse.py | 402 | 22314 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.lexnparse
~~~~~~~~~~~~~~~~~~~~~~~~~~
All the unittests regarding lexing, parsing and syntax.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Environment, Template, TemplateSyntaxError, \
UndefinedError, nodes
from jinja2._compat import next, iteritems, text_type, PY2
from jinja2.lexer import Token, TokenStream, TOKEN_EOF, \
TOKEN_BLOCK_BEGIN, TOKEN_BLOCK_END
env = Environment()
# how does a string look like in jinja syntax?
if PY2:
def jinja_string_repr(string):
return repr(string)[1:]
else:
jinja_string_repr = repr
class TokenStreamTestCase(JinjaTestCase):
test_tokens = [Token(1, TOKEN_BLOCK_BEGIN, ''),
Token(2, TOKEN_BLOCK_END, ''),
]
def test_simple(self):
ts = TokenStream(self.test_tokens, "foo", "bar")
assert ts.current.type is TOKEN_BLOCK_BEGIN
assert bool(ts)
assert not bool(ts.eos)
next(ts)
assert ts.current.type is TOKEN_BLOCK_END
assert bool(ts)
assert not bool(ts.eos)
next(ts)
assert ts.current.type is TOKEN_EOF
assert not bool(ts)
assert bool(ts.eos)
def test_iter(self):
token_types = [t.type for t in TokenStream(self.test_tokens, "foo", "bar")]
assert token_types == ['block_begin', 'block_end', ]
class LexerTestCase(JinjaTestCase):
def test_raw1(self):
tmpl = env.from_string('{% raw %}foo{% endraw %}|'
'{%raw%}{{ bar }}|{% baz %}{% endraw %}')
assert tmpl.render() == 'foo|{{ bar }}|{% baz %}'
def test_raw2(self):
tmpl = env.from_string('1 {%- raw -%} 2 {%- endraw -%} 3')
assert tmpl.render() == '123'
def test_balancing(self):
env = Environment('{%', '%}', '${', '}')
tmpl = env.from_string('''{% for item in seq
%}${{'foo': item}|upper}{% endfor %}''')
assert tmpl.render(seq=list(range(3))) == "{'FOO': 0}{'FOO': 1}{'FOO': 2}"
def test_comments(self):
env = Environment('<!--', '-->', '{', '}')
tmpl = env.from_string('''\
<ul>
<!--- for item in seq -->
<li>{item}</li>
<!--- endfor -->
</ul>''')
assert tmpl.render(seq=list(range(3))) == ("<ul>\n <li>0</li>\n "
"<li>1</li>\n <li>2</li>\n</ul>")
def test_string_escapes(self):
for char in u'\0', u'\u2668', u'\xe4', u'\t', u'\r', u'\n':
tmpl = env.from_string('{{ %s }}' % jinja_string_repr(char))
assert tmpl.render() == char
assert env.from_string('{{ "\N{HOT SPRINGS}" }}').render() == u'\u2668'
def test_bytefallback(self):
from pprint import pformat
tmpl = env.from_string(u'''{{ 'foo'|pprint }}|{{ 'bär'|pprint }}''')
assert tmpl.render() == pformat('foo') + '|' + pformat(u'bär')
def test_operators(self):
from jinja2.lexer import operators
for test, expect in iteritems(operators):
if test in '([{}])':
continue
stream = env.lexer.tokenize('{{ %s }}' % test)
next(stream)
assert stream.current.type == expect
def test_normalizing(self):
for seq in '\r', '\r\n', '\n':
env = Environment(newline_sequence=seq)
tmpl = env.from_string('1\n2\r\n3\n4\n')
result = tmpl.render()
assert result.replace(seq, 'X') == '1X2X3X4'
def test_trailing_newline(self):
for keep in [True, False]:
env = Environment(keep_trailing_newline=keep)
for template,expected in [
('', {}),
('no\nnewline', {}),
('with\nnewline\n', {False: 'with\nnewline'}),
('with\nseveral\n\n\n', {False: 'with\nseveral\n\n'}),
]:
tmpl = env.from_string(template)
expect = expected.get(keep, template)
result = tmpl.render()
assert result == expect, (keep, template, result, expect)
class ParserTestCase(JinjaTestCase):
    """Parsing behaviour under alternative delimiter configurations.

    NOTE(review): leading whitespace inside the triple-quoted template
    literals looks like it may have been stripped in this copy of the
    file -- verify the template bodies against upstream Jinja2.
    """

    def test_php_syntax(self):
        """PHP-style delimiters with manual whitespace control."""
        environment = Environment('<?', '?>', '<?=', '?>', '<!--', '-->')
        template = environment.from_string('''\
<!-- I'm a comment, I'm not interesting -->\
<? for item in seq -?>
<?= item ?>
<?- endfor ?>''')
        assert template.render(seq=list(range(5))) == '01234'

    def test_erb_syntax(self):
        """ERB-style delimiters with manual whitespace control."""
        environment = Environment('<%', '%>', '<%=', '%>', '<%#', '%>')
        template = environment.from_string('''\
<%# I'm a comment, I'm not interesting %>\
<% for item in seq -%>
<%= item %>
<%- endfor %>''')
        assert template.render(seq=list(range(5))) == '01234'

    def test_comment_syntax(self):
        """HTML-comment style block delimiters parse correctly."""
        environment = Environment('<!--', '-->', '${', '}', '<!--#', '-->')
        template = environment.from_string('''\
<!--# I'm a comment, I'm not interesting -->\
<!-- for item in seq --->
${item}
<!--- endfor -->''')
        assert template.render(seq=list(range(5))) == '01234'

    def test_balancing(self):
        """Braces inside a dict literal do not confuse the default lexer."""
        template = env.from_string('''{{{'foo':'bar'}.foo}}''')
        assert template.render() == 'bar'

    def test_start_comment(self):
        """A template may begin with a multi-line comment."""
        template = env.from_string('''{# foo comment
and bar comment #}
{% macro blub() %}foo{% endmacro %}
{{ blub() }}''')
        assert template.render().strip() == 'foo'

    def test_line_syntax(self):
        """Line statements work, with and without line comments."""
        environment = Environment('<%', '%>', '${', '}', '<%#', '%>', '%')
        template = environment.from_string('''\
<%# regular comment %>
% for item in seq:
${item}
% endfor''')
        rendered = template.render(seq=list(range(5)))
        assert [int(piece.strip()) for piece in rendered.split()] == list(range(5))

        environment = Environment('<%', '%>', '${', '}', '<%#', '%>', '%', '##')
        template = environment.from_string('''\
<%# regular comment %>
% for item in seq:
${item} ## the rest of the stuff
% endfor''')
        rendered = template.render(seq=list(range(5)))
        assert [int(piece.strip()) for piece in rendered.split()] == list(range(5))

    def test_line_syntax_priority(self):
        """Line-statement vs. line-comment prefixes resolve correctly."""
        # XXX: why is the whitespace there in front of the newline?
        environment = Environment('{%', '%}', '${', '}', '/*', '*/', '##', '#')
        template = environment.from_string('''\
/* ignore me.
I'm a multiline comment */
## for item in seq:
* ${item} # this is just extra stuff
## endfor''')
        assert template.render(seq=[1, 2]).strip() == '* 1\n* 2'

        environment = Environment('{%', '%}', '${', '}', '/*', '*/', '#', '##')
        template = environment.from_string('''\
/* ignore me.
I'm a multiline comment */
# for item in seq:
* ${item} ## this is just extra stuff
## extra stuff i just want to ignore
# endfor''')
        assert template.render(seq=[1, 2]).strip() == '* 1\n\n* 2'

    def test_error_messages(self):
        """Syntax errors surface the documented, helpful messages."""
        def assert_error(code, expected):
            # Compiling must fail with exactly the expected message.
            try:
                Template(code)
            except TemplateSyntaxError as e:
                assert str(e) == expected, 'unexpected error message'
            else:
                assert False, 'that was supposed to be an error'

        assert_error('{% for item in seq %}...{% endif %}',
                     "Encountered unknown tag 'endif'. Jinja was looking "
                     "for the following tags: 'endfor' or 'else'. The "
                     "innermost block that needs to be closed is 'for'.")
        assert_error('{% if foo %}{% for item in seq %}...{% endfor %}{% endfor %}',
                     "Encountered unknown tag 'endfor'. Jinja was looking for "
                     "the following tags: 'elif' or 'else' or 'endif'. The "
                     "innermost block that needs to be closed is 'if'.")
        assert_error('{% if foo %}',
                     "Unexpected end of template. Jinja was looking for the "
                     "following tags: 'elif' or 'else' or 'endif'. The "
                     "innermost block that needs to be closed is 'if'.")
        assert_error('{% for item in seq %}',
                     "Unexpected end of template. Jinja was looking for the "
                     "following tags: 'endfor' or 'else'. The innermost block "
                     "that needs to be closed is 'for'.")
        assert_error('{% block foo-bar-baz %}',
                     "Block names in Jinja have to be valid Python identifiers "
                     "and may not contain hyphens, use an underscore instead.")
        assert_error('{% unknown_tag %}',
                     "Encountered unknown tag 'unknown_tag'.")
class SyntaxTestCase(JinjaTestCase):
    """Expression and statement syntax of the default lexer/parser."""

    def test_call(self):
        """Positional, keyword, *args and **kwargs mix in one call."""
        env = Environment()
        env.globals['foo'] = lambda a, b, c, e, g: a + b + c + e + g
        tmpl = env.from_string("{{ foo('a', c='d', e='f', *['b'], **{'g': 'h'}) }}")
        assert tmpl.render() == 'abdfh'

    def test_slicing(self):
        """List slicing, including negative step."""
        tmpl = env.from_string('{{ [1, 2, 3][:] }}|{{ [1, 2, 3][::-1] }}')
        assert tmpl.render() == '[1, 2, 3]|[3, 2, 1]'

    def test_attr(self):
        """Attribute and subscript access are equivalent for dicts."""
        tmpl = env.from_string("{{ foo.bar }}|{{ foo['bar'] }}")
        assert tmpl.render(foo={'bar': 42}) == '42|42'

    def test_subscript(self):
        """Integer subscripts, including negative indices."""
        tmpl = env.from_string("{{ foo[0] }}|{{ foo[-1] }}")
        assert tmpl.render(foo=[0, 1, 2]) == '0|2'

    def test_tuple(self):
        """Tuple literals of length 0, 1 and 2."""
        tmpl = env.from_string('{{ () }}|{{ (1,) }}|{{ (1, 2) }}')
        assert tmpl.render() == '()|(1,)|(1, 2)'

    def test_math(self):
        """Arithmetic precedence and the power operator."""
        tmpl = env.from_string('{{ (1 + 1 * 2) - 3 / 2 }}|{{ 2**3 }}')
        assert tmpl.render() == '1.5|8'

    def test_div(self):
        """Floor division, true division and modulo."""
        tmpl = env.from_string('{{ 3 // 2 }}|{{ 3 / 2 }}|{{ 3 % 2 }}')
        assert tmpl.render() == '1|1.5|1'

    def test_unary(self):
        """Unary plus and minus."""
        tmpl = env.from_string('{{ +3 }}|{{ -3 }}')
        assert tmpl.render() == '3|-3'

    def test_concat(self):
        """The ~ operator stringifies and concatenates."""
        tmpl = env.from_string("{{ [1, 2] ~ 'foo' }}")
        assert tmpl.render() == '[1, 2]foo'

    def test_compare(self):
        """All comparison operators."""
        tmpl = env.from_string('{{ 1 > 0 }}|{{ 1 >= 1 }}|{{ 2 < 3 }}|'
                               '{{ 2 == 2 }}|{{ 1 <= 1 }}')
        assert tmpl.render() == 'True|True|True|True|True'

    def test_inop(self):
        """The `in` and `not in` operators."""
        tmpl = env.from_string('{{ 1 in [1, 2, 3] }}|{{ 1 not in [1, 2, 3] }}')
        assert tmpl.render() == 'True|False'

    def test_literals(self):
        """Empty list/dict/tuple literals."""
        tmpl = env.from_string('{{ [] }}|{{ {} }}|{{ () }}')
        assert tmpl.render().lower() == '[]|{}|()'

    def test_bool(self):
        """Boolean operators and their precedence."""
        tmpl = env.from_string('{{ true and false }}|{{ false '
                               'or true }}|{{ not false }}')
        assert tmpl.render() == 'False|True|True'

    def test_grouping(self):
        """Parenthesized boolean expressions."""
        tmpl = env.from_string('{{ (true and false) or (false and true) and not false }}')
        assert tmpl.render() == 'False'

    def test_django_attr(self):
        """Django-style integer attribute access on sequences."""
        tmpl = env.from_string('{{ [1, 2, 3].0 }}|{{ [[1]].0.0 }}')
        assert tmpl.render() == '1|1'

    def test_conditional_expression(self):
        """Full conditional (ternary) expression."""
        tmpl = env.from_string('''{{ 0 if true else 1 }}''')
        assert tmpl.render() == '0'

    def test_short_conditional_expression(self):
        """A conditional without else yields undefined."""
        tmpl = env.from_string('<{{ 1 if false }}>')
        assert tmpl.render() == '<>'
        tmpl = env.from_string('<{{ (1 if false).bar }}>')
        self.assert_raises(UndefinedError, tmpl.render)

    def test_filter_priority(self):
        """Filters bind tighter than the + operator."""
        tmpl = env.from_string('{{ "foo"|upper + "bar"|upper }}')
        assert tmpl.render() == 'FOOBAR'

    def test_function_calls(self):
        """Invalid call signatures raise; valid ones parse as expressions."""
        tests = [
            (True, '*foo, bar'),
            (True, '*foo, *bar'),
            (True, '*foo, bar=42'),
            (True, '**foo, *bar'),
            (True, '**foo, bar'),
            (False, 'foo, bar'),
            (False, 'foo, bar=42'),
            (False, 'foo, bar=23, *args'),
            (False, 'a, b=c, *d, **e'),
            (False, '*foo, **bar')
        ]
        for should_fail, sig in tests:
            if should_fail:
                self.assert_raises(TemplateSyntaxError,
                                   env.from_string, '{{ foo(%s) }}' % sig)
            else:
                # BUG FIX: previously the valid signatures were compiled as
                # plain template *text* ('foo(%s)' without {{ }}), so this
                # branch exercised nothing.  Wrap them in an expression so
                # the call syntax is actually parsed.
                env.from_string('{{ foo(%s) }}' % sig)

    def test_tuple_expr(self):
        """Tuple expressions parse in all supported positions."""
        for tmpl in [
            '{{ () }}',
            '{{ (1, 2) }}',
            '{{ (1, 2,) }}',
            '{{ 1, }}',
            '{{ 1, 2 }}',
            '{% for foo, bar in seq %}...{% endfor %}',
            '{% for x in foo, bar %}...{% endfor %}',
            '{% for x in foo, %}...{% endfor %}'
        ]:
            assert env.from_string(tmpl)

    def test_trailing_comma(self):
        """Trailing commas are allowed in tuple/list/dict literals."""
        tmpl = env.from_string('{{ (1, 2,) }}|{{ [1, 2,] }}|{{ {1: 2,} }}')
        assert tmpl.render().lower() == '(1, 2)|[1, 2]|{1: 2}'

    def test_block_end_name(self):
        """endblock may repeat the block name, but it must match."""
        env.from_string('{% block foo %}...{% endblock foo %}')
        self.assert_raises(TemplateSyntaxError, env.from_string,
                           '{% block x %}{% endblock y %}')

    def test_constant_casing(self):
        """Constants accept title- and lower-case but not upper-case."""
        for const in True, False, None:
            tmpl = env.from_string('{{ %s }}|{{ %s }}|{{ %s }}' % (
                str(const), str(const).lower(), str(const).upper()
            ))
            # The upper-case spelling is treated as an undefined name.
            assert tmpl.render() == '%s|%s|' % (const, const)

    def test_test_chaining(self):
        """Chained tests are a syntax error; combining with `or` works."""
        self.assert_raises(TemplateSyntaxError, env.from_string,
                           '{{ foo is string is sequence }}')
        assert env.from_string('{{ 42 is string or 42 is number }}'
                               ).render() == 'True'

    def test_string_concatenation(self):
        """Adjacent string literals concatenate implicitly."""
        tmpl = env.from_string('{{ "foo" "bar" "baz" }}')
        assert tmpl.render() == 'foobarbaz'

    def test_notin(self):
        """`not ... in` spelled with a prefixed not."""
        bar = range(100)
        tmpl = env.from_string('''{{ not 42 in bar }}''')
        assert tmpl.render(bar=bar) == text_type(not 42 in bar)

    def test_implicit_subscribed_tuple(self):
        """foo[1, 2] passes an implicit tuple subscript."""
        class Foo(object):
            def __getitem__(self, x):
                return x
        t = env.from_string('{{ foo[1, 2] }}')
        assert t.render(foo=Foo()) == u'(1, 2)'

    def test_raw2(self):
        """raw blocks pass template syntax through untouched."""
        tmpl = env.from_string('{% raw %}{{ FOO }} and {% BAR %}{% endraw %}')
        assert tmpl.render() == '{{ FOO }} and {% BAR %}'

    def test_const(self):
        """Constants render and respond to `is defined`."""
        tmpl = env.from_string('{{ true }}|{{ false }}|{{ none }}|'
                               '{{ none is defined }}|{{ missing is defined }}')
        assert tmpl.render() == 'True|False|None|True|False'

    def test_neg_filter_priority(self):
        """Filters bind tighter than unary minus in the parse tree."""
        node = env.parse('{{ -1|foo }}')
        assert isinstance(node.body[0].nodes[0], nodes.Filter)
        assert isinstance(node.body[0].nodes[0].node, nodes.Neg)

    def test_const_assign(self):
        """Assigning to a constant name is a syntax error."""
        constass1 = '''{% set true = 42 %}'''
        constass2 = '''{% for none in seq %}{% endfor %}'''
        for tmpl in constass1, constass2:
            self.assert_raises(TemplateSyntaxError, env.from_string, tmpl)

    def test_localset(self):
        """set inside a for loop does not leak to the outer scope."""
        tmpl = env.from_string('''{% set foo = 0 %}\
{% for item in [1, 2] %}{% set foo = 1 %}{% endfor %}\
{{ foo }}''')
        assert tmpl.render() == '0'

    def test_parse_unary(self):
        """Unary minus with subscript and filter precedence."""
        tmpl = env.from_string('{{ -foo["bar"] }}')
        assert tmpl.render(foo={'bar': 42}) == '-42'
        tmpl = env.from_string('{{ -foo["bar"]|abs }}')
        assert tmpl.render(foo={'bar': 42}) == '42'
class LstripBlocksTestCase(JinjaTestCase):
    """Behaviour of the lstrip_blocks / trim_blocks environment options.

    NOTE(review): leading whitespace inside several template literals
    (and some expected values) looks like it may have been mangled when
    this copy was produced -- verify the exact spacing against upstream
    Jinja2 before trusting these templates.
    """

    def test_lstrip(self):
        """lstrip_blocks removes whitespace before a block tag."""
        environment = Environment(lstrip_blocks=True, trim_blocks=False)
        template = environment.from_string(''' {% if True %}\n {% endif %}''')
        assert template.render() == "\n"

    def test_lstrip_trim(self):
        """lstrip_blocks plus trim_blocks removes the newline as well."""
        environment = Environment(lstrip_blocks=True, trim_blocks=True)
        template = environment.from_string(''' {% if True %}\n {% endif %}''')
        assert template.render() == ""

    def test_no_lstrip(self):
        """The `+` modifier disables lstripping for a single tag."""
        environment = Environment(lstrip_blocks=True, trim_blocks=False)
        template = environment.from_string(''' {%+ if True %}\n {%+ endif %}''')
        assert template.render() == " \n "

    def test_lstrip_endline(self):
        """Only run-of-whitespace before a tag is stripped, not content."""
        environment = Environment(lstrip_blocks=True, trim_blocks=False)
        template = environment.from_string(''' hello{% if True %}\n goodbye{% endif %}''')
        assert template.render() == " hello\n goodbye"

    def test_lstrip_inline(self):
        """Inline tags at line start are lstripped."""
        environment = Environment(lstrip_blocks=True, trim_blocks=False)
        template = environment.from_string(''' {% if True %}hello {% endif %}''')
        assert template.render() == 'hello '

    def test_lstrip_nested(self):
        """Nested tags on one line are each lstripped appropriately."""
        environment = Environment(lstrip_blocks=True, trim_blocks=False)
        template = environment.from_string(''' {% if True %}a {% if True %}b {% endif %}c {% endif %}''')
        assert template.render() == 'a b c '

    def test_lstrip_left_chars(self):
        """Non-whitespace before a tag prevents lstripping."""
        environment = Environment(lstrip_blocks=True, trim_blocks=False)
        template = environment.from_string(''' abc {% if True %}
hello{% endif %}''')
        assert template.render() == ' abc \n hello'

    def test_lstrip_embeded_strings(self):
        """Tag-like text inside string literals is untouched."""
        environment = Environment(lstrip_blocks=True, trim_blocks=False)
        template = environment.from_string(''' {% set x = " {% str %} " %}{{ x }}''')
        assert template.render() == ' {% str %} '

    def test_lstrip_preserve_leading_newlines(self):
        """Leading newlines before the first tag are preserved."""
        environment = Environment(lstrip_blocks=True, trim_blocks=False)
        template = environment.from_string('''\n\n\n{% set hello = 1 %}''')
        assert template.render() == '\n\n\n'

    def test_lstrip_comment(self):
        """Comments are lstripped like block tags."""
        environment = Environment(lstrip_blocks=True, trim_blocks=False)
        template = environment.from_string(''' {# if True #}
hello
{#endif#}''')
        assert template.render() == '\nhello\n'

    def test_lstrip_angle_bracket_simple(self):
        """Angle-bracket delimiters are lstripped too."""
        environment = Environment('<%', '%>', '${', '}', '<%#', '%>', '%', '##',
                                  lstrip_blocks=True, trim_blocks=True)
        template = environment.from_string(''' <% if True %>hello <% endif %>''')
        assert template.render() == 'hello '

    def test_lstrip_angle_bracket_comment(self):
        """Angle-bracket comments are lstripped as well."""
        environment = Environment('<%', '%>', '${', '}', '<%#', '%>', '%', '##',
                                  lstrip_blocks=True, trim_blocks=True)
        template = environment.from_string(''' <%# if True %>hello <%# endif %>''')
        assert template.render() == 'hello '

    def test_lstrip_angle_bracket(self):
        """Full loop with angle-bracket delimiters and line comments."""
        environment = Environment('<%', '%>', '${', '}', '<%#', '%>', '%', '##',
                                  lstrip_blocks=True, trim_blocks=True)
        template = environment.from_string('''\
<%# regular comment %>
<% for item in seq %>
${item} ## the rest of the stuff
<% endfor %>''')
        assert template.render(seq=range(5)) == \
            ''.join('%s\n' % x for x in range(5))

    def test_lstrip_angle_bracket_compact(self):
        """Same as above with no spaces inside the delimiters."""
        environment = Environment('<%', '%>', '${', '}', '<%#', '%>', '%', '##',
                                  lstrip_blocks=True, trim_blocks=True)
        template = environment.from_string('''\
<%#regular comment%>
<%for item in seq%>
${item} ## the rest of the stuff
<%endfor%>''')
        assert template.render(seq=range(5)) == \
            ''.join('%s\n' % x for x in range(5))

    def test_php_syntax_with_manual(self):
        """PHP-style delimiters with explicit whitespace-control modifiers."""
        environment = Environment('<?', '?>', '<?=', '?>', '<!--', '-->',
                                  lstrip_blocks=True, trim_blocks=True)
        template = environment.from_string('''\
<!-- I'm a comment, I'm not interesting -->
<? for item in seq -?>
<?= item ?>
<?- endfor ?>''')
        assert template.render(seq=range(5)) == '01234'

    def test_php_syntax(self):
        """PHP-style delimiters relying purely on lstrip/trim."""
        environment = Environment('<?', '?>', '<?=', '?>', '<!--', '-->',
                                  lstrip_blocks=True, trim_blocks=True)
        template = environment.from_string('''\
<!-- I'm a comment, I'm not interesting -->
<? for item in seq ?>
<?= item ?>
<? endfor ?>''')
        assert template.render(seq=range(5)) == ''.join(' %s\n' % x for x in range(5))

    def test_php_syntax_compact(self):
        """PHP-style delimiters with no inner spaces."""
        environment = Environment('<?', '?>', '<?=', '?>', '<!--', '-->',
                                  lstrip_blocks=True, trim_blocks=True)
        template = environment.from_string('''\
<!-- I'm a comment, I'm not interesting -->
<?for item in seq?>
<?=item?>
<?endfor?>''')
        assert template.render(seq=range(5)) == ''.join(' %s\n' % x for x in range(5))

    def test_erb_syntax(self):
        """ERB-style delimiters relying purely on lstrip/trim."""
        environment = Environment('<%', '%>', '<%=', '%>', '<%#', '%>',
                                  lstrip_blocks=True, trim_blocks=True)
        template = environment.from_string('''\
<%# I'm a comment, I'm not interesting %>
<% for item in seq %>
<%= item %>
<% endfor %>
''')
        assert template.render(seq=range(5)) == ''.join(' %s\n' % x for x in range(5))

    def test_erb_syntax_with_manual(self):
        """ERB-style delimiters with explicit whitespace-control modifiers."""
        environment = Environment('<%', '%>', '<%=', '%>', '<%#', '%>',
                                  lstrip_blocks=True, trim_blocks=True)
        template = environment.from_string('''\
<%# I'm a comment, I'm not interesting %>
<% for item in seq -%>
<%= item %>
<%- endfor %>''')
        assert template.render(seq=range(5)) == '01234'

    def test_erb_syntax_no_lstrip(self):
        """The `+` modifier suppresses lstripping with ERB delimiters."""
        environment = Environment('<%', '%>', '<%=', '%>', '<%#', '%>',
                                  lstrip_blocks=True, trim_blocks=True)
        template = environment.from_string('''\
<%# I'm a comment, I'm not interesting %>
<%+ for item in seq -%>
<%= item %>
<%- endfor %>''')
        assert template.render(seq=range(5)) == ' 01234'

    def test_comment_syntax(self):
        """HTML-comment delimiters with lstrip/trim enabled."""
        environment = Environment('<!--', '-->', '${', '}', '<!--#', '-->',
                                  lstrip_blocks=True, trim_blocks=True)
        template = environment.from_string('''\
<!--# I'm a comment, I'm not interesting -->\
<!-- for item in seq --->
${item}
<!--- endfor -->''')
        assert template.render(seq=range(5)) == '01234'
def suite():
    """Aggregate all lexer/parser test cases into one unittest suite."""
    tests = unittest.TestSuite()
    # Order matches the original registration order.
    for case in (TokenStreamTestCase, LexerTestCase, ParserTestCase,
                 SyntaxTestCase, LstripBlocksTestCase):
        tests.addTest(unittest.makeSuite(case))
    return tests
| mit |
cyanna/edx-platform | lms/djangoapps/courseware/views.py | 4 | 56361 | """
Courseware views functions
"""
import logging
import urllib
import json
import cgi
from datetime import datetime
from collections import defaultdict
from django.utils import translation
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext
from django.conf import settings
from django.core.context_processors import csrf
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.decorators import login_required
from django.utils.timezone import UTC
from django.views.decorators.http import require_GET, require_POST
from django.http import Http404, HttpResponse, HttpResponseBadRequest
from django.shortcuts import redirect
from certificates import api as certs_api
from edxmako.shortcuts import render_to_response, render_to_string, marketing_link
from django_future.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from django.db import transaction
from markupsafe import escape
from courseware import grades
from courseware.access import has_access, _adjust_start_date_for_beta_testers
from courseware.courses import (
get_courses, get_course,
get_studio_url, get_course_with_access,
sort_by_announcement,
sort_by_start_date,
)
from courseware.masquerade import setup_masquerade
from courseware.model_data import FieldDataCache
from .module_render import toc_for_course, get_module_for_descriptor, get_module
from .entrance_exams import (
course_has_entrance_exam,
get_entrance_exam_content,
get_entrance_exam_score,
user_must_complete_entrance_exam,
user_has_passed_entrance_exam
)
from courseware.models import StudentModule, StudentModuleHistory
from course_modes.models import CourseMode
from lms.djangoapps.lms_xblock.models import XBlockAsidesConfig
from open_ended_grading import open_ended_notifications
from student.models import UserTestGroup, CourseEnrollment
from student.views import single_course_reverification_info, is_course_blocked
from util.cache import cache, cache_if_anonymous
from xblock.fragment import Fragment
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError, NoPathToItem
from xmodule.modulestore.search import path_to_location, navigation_index
from xmodule.tabs import CourseTabList, StaffGradingTab, PeerGradingTab, OpenEndedGradingTab
from xmodule.x_module import STUDENT_VIEW
import shoppingcart
from shoppingcart.models import CourseRegistrationCode
from shoppingcart.utils import is_shopping_cart_enabled
from opaque_keys import InvalidKeyError
from util.milestones_helpers import get_prerequisite_courses_display
from microsite_configuration import microsite
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.keys import CourseKey, UsageKey
from instructor.enrollment import uses_shib
from util.db import commit_on_success_with_read_committed
import survey.utils
import survey.views
from util.views import ensure_valid_course_key
from eventtracking import tracker
import analytics
log = logging.getLogger("edx.courseware")

# Helpers exposed to templates rendered through render_accordion below.
template_imports = {'urllib': urllib}

# Depth of the course tree to pre-fetch: course -> chapter -> section.
CONTENT_DEPTH = 2
def user_groups(user):
    """
    TODO (vshnayder): This is not used. When we have a new plan for groups, adjust appropriately.
    """
    if not user.is_authenticated():
        return []

    # TODO: Rewrite in Django
    cache_key = 'user_group_names_{user.id}'.format(user=user)
    one_hour = 60 * 60  # cache expiration, in seconds

    group_names = cache.get(cache_key)
    if settings.DEBUG:
        # Kill caching on dev machines -- we switch groups a lot
        group_names = None

    if group_names is None:
        group_names = [group.name for group in UserTestGroup.objects.filter(users=user)]
        cache.set(cache_key, group_names, one_hour)

    return group_names
@ensure_csrf_cookie
@cache_if_anonymous()
def courses(request):
    """
    Render the "find courses" page. The course selection work is done in
    courseware.courses.
    """
    course_list = get_courses(request.user, request.META.get('HTTP_HOST'))
    sort_by_date = microsite.get_value(
        "ENABLE_COURSE_SORTING_BY_START_DATE",
        settings.FEATURES["ENABLE_COURSE_SORTING_BY_START_DATE"],
    )
    sorter = sort_by_start_date if sort_by_date else sort_by_announcement
    return render_to_response("courseware/courses.html", {'courses': sorter(course_list)})
def render_accordion(request, course, chapter, section, field_data_cache):
    """
    Draw the navigation accordion, opened to the current position.

    If chapter and section are '' or None, renders a default accordion.
    course, chapter, and section are the url_names.

    Returns the html string.
    """
    # grab the table of contents
    toc = toc_for_course(request, course, chapter, section, field_data_cache)
    context = {
        'toc': toc,
        'course_id': course.id.to_deprecated_string(),
        'csrf': csrf(request)['csrf_token'],
        'due_date_display_format': course.due_date_display_format,
    }
    # Make the template helper modules (e.g. urllib) available too.
    context.update(template_imports)
    return render_to_string('courseware/accordion.html', context)
def get_current_child(xmodule, min_depth=None):
    """
    Get the display item at ``xmodule.position`` for an xmodule that has a
    position and children. If xmodule has no position, or the position is
    out of bounds, return the first child that has children extending down
    at least ``min_depth`` levels.

    Returns None only if there are no children at all.
    """
    def _default_child(children):
        """First child acceptable under min_depth, or None."""
        if not children:
            return None
        # NB: with min_depth=None this comparison is the py2 `None > 0`
        # (False), so any child is acceptable -- preserved as-is.
        if not min_depth > 0:
            return children[0]
        deep_enough = [
            child for child in children
            if child.has_children_at_depth(min_depth - 1) and child.get_display_items()
        ]
        return deep_enough[0] if deep_enough else None

    if not hasattr(xmodule, 'position'):
        return None

    children = xmodule.get_display_items()
    if xmodule.position is None:
        return _default_child(children)

    index = xmodule.position - 1  # position is 1-indexed
    if 0 <= index < len(children):
        return children[index]
    if children:
        # Module has a set position, but it is out of range; fall back
        # to the default child.
        return _default_child(children)
    return None
def redirect_to_course_position(course_module, content_depth):
    """
    Return a redirect to the user's current place in the course.

    First-time users get COURSE/CHAPTER/SECTION; returning users get
    COURSE/CHAPTER, and the view finds the current section and shows a
    message about reusing the stored position. If there is no current
    position in the course or chapter, the first child is selected.
    """
    url_kwargs = {'course_id': course_module.id.to_deprecated_string()}

    chapter = get_current_child(course_module, min_depth=content_depth)
    if chapter is None:
        # oops. Something bad has happened.
        raise Http404("No chapter found when loading current position in course")
    url_kwargs['chapter'] = chapter.url_name

    if course_module.position is not None:
        return redirect(reverse('courseware_chapter', kwargs=url_kwargs))

    # Relying on default of returning first child
    section = get_current_child(chapter, min_depth=content_depth - 1)
    if section is None:
        raise Http404("No section found when loading current position in course")
    url_kwargs['section'] = section.url_name
    return redirect(reverse('courseware_section', kwargs=url_kwargs))
def save_child_position(seq_module, child_name):
    """
    Persist seq_module.position so it points at the child whose url_name
    is ``child_name``.

    child_name: url_name of the child
    """
    for index, child in enumerate(seq_module.get_display_items(), start=1):
        if child.location.name != child_name:
            continue
        # Only save if position changed
        if index != seq_module.position:
            seq_module.position = index
            # Save this new position to the underlying KeyValueStore
            seq_module.save()
def save_positions_recursively_up(user, request, field_data_cache, xmodule):
    """
    Recurse up the course tree starting from a leaf, saving each parent's
    position property based on the node just visited.
    """
    node = xmodule
    while node:
        parent_location = modulestore().get_parent_location(node.location)
        parent = None
        if parent_location:
            parent_descriptor = modulestore().get_item(parent_location)
            parent = get_module_for_descriptor(
                user, request, parent_descriptor, field_data_cache, node.location.course_key)
        if parent and hasattr(parent, 'position'):
            save_child_position(parent, node.location.name)
        node = parent
def chat_settings(course, user):
    """
    Return a dict with the settings required to connect to a Jabber chat
    server and room, or None if JABBER_DOMAIN is not configured.
    """
    domain = getattr(settings, "JABBER_DOMAIN", None)
    if domain is None:
        log.warning('You must set JABBER_DOMAIN in the settings to '
                    'enable the chat widget')
        return None

    # TODO: clearly this needs to be something other than the username
    # should also be something that's not necessarily tied to a
    # particular course
    jid = "{USER}@{DOMAIN}".format(USER=user.username, DOMAIN=domain)
    return {
        'domain': domain,
        # Jabber doesn't like slashes, so replace with dashes
        'room': "{ID}_class".format(ID=course.id.replace('/', '-')),
        'username': jid,
        'password': jid,
    }
@login_required
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@ensure_valid_course_key
@commit_on_success_with_read_committed
def index(request, course_id, chapter=None, section=None,
          position=None):
    """
    Display the courseware accordion and associated content.

    If course, chapter, and section are all specified, renders the page
    (or errors if they are invalid). If section is not specified, the
    accordion is opened to the right chapter. If neither chapter nor
    section is given, redirects to the user's most recent chapter, or
    the first chapter on a first visit.

    Arguments:
    - request  : HTTP request
    - course_id: course id (str: ORG/course/URL_NAME)
    - chapter  : chapter url_name (str)
    - section  : section url_name (str)
    - position : position in module, eg of <sequential> module (str)

    Returns an HTTPresponse.
    """
    course_key = CourseKey.from_string(course_id)
    user = User.objects.prefetch_related("groups").get(id=request.user.id)

    redeemed_codes = CourseRegistrationCode.objects.filter(
        course_id=course_key,
        registrationcoderedemption__redeemed_by=request.user
    )
    # Redirect to dashboard if the course is blocked due to non-payment.
    if is_course_blocked(request, redeemed_codes, course_key):
        # registration codes may be generated via Bulk Purchase Scenario
        # we have to check only for the invoice generated registration codes
        # that their invoice is valid or not
        log.warning(
            u'User %s cannot access the course %s because payment has not yet been received',
            user,
            course_key.to_deprecated_string()
        )
        return redirect(reverse('dashboard'))

    request.user = user  # keep just one instance of User
    with modulestore().bulk_operations(course_key):
        return _index_bulk_op(request, course_key, chapter, section, position)
# pylint: disable=too-many-statements
def _index_bulk_op(request, course_key, chapter, section, position):
    """
    Render the index page for the specified course.

    Runs inside a modulestore bulk_operations block (see index above).
    Performs access/enrollment/prerequisite/entrance-exam/survey checks,
    then renders the requested chapter/section, redirecting to the saved
    position when chapter or section is omitted.
    """
    # Verify that position a string is in fact an int
    if position is not None:
        try:
            int(position)
        except ValueError:
            raise Http404("Position {} is not an integer!".format(position))

    user = request.user
    course = get_course_with_access(user, 'load', course_key, depth=2)
    staff_access = has_access(user, 'staff', course)
    registered = registered_for_course(course, user)
    if not registered:
        # TODO (vshnayder): do course instructors need to be registered to see course?
        log.debug(u'User %s tried to view course %s but is not enrolled', user, course.location.to_deprecated_string())
        return redirect(reverse('about_course', args=[course_key.to_deprecated_string()]))

    # see if all pre-requisites (as per the milestones app feature) have been fulfilled
    # Note that if the pre-requisite feature flag has been turned off (default) then this check will
    # always pass
    if not has_access(user, 'view_courseware_with_prerequisites', course):
        # prerequisites have not been fulfilled therefore redirect to the Dashboard
        log.info(
            u'User %d tried to view course %s '
            u'without fulfilling prerequisites',
            user.id, unicode(course.id))
        return redirect(reverse('dashboard'))

    # Entrance Exam Check
    # If the course has an entrance exam and the requested chapter is NOT the entrance exam, and
    # the user hasn't yet met the criteria to bypass the entrance exam, redirect them to the exam.
    if chapter and course_has_entrance_exam(course):
        chapter_descriptor = course.get_child_by(lambda m: m.location.name == chapter)
        if chapter_descriptor and not getattr(chapter_descriptor, 'is_entrance_exam', False) \
                and user_must_complete_entrance_exam(request, user, course):
            log.info(u'User %d tried to view course %s without passing entrance exam', user.id, unicode(course.id))
            return redirect(reverse('courseware', args=[unicode(course.id)]))

    # check to see if there is a required survey that must be taken before
    # the user can access the course.
    if survey.utils.must_answer_survey(course, user):
        return redirect(reverse('course_survey', args=[unicode(course.id)]))

    masquerade = setup_masquerade(request, course_key, staff_access)

    try:
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course_key, user, course, depth=2)
        course_module = get_module_for_descriptor(user, request, course, field_data_cache, course_key)
        if course_module is None:
            log.warning(u'If you see this, something went wrong: if we got this'
                        u' far, should have gotten a course module for this user')
            return redirect(reverse('about_course', args=[course_key.to_deprecated_string()]))

        studio_url = get_studio_url(course, 'course')

        context = {
            'csrf': csrf(request)['csrf_token'],
            'accordion': render_accordion(request, course, chapter, section, field_data_cache),
            'COURSE_TITLE': course.display_name_with_default,
            'course': course,
            'init': '',
            'fragment': Fragment(),
            'staff_access': staff_access,
            'studio_url': studio_url,
            'masquerade': masquerade,
            'xqa_server': settings.FEATURES.get('USE_XQA_SERVER', 'http://xqa:server@content-qa.mitx.mit.edu/xqa'),
            'reverifications': fetch_reverify_banner_info(request, course_key),
        }

        now = datetime.now(UTC())
        effective_start = _adjust_start_date_for_beta_testers(user, course, course_key)
        if staff_access and now < effective_start:
            # Disable student view button if user is staff and
            # course is not yet visible to students.
            context['disable_student_access'] = True

        has_content = course.has_children_at_depth(CONTENT_DEPTH)
        if not has_content:
            # Show empty courseware for a course with no units
            return render_to_response('courseware/courseware.html', context)
        elif chapter is None:
            # Check first to see if we should instead redirect the user to an Entrance Exam
            if course_has_entrance_exam(course):
                exam_chapter = get_entrance_exam_content(request, course)
                if exam_chapter:
                    exam_section = None
                    if exam_chapter.get_children():
                        exam_section = exam_chapter.get_children()[0]
                    if exam_section:
                        return redirect('courseware_section',
                                        course_id=unicode(course_key),
                                        chapter=exam_chapter.url_name,
                                        section=exam_section.url_name)
            # passing CONTENT_DEPTH avoids returning 404 for a course with an
            # empty first section and a second section with content
            return redirect_to_course_position(course_module, CONTENT_DEPTH)

        # Only show the chat if it's enabled by the course and in the
        # settings.
        show_chat = course.show_chat and settings.FEATURES['ENABLE_CHAT']
        if show_chat:
            context['chat'] = chat_settings(course, user)
            # If we couldn't load the chat settings, then don't show
            # the widget in the courseware.
            if context['chat'] is None:
                show_chat = False
        context['show_chat'] = show_chat

        chapter_descriptor = course.get_child_by(lambda m: m.location.name == chapter)
        if chapter_descriptor is not None:
            save_child_position(course_module, chapter)
        else:
            raise Http404('No chapter descriptor found with name {}'.format(chapter))

        chapter_module = course_module.get_child_by(lambda m: m.location.name == chapter)
        if chapter_module is None:
            # User may be trying to access a chapter that isn't live yet
            if masquerade and masquerade.role == 'student':  # if staff is masquerading as student be kinder, don't 404
                log.debug('staff masquerading as student: no chapter %s', chapter)
                return redirect(reverse('courseware', args=[course.id.to_deprecated_string()]))
            raise Http404

        if course_has_entrance_exam(course):
            # Message should not appear outside the context of entrance exam subsection.
            # if section is none then we don't need to show message on welcome back screen also.
            if getattr(chapter_module, 'is_entrance_exam', False) and section is not None:
                context['entrance_exam_current_score'] = get_entrance_exam_score(request, course)
                context['entrance_exam_passed'] = user_has_passed_entrance_exam(request, course)

        if section is not None:
            section_descriptor = chapter_descriptor.get_child_by(lambda m: m.location.name == section)
            if section_descriptor is None:
                # Specifically asked-for section doesn't exist
                if masquerade and masquerade.role == 'student':  # don't 404 if staff is masquerading as student
                    log.debug('staff masquerading as student: no section %s', section)
                    return redirect(reverse('courseware', args=[course.id.to_deprecated_string()]))
                raise Http404

            ## Allow chromeless operation
            if section_descriptor.chrome:
                chrome = [s.strip() for s in section_descriptor.chrome.lower().split(",")]
                if 'accordion' not in chrome:
                    context['disable_accordion'] = True
                if 'tabs' not in chrome:
                    context['disable_tabs'] = True

            if section_descriptor.default_tab:
                context['default_tab'] = section_descriptor.default_tab

            # cdodge: this looks silly, but let's refetch the section_descriptor with depth=None
            # which will prefetch the children more efficiently than doing a recursive load
            section_descriptor = modulestore().get_item(section_descriptor.location, depth=None)

            # Load all descendants of the section, because we're going to display its
            # html, which in general will need all of its children
            field_data_cache.add_descriptor_descendents(
                section_descriptor, depth=None
            )

            section_module = get_module_for_descriptor(
                request.user,
                request,
                section_descriptor,
                field_data_cache,
                course_key,
                position
            )

            if section_module is None:
                # User may be trying to be clever and access something
                # they don't have access to.
                raise Http404

            # Save where we are in the chapter
            save_child_position(chapter_module, section)
            context['fragment'] = section_module.render(STUDENT_VIEW)
            context['section_title'] = section_descriptor.display_name_with_default
        else:
            # section is none, so display a message
            studio_url = get_studio_url(course, 'course')
            prev_section = get_current_child(chapter_module)
            if prev_section is None:
                # Something went wrong -- perhaps this chapter has no sections visible to the user.
                # Clearing out the last-visited state and showing "first-time" view by redirecting
                # to courseware.
                course_module.position = None
                course_module.save()
                return redirect(reverse('courseware', args=[course.id.to_deprecated_string()]))
            prev_section_url = reverse('courseware_section', kwargs={
                'course_id': course_key.to_deprecated_string(),
                'chapter': chapter_descriptor.url_name,
                'section': prev_section.url_name
            })
            context['fragment'] = Fragment(content=render_to_string(
                'courseware/welcome-back.html',
                {
                    'course': course,
                    'studio_url': studio_url,
                    'chapter_module': chapter_module,
                    'prev_section': prev_section,
                    'prev_section_url': prev_section_url
                }
            ))

        result = render_to_response('courseware/courseware.html', context)
    except Exception as e:
        # Doesn't bar Unicode characters from URL, but if Unicode characters do
        # cause an error it is a graceful failure.
        if isinstance(e, UnicodeEncodeError):
            raise Http404("URL contains Unicode characters")

        if isinstance(e, Http404):
            # let it propagate
            raise

        # In production, don't want to let a 500 out for any reason
        if settings.DEBUG:
            raise
        else:
            log.exception(
                u"Error in index view: user={user}, course={course}, chapter={chapter}"
                u" section={section} position={position}".format(
                    user=user,
                    course=course,
                    chapter=chapter,
                    section=section,
                    position=position
                ))
            try:
                result = render_to_response('courseware/courseware-error.html', {
                    'staff_access': staff_access,
                    'course': course
                })
            except:
                # Let the exception propagate, relying on global config to at
                # at least return a nice error message
                log.exception("Error while rendering courseware-error page")
                raise

    return result
@ensure_csrf_cookie
@ensure_valid_course_key
def jump_to_id(request, course_id, module_id):
    """
    Shorter form of jump_to that takes only the id of the target element,
    assuming that id is unique within the course_id namespace.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    matches = modulestore().get_items(course_key, qualifiers={'name': module_id})
    referer = request.META.get("HTTP_REFERER", "")
    if not matches:
        raise Http404(
            u"Could not find id: {0} in course_id: {1}. Referer: {2}".format(
                module_id, course_id, referer
            ))
    if len(matches) > 1:
        # The id is ambiguous; fall through to the first hit but leave a trace in the logs.
        log.warning(
            u"Multiple items found with id: {0} in course_id: {1}. Referer: {2}. Using first: {3}".format(
                module_id, course_id, referer, matches[0].location.to_deprecated_string()
            ))
    # Delegate to jump_to, which resolves the full location into a courseware URL.
    return jump_to(request, course_id, matches[0].location.to_deprecated_string())
@ensure_csrf_cookie
def jump_to(_request, course_id, location):
    """
    Show the page that contains a specific location.

    If the location is invalid or not in any class, return a 404.
    Otherwise, delegates to the index view (via redirect) to figure out
    whether this user has access, and what they should see.
    """
    try:
        course_key = CourseKey.from_string(course_id)
        usage_key = UsageKey.from_string(location).replace(course_key=course_key)
    except InvalidKeyError:
        raise Http404(u"Invalid course_key or usage_key")
    try:
        (course_key, chapter, section, position) = path_to_location(modulestore(), usage_key)
    except ItemNotFoundError:
        raise Http404(u"No data at this location: {0}".format(usage_key))
    except NoPathToItem:
        raise Http404(u"This location is not in any class: {0}".format(usage_key))
    # Pick the most specific courseware view the path components allow and
    # accumulate the matching redirect kwargs. Index does all error handling
    # and access control after the redirect.
    redirect_kwargs = {'course_id': unicode(course_key)}
    if chapter is None:
        view_name = 'courseware'
    elif section is None:
        view_name = 'courseware_chapter'
        redirect_kwargs['chapter'] = chapter
    elif position is None:
        view_name = 'courseware_section'
        redirect_kwargs['chapter'] = chapter
        redirect_kwargs['section'] = section
    else:
        # Here we use the navigation_index from the position returned from
        # path_to_location - we can only navigate to the topmost vertical at
        # the moment.
        view_name = 'courseware_position'
        redirect_kwargs['chapter'] = chapter
        redirect_kwargs['section'] = section
        redirect_kwargs['position'] = navigation_index(position)
    return redirect(view_name, **redirect_kwargs)
@ensure_csrf_cookie
@ensure_valid_course_key
def course_info(request, course_id):
    """
    Display the course's info.html, or 404 if there is no such course.
    Assumes the course_id is in a valid format.

    May instead redirect to the courseware index (when an entrance exam is
    still required) or to the course survey (when a required survey has not
    been answered yet).
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    with modulestore().bulk_operations(course_key):
        course = get_course_with_access(request.user, 'load', course_key)
        # If the user needs to take an entrance exam to access this course, then we'll need
        # to send them to that specific course module before allowing them into other areas
        if user_must_complete_entrance_exam(request, request.user, course):
            return redirect(reverse('courseware', args=[unicode(course.id)]))
        # check to see if there is a required survey that must be taken before
        # the user can access the course.
        if request.user.is_authenticated() and survey.utils.must_answer_survey(course, request.user):
            return redirect(reverse('course_survey', args=[unicode(course.id)]))
        staff_access = has_access(request.user, 'staff', course)
        masquerade = setup_masquerade(request, course_key, staff_access) # allow staff to masquerade on the info page
        reverifications = fetch_reverify_banner_info(request, course_key)
        studio_url = get_studio_url(course, 'course_info')
        # link to where the student should go to enroll in the course:
        # about page if there is not marketing site, SITE_NAME if there is
        # (note that reverse() is passed the course_about view function itself)
        url_to_enroll = reverse(course_about, args=[course_id])
        if settings.FEATURES.get('ENABLE_MKTG_SITE'):
            url_to_enroll = marketing_link('COURSES')
        # Show the enroll banner only to authenticated users who are not yet enrolled.
        show_enroll_banner = request.user.is_authenticated() and not CourseEnrollment.is_enrolled(request.user, course.id)
        context = {
            'request': request,
            'course_id': course_key.to_deprecated_string(),
            'cache': None,
            'course': course,
            'staff_access': staff_access,
            'masquerade': masquerade,
            'studio_url': studio_url,
            'reverifications': reverifications,
            'show_enroll_banner': show_enroll_banner,
            'url_to_enroll': url_to_enroll,
        }
        now = datetime.now(UTC())
        # Beta testers may have an earlier effective start date than the published one.
        effective_start = _adjust_start_date_for_beta_testers(request.user, course, course_key)
        if staff_access and now < effective_start:
            # Disable student view button if user is staff and
            # course is not yet visible to students.
            context['disable_student_access'] = True
        return render_to_response('courseware/info.html', context)
@ensure_csrf_cookie
@ensure_valid_course_key
def static_tab(request, course_id, tab_slug):
    """
    Render the static course tab identified by ``tab_slug``.

    Assumes the course_id is in a valid format. Raises Http404 when the tab
    does not exist or has no renderable contents.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    course = get_course_with_access(request.user, 'load', course_key)
    tab = CourseTabList.get_tab_by_slug(course.tabs, tab_slug)
    if tab is None:
        raise Http404
    contents = get_static_tab_contents(request, course, tab)
    if contents is None:
        raise Http404
    context = {
        'course': course,
        'tab': tab,
        'tab_contents': contents,
    }
    return render_to_response('courseware/static_tab.html', context)
# TODO arjun: remove when custom tabs in place, see courseware/syllabus.py
@ensure_csrf_cookie
@ensure_valid_course_key
def syllabus(request, course_id):
    """
    Display the course's syllabus.html, or 404 if there is no such course.
    Assumes the course_id is in a valid format.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    course = get_course_with_access(request.user, 'load', course_key)
    context = {
        'course': course,
        'staff_access': has_access(request.user, 'staff', course),
    }
    return render_to_response('courseware/syllabus.html', context)
def registered_for_course(course, user):
    """
    Return True if user is registered for course, else False.

    Anonymous (unauthenticated) users and a missing user are never
    considered registered.
    """
    if user is None or not user.is_authenticated():
        return False
    return CourseEnrollment.is_enrolled(user, course.id)
def get_cosmetic_display_price(course, registration_price):
    """
    Return the course price as a string preceded by the correct currency, or 'Free'.

    A positive ``registration_price`` takes precedence over the course's
    cosmetic display price.
    """
    currency_symbol = settings.PAID_COURSE_REGISTRATION_CURRENCY[1]
    price = course.cosmetic_display_price
    if registration_price > 0:
        price = registration_price
    if not price:
        # Translators: This refers to the cost of the course. In this case, the course costs nothing so it is free.
        return _('Free')
    # Translators: This will look like '$50', where {currency_symbol} is a symbol such as '$' and {price} is a
    # numerical amount in that currency. Adjust this display as needed for your language.
    return _("{currency_symbol}{price}").format(currency_symbol=currency_symbol, price=price)
@ensure_csrf_cookie
@cache_if_anonymous()
def course_about(request, course_id):
    """
    Display the course's about page.
    Assumes the course_id is in a valid format.

    Redirects to the course info page when a marketing site is enabled
    (globally or for the microsite), since the about page then lives on the
    marketing site instead.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    with modulestore().bulk_operations(course_key):
        # Which access permission gates the about page is configurable per microsite.
        permission_name = microsite.get_value(
            'COURSE_ABOUT_VISIBILITY_PERMISSION',
            settings.COURSE_ABOUT_VISIBILITY_PERMISSION
        )
        course = get_course_with_access(request.user, permission_name, course_key)
        if microsite.get_value('ENABLE_MKTG_SITE', settings.FEATURES.get('ENABLE_MKTG_SITE', False)):
            return redirect(reverse('info', args=[course.id.to_deprecated_string()]))
        registered = registered_for_course(course, request.user)
        staff_access = has_access(request.user, 'staff', course)
        studio_url = get_studio_url(course, 'settings/details')
        # Users who can load the course go to the info page; everyone else
        # stays on (or is pointed back to) the about page.
        if has_access(request.user, 'load', course):
            course_target = reverse('info', args=[course.id.to_deprecated_string()])
        else:
            course_target = reverse('about_course', args=[course.id.to_deprecated_string()])
        show_courseware_link = (
            (
                has_access(request.user, 'load', course)
                and has_access(request.user, 'view_courseware_with_prerequisites', course)
            )
            or settings.FEATURES.get('ENABLE_LMS_MIGRATION')
        )
        # Note: this is a flow for payment for course registration, not the Verified Certificate flow.
        registration_price = 0
        in_cart = False
        reg_then_add_to_cart_link = ""
        _is_shopping_cart_enabled = is_shopping_cart_enabled()
        if _is_shopping_cart_enabled:
            registration_price = CourseMode.min_course_price_for_currency(course_key,
                                                                          settings.PAID_COURSE_REGISTRATION_CURRENCY[0])
            if request.user.is_authenticated():
                cart = shoppingcart.models.Order.get_cart_for_user(request.user)
                # The course may already be in the cart either as a single paid
                # registration or as a registration-code item.
                in_cart = shoppingcart.models.PaidCourseRegistration.contained_in_order(cart, course_key) or \
                    shoppingcart.models.CourseRegCodeItem.contained_in_order(cart, course_key)
                reg_then_add_to_cart_link = "{reg_url}?course_id={course_id}&enrollment_action=add_to_cart".format(
                    reg_url=reverse('register_user'), course_id=course.id.to_deprecated_string())
        course_price = get_cosmetic_display_price(course, registration_price)
        can_add_course_to_cart = _is_shopping_cart_enabled and registration_price
        # Used to provide context to message to student if enrollment not allowed
        can_enroll = has_access(request.user, 'enroll', course)
        invitation_only = course.invitation_only
        is_course_full = CourseEnrollment.is_course_full(course)
        # Register button should be disabled if one of the following is true:
        # - Student is already registered for course
        # - Course is already full
        # - Student cannot enroll in course
        active_reg_button = not(registered or is_course_full or not can_enroll)
        is_shib_course = uses_shib(course)
        # get prerequisite courses display names
        pre_requisite_courses = get_prerequisite_courses_display(course)
        return render_to_response('courseware/course_about.html', {
            'course': course,
            'staff_access': staff_access,
            'studio_url': studio_url,
            'registered': registered,
            'course_target': course_target,
            'is_cosmetic_price_enabled': settings.FEATURES.get('ENABLE_COSMETIC_DISPLAY_PRICE'),
            'course_price': course_price,
            'in_cart': in_cart,
            'reg_then_add_to_cart_link': reg_then_add_to_cart_link,
            'show_courseware_link': show_courseware_link,
            'is_course_full': is_course_full,
            'can_enroll': can_enroll,
            'invitation_only': invitation_only,
            'active_reg_button': active_reg_button,
            'is_shib_course': is_shib_course,
            # We do not want to display the internal courseware header, which is used when the course is found in the
            # context. This value is therefore explicitly set to render the appropriate header.
            'disable_courseware_header': True,
            'can_add_course_to_cart': can_add_course_to_cart,
            'cart_link': reverse('shoppingcart.views.show_cart'),
            'pre_requisite_courses': pre_requisite_courses
        })
@ensure_csrf_cookie
@cache_if_anonymous('org')
@ensure_valid_course_key
def mktg_course_about(request, course_id):
    """This is the button that gets put into an iframe on the Drupal site.

    Renders the marketing-site registration/access button for the course,
    or a "Coming Soon" placeholder when the course does not exist yet.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    try:
        permission_name = microsite.get_value(
            'COURSE_ABOUT_VISIBILITY_PERMISSION',
            settings.COURSE_ABOUT_VISIBILITY_PERMISSION
        )
        course = get_course_with_access(request.user, permission_name, course_key)
    except (ValueError, Http404):
        # If a course does not exist yet, display a "Coming Soon" button
        return render_to_response(
            'courseware/mktg_coming_soon.html', {'course_id': course_key.to_deprecated_string()}
        )
    registered = registered_for_course(course, request.user)
    if has_access(request.user, 'load', course):
        course_target = reverse('info', args=[course.id.to_deprecated_string()])
    else:
        course_target = reverse('about_course', args=[course.id.to_deprecated_string()])
    allow_registration = has_access(request.user, 'enroll', course)
    show_courseware_link = (has_access(request.user, 'load', course) or
                            settings.FEATURES.get('ENABLE_LMS_MIGRATION'))
    course_modes = CourseMode.modes_for_course_dict(course.id)
    context = {
        'course': course,
        'registered': registered,
        'allow_registration': allow_registration,
        'course_target': course_target,
        'show_courseware_link': show_courseware_link,
        'course_modes': course_modes,
    }
    # The edx.org marketing site currently displays only in English.
    # To avoid displaying a different language in the register / access button,
    # we force the language to English.
    # However, OpenEdX installations with a different marketing front-end
    # may want to respect the language specified by the user or the site settings.
    force_english = settings.FEATURES.get('IS_EDX_DOMAIN', False)
    if force_english:
        translation.activate('en-us')
    if settings.FEATURES.get('ENABLE_MKTG_EMAIL_OPT_IN'):
        # Drupal will pass organization names using a GET parameter, as follows:
        # ?org=Harvard
        # ?org=Harvard,MIT
        # If no full names are provided, the marketing iframe won't show the
        # email opt-in checkbox.
        org = request.GET.get('org')
        if org:
            org_list = org.split(',')
            # HTML-escape the provided organization names
            org_list = [cgi.escape(org) for org in org_list]
            if len(org_list) > 1:
                if len(org_list) > 2:
                    # Translators: The join of three or more institution names (e.g., Harvard, MIT, and Dartmouth).
                    org_name_string = _("{first_institutions}, and {last_institution}").format(
                        first_institutions=u", ".join(org_list[:-1]),
                        last_institution=org_list[-1]
                    )
                else:
                    # Translators: The join of two institution names (e.g., Harvard and MIT).
                    org_name_string = _("{first_institution} and {second_institution}").format(
                        first_institution=org_list[0],
                        second_institution=org_list[1]
                    )
            else:
                org_name_string = org_list[0]
            # ungettext picks the singular/plural message depending on the
            # number of institutions involved.
            context['checkbox_label'] = ungettext(
                "I would like to receive email from {institution_series} and learn about its other programs.",
                "I would like to receive email from {institution_series} and learn about their other programs.",
                len(org_list)
            ).format(institution_series=org_name_string)
    try:
        return render_to_response('courseware/mktg_course_about.html', context)
    finally:
        # Just to be safe, reset the language if we forced it to be English.
        if force_english:
            translation.deactivate()
@login_required
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@transaction.commit_manually
@ensure_valid_course_key
def progress(request, course_id, student_id=None):
    """
    Wraps "_progress" with the manual_transaction context manager just in case
    there are unanticipated errors.

    The no-cache headers keep grade data from being served stale;
    commit_manually defers DB commits to the manual_transaction wrapper.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    with modulestore().bulk_operations(course_key):
        with grades.manual_transaction():
            return _progress(request, course_key, student_id)
def _progress(request, course_key, student_id):
    """
    Unwrapped version of "progress".
    User progress. We show the grade bar and every problem score.
    Course staff are allowed to see the progress of students in their class.

    Arguments:
        request (HttpRequest): the incoming request (request.user is the viewer).
        course_key: key of the course whose progress page is shown.
        student_id: optional id of another student; only staff may view
            someone else's progress (Http404 otherwise).
    """
    course = get_course_with_access(request.user, 'load', course_key, depth=None, check_if_enrolled=True)
    # check to see if there is a required survey that must be taken before
    # the user can access the course.
    if survey.utils.must_answer_survey(course, request.user):
        return redirect(reverse('course_survey', args=[unicode(course.id)]))
    staff_access = has_access(request.user, 'staff', course)
    if student_id is None or student_id == request.user.id:
        # always allowed to see your own profile
        student = request.user
    else:
        # Requesting access to a different student's profile
        if not staff_access:
            raise Http404
        try:
            student = User.objects.get(id=student_id)
        # Check for ValueError if 'student_id' cannot be converted to integer.
        except (ValueError, User.DoesNotExist):
            raise Http404
    # NOTE: To make sure impersonation by instructor works, use
    # student instead of request.user in the rest of the function.
    # The pre-fetching of groups is done to make auth checks not require an
    # additional DB lookup (this kills the Progress page in particular).
    student = User.objects.prefetch_related("groups").get(id=student.id)
    courseware_summary = grades.progress_summary(student, request, course)
    studio_url = get_studio_url(course, 'settings/grading')
    grade_summary = grades.grade(student, request, course)
    if courseware_summary is None:
        # This means the student didn't have access to the course (which the instructor requested)
        raise Http404
    # checking certificate generation configuration
    show_generate_cert_btn = certs_api.cert_generation_enabled(course_key)
    context = {
        'course': course,
        'courseware_summary': courseware_summary,
        'studio_url': studio_url,
        'grade_summary': grade_summary,
        'staff_access': staff_access,
        'student': student,
        'reverifications': fetch_reverify_banner_info(request, course_key),
        'passed': is_course_passed(course, grade_summary),
        'show_generate_cert_btn': show_generate_cert_btn
    }
    if show_generate_cert_btn:
        # Adds certificate download/generation status keys to the context.
        context.update(certs_api.certificate_downloadable_status(student, course_key))
    with grades.manual_transaction():
        response = render_to_response('courseware/progress.html', context)
    return response
def fetch_reverify_banner_info(request, course_key):
    """
    Fetch the context needed to display the reverification banner in courseware.

    Returns a defaultdict(list) mapping reverification status to info objects;
    it is empty for anonymous users and users without an enrollment.
    """
    reverifications = defaultdict(list)
    user = request.user
    if not user.id:
        # Anonymous user: nothing to reverify.
        return reverifications
    enrollment = CourseEnrollment.get_enrollment(user, course_key)
    if enrollment is None:
        return reverifications
    course = modulestore().get_course(course_key)
    info = single_course_reverification_info(user, course, enrollment)
    if info:
        reverifications[info.status].append(info)
    return reverifications
@login_required
@ensure_valid_course_key
def submission_history(request, course_id, student_username, location):
    """Render an HTML fragment (meant for inclusion elsewhere) that renders a
    history of all state changes made by this user for this problem location.
    Right now this only works for problems because that's all
    StudentModuleHistory records.

    Non-staff users may only view their own history (PermissionDenied
    otherwise). Lookup failures are reported as inline HTML messages rather
    than error responses, since this fragment is embedded in another page.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    try:
        usage_key = course_key.make_usage_key_from_deprecated_string(location)
    except (InvalidKeyError, AssertionError):
        return HttpResponse(escape(_(u'Invalid location.')))
    course = get_course_with_access(request.user, 'load', course_key)
    staff_access = has_access(request.user, 'staff', course)
    # Permission Denied if they don't have staff access and are trying to see
    # somebody else's submission history.
    if (student_username != request.user.username) and (not staff_access):
        raise PermissionDenied
    try:
        student = User.objects.get(username=student_username)
        student_module = StudentModule.objects.get(
            course_id=course_key,
            module_state_key=usage_key,
            student_id=student.id
        )
    except User.DoesNotExist:
        return HttpResponse(escape(_(u'User {username} does not exist.').format(username=student_username)))
    except StudentModule.DoesNotExist:
        return HttpResponse(escape(_(u'User {username} has never accessed problem {location}').format(
            username=student_username,
            location=location
        )))
    # Newest history entries first.
    history_entries = StudentModuleHistory.objects.filter(
        student_module=student_module
    ).order_by('-id')
    # If no history records exist, let's force a save to get history started.
    if not history_entries:
        student_module.save()
        history_entries = StudentModuleHistory.objects.filter(
            student_module=student_module
        ).order_by('-id')
    context = {
        'history_entries': history_entries,
        'username': student.username,
        'location': location,
        'course_id': course_key.to_deprecated_string()
    }
    return render_to_response('courseware/submission_history.html', context)
def notification_image_for_tab(course_tab, user, course):
    """
    Return the notification image path for the given course_tab if applicable, otherwise None.
    """
    # Dispatch table from tab type to the function that fetches its notifications.
    handlers = {
        StaffGradingTab.type: open_ended_notifications.staff_grading_notifications,
        PeerGradingTab.type: open_ended_notifications.peer_grading_notifications,
        OpenEndedGradingTab.type: open_ended_notifications.combined_notifications,
    }
    handler = handlers.get(course_tab.type)
    if handler is None:
        return None
    notifications = handler(course, user)
    if notifications and notifications['pending_grading']:
        return notifications['img_path']
    return None
def get_static_tab_contents(request, course, tab):
    """
    Return the rendered HTML contents for the given static tab.

    Arguments:
        request: the HTTP request (supplies the user for module setup).
        course: the course descriptor that owns the tab.
        tab: the static tab whose contents should be rendered.

    Returns:
        The rendered tab HTML; an error fragment if rendering raised; or an
        empty string if the tab module could not be loaded at all.
    """
    loc = course.id.make_usage_key(
        tab.type,
        tab.url_slug,
    )
    field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
        course.id, request.user, modulestore().get_item(loc), depth=0
    )
    tab_module = get_module(
        request.user, request, loc, field_data_cache, static_asset_path=course.static_asset_path
    )
    # Use the module-level logger (consistent with the rest of this file, which
    # logs via `log`, not the root `logging` module) with lazy %-formatting so
    # the value is only stringified when DEBUG logging is actually enabled.
    # The previous message also mislabeled this value as `course_module`.
    log.debug(u"Static tab module = %s", tab_module)
    html = ''
    if tab_module is not None:
        try:
            html = tab_module.render(STUDENT_VIEW).content
        except Exception: # pylint: disable=broad-except
            # Show a generic error fragment instead of bubbling a 500; the
            # details are kept in the log for diagnosis.
            html = render_to_string('courseware/error-message.html', None)
            log.exception(
                u"Error rendering course={course}, tab={tab_url}".format(course=course, tab_url=tab['url_slug'])
            )
    return html
@require_GET
@ensure_valid_course_key
def get_course_lti_endpoints(request, course_id):
    """
    View that, given a course_id, returns the a JSON object that enumerates all of the LTI endpoints for that course.
    The LTI 2.0 result service spec at
    http://www.imsglobal.org/lti/ltiv2p0/uml/purl.imsglobal.org/vocab/lis/v2/outcomes/Result/service.html
    says "This specification document does not prescribe a method for discovering the endpoint URLs." This view
    function implements one way of discovering these endpoints, returning a JSON array when accessed.
    Arguments:
        request (django request object): the HTTP request object that triggered this view function
        course_id (unicode): id associated with the course
    Returns:
        (django response object): HTTP response. 404 if course is not found, otherwise 200 with JSON body.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    try:
        # depth=2 so that LTI descriptors inside sections/verticals are loaded.
        course = get_course(course_key, depth=2)
    except ValueError:
        return HttpResponse(status=404)
    anonymous_user = AnonymousUser()
    anonymous_user.known = False # make these "noauth" requests like module_render.handle_xblock_callback_noauth
    lti_descriptors = modulestore().get_items(course.id, qualifiers={'category': 'lti'})
    # Instantiate each LTI descriptor as a module bound to the anonymous user,
    # so outcome service URLs can be generated without a real session.
    lti_noauth_modules = [
        get_module_for_descriptor(
            anonymous_user,
            request,
            descriptor,
            FieldDataCache.cache_for_descriptor_descendents(
                course_key,
                anonymous_user,
                descriptor
            ),
            course_key
        )
        for descriptor in lti_descriptors
    ]
    # One entry per LTI module, exposing both the LTI 2.0 (REST/JSON) and
    # LTI 1.1 (XML) outcome service endpoints.
    endpoints = [
        {
            'display_name': module.display_name,
            'lti_2_0_result_service_json_endpoint': module.get_outcome_service_url(
                service_name='lti_2_0_result_rest_handler') + "/user/{anon_user_id}",
            'lti_1_1_result_service_xml_endpoint': module.get_outcome_service_url(
                service_name='grade_handler'),
        }
        for module in lti_noauth_modules
    ]
    return HttpResponse(json.dumps(endpoints), content_type='application/json')
@login_required
def course_survey(request, course_id):
    """
    Present the survey that is associated with the given course_id.

    The actual survey rendering is implemented in the Survey djangoapp's
    views.py; this endpoint only validates access and decides whether a
    survey applies, falling back to the course info page when it doesn't.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    course = get_course_with_access(request.user, 'load', course_key)
    info_url = reverse('info', args=[course_id])
    if not course.course_survey_name:
        # No survey is attached to this course: send the user to the course instead.
        return redirect(info_url)
    return survey.views.view_student_survey(
        request.user,
        course.course_survey_name,
        course=course,
        redirect_url=info_url,
        is_required=course.course_survey_required,
    )
def is_course_passed(course, grade_summary=None, student=None, request=None):
    """
    Check the user's course passing status; truthy result means passed.

    The passing threshold is the smallest non-zero grade cutoff defined for
    the course; with no non-zero cutoffs the result is falsy.

    Arguments:
        course : course object (must expose ``grade_cutoffs``).
        grade_summary (dict) : contains student grade details; computed via
            ``grades.grade(student, request, course)`` when omitted.
        student : user object (only used when grade_summary is None).
        request (HttpRequest) : only used when grade_summary is None.
    Returns:
        truthy/falsy value indicating whether the course is passed.
    """
    if grade_summary is None:
        grade_summary = grades.grade(student, request, course)
    positive_cutoffs = [value for value in course.grade_cutoffs.values() if value > 0]
    if not positive_cutoffs:
        return None
    success_cutoff = min(positive_cutoffs)
    # Note: strictly greater than the cutoff, matching the original behavior.
    return grade_summary['percent'] > success_cutoff
@require_POST
def generate_user_cert(request, course_id):
    """Start generating a new certificate for the user.
    Certificate generation is allowed if:
    * The user has passed the course, and
    * The user does not already have a pending/completed certificate.
    Note that if an error occurs during certificate generation
    (for example, if the queue is down), then we simply mark the
    certificate generation task status as "error" and re-run
    the task with a management command. To students, the certificate
    will appear to be "generating" until it is re-run.
    Args:
        request (HttpRequest): The POST request to this view.
        course_id (unicode): The identifier for the course.
    Returns:
        HttpResponse: 200 on success, 400 if a new certificate cannot be generated.
    """
    if not request.user.is_authenticated():
        log.info(u"Anon user trying to generate certificate for %s", course_id)
        return HttpResponseBadRequest(
            _('You must be signed in to {platform_name} to create a certificate.').format(
                platform_name=settings.PLATFORM_NAME
            )
        )
    student = request.user
    course_key = CourseKey.from_string(course_id)
    # depth=2 so grading can see the course's sections and subsections.
    course = modulestore().get_course(course_key, depth=2)
    if not course:
        return HttpResponseBadRequest(_("Course is not valid"))
    if not is_course_passed(course, None, student, request):
        return HttpResponseBadRequest(_("Your certificate will be available when you pass the course."))
    certificate_status = certs_api.certificate_downloadable_status(student, course.id)
    if certificate_status["is_downloadable"]:
        return HttpResponseBadRequest(_("Certificate has already been created."))
    elif certificate_status["is_generating"]:
        return HttpResponseBadRequest(_("Certificate is already being created."))
    else:
        # If the certificate is not already in-process or completed,
        # then create a new certificate generation task.
        # If the certificate cannot be added to the queue, this will
        # mark the certificate with "error" status, so it can be re-run
        # with a management command. From the user's perspective,
        # it will appear that the certificate task was submitted successfully.
        certs_api.generate_user_certificates(student, course.id)
        _track_successful_certificate_generation(student.id, course.id)
        return HttpResponse()
def _track_successful_certificate_generation(user_id, course_id):  # pylint: disable=invalid-name
    """Emit an analytics event for a successful certificate generation request.
    Arguments:
        user_id (str): The ID of the user generating the certificate.
        course_id (CourseKey): Identifier for the course.
    Returns:
        None
    """
    # Tracking is only active when Segment is configured for the LMS.
    if not settings.FEATURES.get('SEGMENT_IO_LMS'):
        return
    if not hasattr(settings, 'SEGMENT_IO_LMS_KEY'):
        return
    event_name = 'edx.bi.user.certificate.generate'  # pylint: disable=no-member
    tracking_context = tracker.get_tracker().resolve_context()  # pylint: disable=no-member
    analytics.track(
        user_id,
        event_name,
        {
            'category': 'certificates',
            'label': unicode(course_id)
        },
        context={
            'Google Analytics': {
                'clientId': tracking_context.get('client_id')
            }
        }
    )
| agpl-3.0 |
hbutau/vimdotfiles | pymode/autopep8.py | 8 | 120700 | #!/usr/bin/env python
# Copyright (C) 2010-2011 Hideo Hattori
# Copyright (C) 2011-2013 Hideo Hattori, Steven Myint
# Copyright (C) 2013-2015 Hideo Hattori, Steven Myint, Bill Wendling
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Automatically formats Python code to conform to the PEP 8 style guide.
Fixes that only need be done once can be added by adding a function of the form
"fix_<code>(source)" to this module. They should return the fixed source code.
These fixes are picked up by apply_global_fixes().
Fixes that depend on pep8 should be added as methods to FixPEP8. See the class
documentation for more information.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import codecs
import collections
import copy
import difflib
import fnmatch
import inspect
import io
import keyword
import locale
import os
import re
import signal
import sys
import textwrap
import token
import tokenize
import pep8
# Python 2/3 compatibility: Python 3 has no ``unicode`` builtin, so alias it
# to ``str`` there (autopep8 supports both major versions).
try:
    unicode
except NameError:
    unicode = str
__version__ = '1.2.1a0'
# Line-ending constants used when detecting and preserving newline styles.
CR = '\r'
LF = '\n'
CRLF = '\r\n'
# Matches a shebang line invoking any Python interpreter (python, python2, python3).
PYTHON_SHEBANG_REGEX = re.compile(r'^#!.*\bpython[23]?\b\s*$')
# For generating line shortening candidates.
SHORTEN_OPERATOR_GROUPS = frozenset([
    frozenset([',']),
    frozenset(['%']),
    frozenset([',', '(', '[', '{']),
    frozenset(['%', '(', '[', '{']),
    frozenset([',', '(', '[', '{', '%', '+', '-', '*', '/', '//']),
    frozenset(['%', '+', '-', '*', '/', '//']),
])
# pep8 error codes that are ignored unless the user overrides the default.
DEFAULT_IGNORE = 'E24'
DEFAULT_INDENT_SIZE = 4
# Maps pep8 error codes to the lib2to3 fixer names that can repair them.
# W602 is handled separately due to the need to avoid "with_traceback".
CODE_TO_2TO3 = {
    'E231': ['ws_comma'],
    'E721': ['idioms'],
    'W601': ['has_key'],
    'W603': ['ne'],
    'W604': ['repr'],
    'W690': ['apply',
             'except',
             'exitfunc',
             'numliterals',
             'operator',
             'paren',
             'reduce',
             'renames',
             'standarderror',
             'sys_exc',
             'throw',
             'tuple_params',
             'xreadlines']}
# Location of the user-level pep8 configuration file (platform dependent).
if sys.platform == 'win32':  # pragma: no cover
    DEFAULT_CONFIG = os.path.expanduser(r'~\.pep8')
else:
    DEFAULT_CONFIG = os.path.join(os.getenv('XDG_CONFIG_HOME') or
                                  os.path.expanduser('~/.config'), 'pep8')
# Project-level files searched for pep8 configuration sections.
PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep8')
def open_with_encoding(filename, encoding=None, mode='r'):
    """Return opened file with a specific encoding.

    When no encoding is given, it is detected from the file contents.
    """
    chosen_encoding = encoding or detect_encoding(filename)
    # newline='' so CR / LF / CRLF endings survive a read/write round trip.
    return io.open(filename, mode=mode, encoding=chosen_encoding, newline='')
def detect_encoding(filename):
    """Return file encoding.

    Falls back to 'latin-1' whenever the detected encoding is unknown or
    cannot decode the actual file contents.
    """
    from lib2to3.pgen2 import tokenize as lib2to3_tokenize
    try:
        with open(filename, 'rb') as input_file:
            encoding = lib2to3_tokenize.detect_encoding(input_file.readline)[0]
        # Verify the guess by decoding the whole file with it.
        with open_with_encoding(filename, encoding) as test_file:
            test_file.read()
        return encoding
    except (LookupError, SyntaxError, UnicodeDecodeError):
        return 'latin-1'
def readlines_from_file(filename):
    """Read *filename* (with detected encoding) and return its lines."""
    input_file = open_with_encoding(filename)
    try:
        return input_file.readlines()
    finally:
        input_file.close()
def extended_blank_lines(logical_line,
                         blank_lines,
                         blank_before,
                         indent_level,
                         previous_logical):
    """Check for missing blank lines after class declaration.

    Note: the parameter names are significant -- pep8 injects check
    arguments by name, so they must not be renamed.
    """
    if previous_logical.startswith('class '):
        # E309: a definition or decorator immediately follows a class header.
        if (logical_line.startswith(('def ', 'class ', '@')) and
                indent_level and not blank_lines and not blank_before):
            yield (0, 'E309 expected 1 blank line after class declaration')
    elif previous_logical.startswith('def '):
        # E303: blank lines between a def and its docstring.
        if blank_lines and pep8.DOCSTRING_REGEX.match(logical_line):
            yield (0, 'E303 too many blank lines ({0})'.format(blank_lines))
    elif pep8.DOCSTRING_REGEX.match(previous_logical):
        # Missing blank line between class docstring and method declaration.
        if (indent_level and
                not blank_lines and
                not blank_before and
                logical_line.startswith('def ') and
                '(self' in logical_line):
            yield (0, 'E301 expected 1 blank line, found 0')
pep8.register_check(extended_blank_lines)
def continued_indentation(logical_line, tokens, indent_level, indent_char,
                          noqa):
    """Override pep8's function to provide indentation information.

    Yields (position, message) pairs; the message text also carries the
    desired indentation column, which the fixer (_fix_reindent) later
    parses out of the error info.
    """
    first_row = tokens[0][2][0]
    nrows = 1 + tokens[-1][2][0] - first_row
    if noqa or nrows == 1:
        return

    # indent_next tells us whether the next block is indented. Assuming
    # that it is indented by 4 spaces, then we should not allow 4-space
    # indents on the final continuation line. In turn, some other
    # indents are allowed to have an extra 4 spaces.
    indent_next = logical_line.endswith(':')

    row = depth = 0
    valid_hangs = (
        (DEFAULT_INDENT_SIZE,)
        if indent_char != '\t' else (DEFAULT_INDENT_SIZE,
                                     2 * DEFAULT_INDENT_SIZE)
    )

    # Remember how many brackets were opened on each line.
    parens = [0] * nrows

    # Relative indents of physical lines.
    rel_indent = [0] * nrows

    # For each depth, collect a list of opening rows.
    open_rows = [[0]]
    # For each depth, memorize the hanging indentation.
    hangs = [None]

    # Visual indents.
    indent_chances = {}
    last_indent = tokens[0][2]
    indent = [last_indent[1]]

    last_token_multiline = None
    line = None
    last_line = ''
    last_line_begins_with_multiline = False
    for token_type, text, start, end, line in tokens:
        newline = row < start[0] - first_row
        if newline:
            row = start[0] - first_row
            newline = (not last_token_multiline and
                       token_type not in (tokenize.NL, tokenize.NEWLINE))
            last_line_begins_with_multiline = last_token_multiline

        if newline:
            # This is the beginning of a continuation line.
            last_indent = start

            # Record the initial indent.
            rel_indent[row] = pep8.expand_indent(line) - indent_level

            # Identify closing bracket.
            close_bracket = (token_type == tokenize.OP and text in ']})')

            # Is the indent relative to an opening bracket line?
            for open_row in reversed(open_rows[depth]):
                hang = rel_indent[row] - rel_indent[open_row]
                hanging_indent = hang in valid_hangs
                if hanging_indent:
                    break
            if hangs[depth]:
                hanging_indent = (hang == hangs[depth])

            visual_indent = (not close_bracket and hang > 0 and
                             indent_chances.get(start[1]))

            if close_bracket and indent[depth]:
                # Closing bracket for visual indent.
                if start[1] != indent[depth]:
                    yield (start, 'E124 {0}'.format(indent[depth]))
            elif close_bracket and not hang:
                pass
            elif indent[depth] and start[1] < indent[depth]:
                # Visual indent is broken.
                yield (start, 'E128 {0}'.format(indent[depth]))
            elif (hanging_indent or
                  (indent_next and
                   rel_indent[row] == 2 * DEFAULT_INDENT_SIZE)):
                # Hanging indent is verified.
                if close_bracket:
                    yield (start, 'E123 {0}'.format(indent_level +
                                                    rel_indent[open_row]))
                hangs[depth] = hang
            elif visual_indent is True:
                # Visual indent is verified.
                indent[depth] = start[1]
            elif visual_indent in (text, unicode):
                # Ignore token lined up with matching one from a previous line.
                pass
            else:
                one_indented = (indent_level + rel_indent[open_row] +
                                DEFAULT_INDENT_SIZE)
                # Indent is broken.
                if hang <= 0:
                    error = ('E122', one_indented)
                elif indent[depth]:
                    error = ('E127', indent[depth])
                elif hang > DEFAULT_INDENT_SIZE:
                    error = ('E126', one_indented)
                else:
                    hangs[depth] = hang
                    error = ('E121', one_indented)

                yield (start, '{0} {1}'.format(*error))

        # Look for visual indenting.
        if (
            parens[row] and
            token_type not in (tokenize.NL, tokenize.COMMENT) and
            not indent[depth]
        ):
            indent[depth] = start[1]
            indent_chances[start[1]] = True
        # Deal with implicit string concatenation.
        elif (token_type in (tokenize.STRING, tokenize.COMMENT) or
              text in ('u', 'ur', 'b', 'br')):
            indent_chances[start[1]] = unicode
        # Special case for the "if" statement because len("if (") is equal to
        # 4.
        elif not indent_chances and not row and not depth and text == 'if':
            indent_chances[end[1] + 1] = True
        elif text == ':' and line[end[1]:].isspace():
            open_rows[depth].append(row)

        # Keep track of bracket depth.
        if token_type == tokenize.OP:
            if text in '([{':
                depth += 1
                indent.append(0)
                hangs.append(None)
                if len(open_rows) == depth:
                    open_rows.append([])
                open_rows[depth].append(row)
                parens[row] += 1
            elif text in ')]}' and depth > 0:
                # Parent indents should not be more than this one.
                prev_indent = indent.pop() or last_indent[1]
                hangs.pop()
                for d in range(depth):
                    if indent[d] > prev_indent:
                        indent[d] = 0
                for ind in list(indent_chances):
                    if ind >= prev_indent:
                        del indent_chances[ind]
                del open_rows[depth + 1:]
                depth -= 1
                if depth:
                    indent_chances[indent[depth]] = True
                for idx in range(row, -1, -1):
                    if parens[idx]:
                        parens[idx] -= 1
                        break
            assert len(indent) == depth + 1
            if (
                start[1] not in indent_chances and
                # This is for purposes of speeding up E121 (GitHub #90).
                not last_line.rstrip().endswith(',')
            ):
                # Allow to line up tokens.
                indent_chances[start[1]] = text

        last_token_multiline = (start[0] != end[0])
        if last_token_multiline:
            rel_indent[end[0] - first_row] = rel_indent[row]

        last_line = line

    if (
        indent_next and
        not last_line_begins_with_multiline and
        pep8.expand_indent(line) == indent_level + DEFAULT_INDENT_SIZE
    ):
        pos = (start[0], indent[0] + 4)
        yield (pos, 'E125 {0}'.format(indent_level +
                                      2 * DEFAULT_INDENT_SIZE))
# Replace pep8's stock continued_indentation check with the version above,
# whose messages include the desired indentation column.
del pep8._checks['logical_line'][pep8.continued_indentation]
pep8.register_check(continued_indentation)
class FixPEP8(object):

    """Fix invalid code.

    Fixer methods are prefixed "fix_". The _fix_source() method looks for
    these automatically.

    The fixer method can take either one or two arguments (in addition to
    self). The first argument is "result", which is the error information from
    pep8. The second argument, "logical", is required only for logical-line
    fixes.

    The fixer method can return the list of modified lines or None. An empty
    list would mean that no changes were made. None would mean that only the
    line reported in the pep8 error was modified. Note that the modified line
    numbers that are returned are indexed at 1. This typically would correspond
    with the line number reported in the pep8 error information.

    [fixed method list]
        - e121,e122,e123,e124,e125,e126,e127,e128,e129
        - e201,e202,e203
        - e211
        - e221,e222,e223,e224,e225
        - e231
        - e251
        - e261,e262
        - e271,e272,e273,e274
        - e301,e302,e303
        - e401
        - e502
        - e701,e702
        - e711
        - w291

    """

    def __init__(self, filename,
                 options,
                 contents=None,
                 long_line_ignore_cache=None):
        self.filename = filename
        if contents is None:
            self.source = readlines_from_file(filename)
        else:
            sio = io.StringIO(contents)
            self.source = sio.readlines()
        self.options = options
        self.indent_word = _get_indentword(''.join(self.source))

        # Lines already found unshortenable; may be shared across instances
        # when the caller passes a cache in.
        self.long_line_ignore_cache = (
            set() if long_line_ignore_cache is None
            else long_line_ignore_cache)

        # Many fixers are the same even though pep8 categorizes them
        # differently.
        self.fix_e115 = self.fix_e112
        self.fix_e116 = self.fix_e113
        self.fix_e121 = self._fix_reindent
        self.fix_e122 = self._fix_reindent
        self.fix_e123 = self._fix_reindent
        self.fix_e124 = self._fix_reindent
        self.fix_e126 = self._fix_reindent
        self.fix_e127 = self._fix_reindent
        self.fix_e128 = self._fix_reindent
        self.fix_e129 = self._fix_reindent
        self.fix_e202 = self.fix_e201
        self.fix_e203 = self.fix_e201
        self.fix_e211 = self.fix_e201
        self.fix_e221 = self.fix_e271
        self.fix_e222 = self.fix_e271
        self.fix_e223 = self.fix_e271
        self.fix_e226 = self.fix_e225
        self.fix_e227 = self.fix_e225
        self.fix_e228 = self.fix_e225
        self.fix_e241 = self.fix_e271
        self.fix_e242 = self.fix_e224
        self.fix_e261 = self.fix_e262
        self.fix_e272 = self.fix_e271
        self.fix_e273 = self.fix_e271
        self.fix_e274 = self.fix_e271
        self.fix_e309 = self.fix_e301
        # Logical-line long-line fixing is only used at high aggressiveness.
        self.fix_e501 = (
            self.fix_long_line_logically if
            options and (options.aggressive >= 2 or options.experimental) else
            self.fix_long_line_physically)
        self.fix_e703 = self.fix_e702
        self.fix_w293 = self.fix_w291
    def _fix_source(self, results):
        """Dispatch each pep8 result in *results* to its fixer method."""
        try:
            (logical_start, logical_end) = _find_logical(self.source)
            logical_support = True
        except (SyntaxError, tokenize.TokenError):  # pragma: no cover
            # Logical-line info is unavailable; only physical fixes run.
            logical_support = False

        completed_lines = set()
        for result in sorted(results, key=_priority_key):
            if result['line'] in completed_lines:
                continue

            fixed_methodname = 'fix_' + result['id'].lower()
            if hasattr(self, fixed_methodname):
                fix = getattr(self, fixed_methodname)

                line_index = result['line'] - 1
                original_line = self.source[line_index]

                # Fixers taking a third argument are logical-line fixers.
                is_logical_fix = len(inspect.getargspec(fix).args) > 2
                if is_logical_fix:
                    logical = None
                    if logical_support:
                        logical = _get_logical(self.source,
                                               result,
                                               logical_start,
                                               logical_end)
                        # Skip if any line of the logical line was already
                        # modified by an earlier fix.
                        if logical and set(range(
                                logical[0][0] + 1,
                                logical[1][0] + 1)).intersection(
                                    completed_lines):
                            continue

                    modified_lines = fix(result, logical)
                else:
                    modified_lines = fix(result)

                if modified_lines is None:
                    # Force logical fixes to report what they modified.
                    assert not is_logical_fix

                    if self.source[line_index] == original_line:
                        modified_lines = []

                if modified_lines:
                    completed_lines.update(modified_lines)
                elif modified_lines == []:  # Empty list means no fix
                    if self.options.verbose >= 2:
                        print(
                            '---> Not fixing {f} on line {l}'.format(
                                f=result['id'], l=result['line']),
                            file=sys.stderr)
                else:  # We assume one-line fix when None.
                    completed_lines.add(result['line'])
            else:
                if self.options.verbose >= 3:
                    print(
                        "---> '{0}' is not defined.".format(fixed_methodname),
                        file=sys.stderr)

                    info = result['info'].strip()
                    print('---> {0}:{1}:{2}:{3}'.format(self.filename,
                                                        result['line'],
                                                        result['column'],
                                                        info),
                          file=sys.stderr)
    def fix(self):
        """Return a version of the source code with PEP 8 violations fixed."""
        pep8_options = {
            'ignore': self.options.ignore,
            'select': self.options.select,
            'max_line_length': self.options.max_line_length,
        }
        results = _execute_pep8(pep8_options, self.source)

        if self.options.verbose:
            progress = {}
            for r in results:
                if r['id'] not in progress:
                    progress[r['id']] = set()
                progress[r['id']].add(r['line'])
            print('---> {n} issue(s) to fix {progress}'.format(
                n=len(results), progress=progress), file=sys.stderr)

        if self.options.line_range:
            start, end = self.options.line_range
            results = [r for r in results
                       if start <= r['line'] <= end]

        self._fix_source(filter_results(source=''.join(self.source),
                                        results=results,
                                        aggressive=self.options.aggressive))

        if self.options.line_range:
            # If number of lines has changed then change line_range.
            count = sum(sline.count('\n')
                        for sline in self.source[start - 1:end])
            self.options.line_range[1] = start + count - 1

        return ''.join(self.source)
def _fix_reindent(self, result):
"""Fix a badly indented line.
This is done by adding or removing from its initial indent only.
"""
num_indent_spaces = int(result['info'].split()[1])
line_index = result['line'] - 1
target = self.source[line_index]
self.source[line_index] = ' ' * num_indent_spaces + target.lstrip()
def fix_e112(self, result):
"""Fix under-indented comments."""
line_index = result['line'] - 1
target = self.source[line_index]
if not target.lstrip().startswith('#'):
# Don't screw with invalid syntax.
return []
self.source[line_index] = self.indent_word + target
def fix_e113(self, result):
"""Fix over-indented comments."""
line_index = result['line'] - 1
target = self.source[line_index]
indent = _get_indentation(target)
stripped = target.lstrip()
if not stripped.startswith('#'):
# Don't screw with invalid syntax.
return []
self.source[line_index] = indent[1:] + stripped
def fix_e125(self, result):
"""Fix indentation undistinguish from the next logical line."""
num_indent_spaces = int(result['info'].split()[1])
line_index = result['line'] - 1
target = self.source[line_index]
spaces_to_add = num_indent_spaces - len(_get_indentation(target))
indent = len(_get_indentation(target))
modified_lines = []
while len(_get_indentation(self.source[line_index])) >= indent:
self.source[line_index] = (' ' * spaces_to_add +
self.source[line_index])
modified_lines.append(1 + line_index) # Line indexed at 1.
line_index -= 1
return modified_lines
def fix_e201(self, result):
"""Remove extraneous whitespace."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column'] - 1
if is_probably_part_of_multiline(target):
return []
fixed = fix_whitespace(target,
offset=offset,
replacement='')
self.source[line_index] = fixed
def fix_e224(self, result):
"""Remove extraneous whitespace around operator."""
target = self.source[result['line'] - 1]
offset = result['column'] - 1
fixed = target[:offset] + target[offset:].replace('\t', ' ')
self.source[result['line'] - 1] = fixed
def fix_e225(self, result):
"""Fix missing whitespace around operator."""
target = self.source[result['line'] - 1]
offset = result['column'] - 1
fixed = target[:offset] + ' ' + target[offset:]
# Only proceed if non-whitespace characters match.
# And make sure we don't break the indentation.
if (
fixed.replace(' ', '') == target.replace(' ', '') and
_get_indentation(fixed) == _get_indentation(target)
):
self.source[result['line'] - 1] = fixed
else:
return []
def fix_e231(self, result):
"""Add missing whitespace."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column']
fixed = target[:offset] + ' ' + target[offset:]
self.source[line_index] = fixed
def fix_e251(self, result):
"""Remove whitespace around parameter '=' sign."""
line_index = result['line'] - 1
target = self.source[line_index]
# This is necessary since pep8 sometimes reports columns that goes
# past the end of the physical line. This happens in cases like,
# foo(bar\n=None)
c = min(result['column'] - 1,
len(target) - 1)
if target[c].strip():
fixed = target
else:
fixed = target[:c].rstrip() + target[c:].lstrip()
# There could be an escaped newline
#
# def foo(a=\
# 1)
if fixed.endswith(('=\\\n', '=\\\r\n', '=\\\r')):
self.source[line_index] = fixed.rstrip('\n\r \t\\')
self.source[line_index + 1] = self.source[line_index + 1].lstrip()
return [line_index + 1, line_index + 2] # Line indexed at 1
self.source[result['line'] - 1] = fixed
def fix_e262(self, result):
"""Fix spacing after comment hash."""
target = self.source[result['line'] - 1]
offset = result['column']
code = target[:offset].rstrip(' \t#')
comment = target[offset:].lstrip(' \t#')
fixed = code + (' # ' + comment if comment.strip() else '\n')
self.source[result['line'] - 1] = fixed
def fix_e271(self, result):
"""Fix extraneous whitespace around keywords."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column'] - 1
if is_probably_part_of_multiline(target):
return []
fixed = fix_whitespace(target,
offset=offset,
replacement=' ')
if fixed == target:
return []
else:
self.source[line_index] = fixed
def fix_e301(self, result):
"""Add missing blank line."""
cr = '\n'
self.source[result['line'] - 1] = cr + self.source[result['line'] - 1]
def fix_e302(self, result):
"""Add missing 2 blank lines."""
add_linenum = 2 - int(result['info'].split()[-1])
cr = '\n' * add_linenum
self.source[result['line'] - 1] = cr + self.source[result['line'] - 1]
def fix_e303(self, result):
"""Remove extra blank lines."""
delete_linenum = int(result['info'].split('(')[1].split(')')[0]) - 2
delete_linenum = max(1, delete_linenum)
# We need to count because pep8 reports an offset line number if there
# are comments.
cnt = 0
line = result['line'] - 2
modified_lines = []
while cnt < delete_linenum and line >= 0:
if not self.source[line].strip():
self.source[line] = ''
modified_lines.append(1 + line) # Line indexed at 1
cnt += 1
line -= 1
return modified_lines
def fix_e304(self, result):
"""Remove blank line following function decorator."""
line = result['line'] - 2
if not self.source[line].strip():
self.source[line] = ''
def fix_e401(self, result):
"""Put imports on separate lines."""
line_index = result['line'] - 1
target = self.source[line_index]
offset = result['column'] - 1
if not target.lstrip().startswith('import'):
return []
indentation = re.split(pattern=r'\bimport\b',
string=target, maxsplit=1)[0]
fixed = (target[:offset].rstrip('\t ,') + '\n' +
indentation + 'import ' + target[offset:].lstrip('\t ,'))
self.source[line_index] = fixed
    def fix_long_line_logically(self, result, logical):
        """Try to make lines fit within --max-line-length characters."""
        if (
            not logical or
            len(logical[2]) == 1 or
            self.source[result['line'] - 1].lstrip().startswith('#')
        ):
            # Single physical lines and comments are handled physically.
            return self.fix_long_line_physically(result)

        start_line_index = logical[0][0]
        end_line_index = logical[1][0]
        logical_lines = logical[2]

        previous_line = get_item(self.source, start_line_index - 1, default='')
        next_line = get_item(self.source, end_line_index + 1, default='')

        # Collapse the whole logical line into one physical line first.
        single_line = join_logical_line(''.join(logical_lines))

        try:
            fixed = self.fix_long_line(
                target=single_line,
                previous_line=previous_line,
                next_line=next_line,
                original=''.join(logical_lines))
        except (SyntaxError, tokenize.TokenError):
            return self.fix_long_line_physically(result)

        if fixed:
            # Blank out the old physical lines and place the reflowed text
            # on the first of them.
            for line_index in range(start_line_index, end_line_index + 1):
                self.source[line_index] = ''
            self.source[start_line_index] = fixed
            return range(start_line_index + 1, end_line_index + 1)
        else:
            return []
def fix_long_line_physically(self, result):
"""Try to make lines fit within --max-line-length characters."""
line_index = result['line'] - 1
target = self.source[line_index]
previous_line = get_item(self.source, line_index - 1, default='')
next_line = get_item(self.source, line_index + 1, default='')
try:
fixed = self.fix_long_line(
target=target,
previous_line=previous_line,
next_line=next_line,
original=target)
except (SyntaxError, tokenize.TokenError):
return []
if fixed:
self.source[line_index] = fixed
return [line_index + 1]
else:
return []
    def fix_long_line(self, target, previous_line,
                      next_line, original):
        """Reflow *target*; return fixed text, [] to skip, or None."""
        cache_entry = (target, previous_line, next_line)
        if cache_entry in self.long_line_ignore_cache:
            return []

        if target.lstrip().startswith('#'):
            # Wrap commented lines.
            return shorten_comment(
                line=target,
                max_line_length=self.options.max_line_length,
                last_comment=not next_line.lstrip().startswith('#'))

        fixed = get_fixed_long_line(
            target=target,
            previous_line=previous_line,
            original=original,
            indent_word=self.indent_word,
            max_line_length=self.options.max_line_length,
            aggressive=self.options.aggressive,
            experimental=self.options.experimental,
            verbose=self.options.verbose)
        if fixed and not code_almost_equal(original, fixed):
            return fixed
        else:
            # Remember lines we could not improve so they are not retried.
            self.long_line_ignore_cache.add(cache_entry)
            return None
def fix_e502(self, result):
"""Remove extraneous escape of newline."""
(line_index, _, target) = get_index_offset_contents(result,
self.source)
self.source[line_index] = target.rstrip('\n\r \t\\') + '\n'
def fix_e701(self, result):
"""Put colon-separated compound statement on separate lines."""
line_index = result['line'] - 1
target = self.source[line_index]
c = result['column']
fixed_source = (target[:c] + '\n' +
_get_indentation(target) + self.indent_word +
target[c:].lstrip('\n\r \t\\'))
self.source[result['line'] - 1] = fixed_source
return [result['line'], result['line'] + 1]
    def fix_e702(self, result, logical):
        """Put semicolon-separated compound statement on separate lines."""
        if not logical:
            return []  # pragma: no cover
        logical_lines = logical[2]

        line_index = result['line'] - 1
        target = self.source[line_index]

        if target.rstrip().endswith('\\'):
            # Normalize '1; \\\n2' into '1; 2'.
            self.source[line_index] = target.rstrip('\n \r\t\\')
            self.source[line_index + 1] = self.source[line_index + 1].lstrip()
            return [line_index + 1, line_index + 2]

        if target.rstrip().endswith(';'):
            # A trailing semicolon is simply dropped.
            self.source[line_index] = target.rstrip('\n \r\t;') + '\n'
            return [line_index + 1]

        offset = result['column'] - 1
        first = target[:offset].rstrip(';').rstrip()
        second = (_get_indentation(logical_lines[0]) +
                  target[offset:].lstrip(';').lstrip())

        # Find inline comment.
        inline_comment = None
        if '# ' == target[offset:].lstrip(';').lstrip()[:2]:
            inline_comment = target[offset:].lstrip(';')

        if inline_comment:
            # Keep the comment attached to the first statement.
            self.source[line_index] = first + inline_comment
        else:
            self.source[line_index] = first + '\n' + second
        return [line_index + 1]
def fix_e711(self, result):
"""Fix comparison with None."""
(line_index, offset, target) = get_index_offset_contents(result,
self.source)
right_offset = offset + 2
if right_offset >= len(target):
return []
left = target[:offset].rstrip()
center = target[offset:right_offset]
right = target[right_offset:].lstrip()
if not right.startswith('None'):
return []
if center.strip() == '==':
new_center = 'is'
elif center.strip() == '!=':
new_center = 'is not'
else:
return []
self.source[line_index] = ' '.join([left, new_center, right])
    def fix_e712(self, result):
        """Fix (trivial case of) comparison with boolean."""
        (line_index, offset, target) = get_index_offset_contents(result,
                                                                 self.source)

        # Handle very easy "not" special cases.
        if re.match(r'^\s*if [\w.]+ == False:$', target):
            self.source[line_index] = re.sub(r'if ([\w.]+) == False:',
                                             r'if not \1:', target, count=1)
        elif re.match(r'^\s*if [\w.]+ != True:$', target):
            self.source[line_index] = re.sub(r'if ([\w.]+) != True:',
                                             r'if not \1:', target, count=1)
        else:
            right_offset = offset + 2
            if right_offset >= len(target):
                return []

            left = target[:offset].rstrip()
            center = target[offset:right_offset]
            right = target[right_offset:].lstrip()

            # Handle simple cases only.
            new_right = None
            if center.strip() == '==':
                if re.match(r'\bTrue\b', right):
                    new_right = re.sub(r'\bTrue\b *', '', right, count=1)
            elif center.strip() == '!=':
                if re.match(r'\bFalse\b', right):
                    new_right = re.sub(r'\bFalse\b *', '', right, count=1)

            if new_right is None:
                return []

            # Keep a separating space when the remainder starts with a word.
            if new_right[0].isalnum():
                new_right = ' ' + new_right

            self.source[line_index] = left + new_right
def fix_e713(self, result):
"""Fix (trivial case of) non-membership check."""
(line_index, _, target) = get_index_offset_contents(result,
self.source)
# Handle very easy case only.
if re.match(r'^\s*if not [\w.]+ in [\w.]+:$', target):
self.source[line_index] = re.sub(r'if not ([\w.]+) in ([\w.]+):',
r'if \1 not in \2:',
target,
count=1)
def fix_w291(self, result):
"""Remove trailing whitespace."""
fixed_line = self.source[result['line'] - 1].rstrip()
self.source[result['line'] - 1] = fixed_line + '\n'
def fix_w391(self, _):
"""Remove trailing blank lines."""
blank_count = 0
for line in reversed(self.source):
line = line.rstrip()
if line:
break
else:
blank_count += 1
original_length = len(self.source)
self.source = self.source[:original_length - blank_count]
return range(1, 1 + original_length)
def get_index_offset_contents(result, source):
    """Return (line_index, column_offset, line_contents)."""
    index = result['line'] - 1
    offset = result['column'] - 1
    return (index, offset, source[index])
def get_fixed_long_line(target, previous_line, original,
                        indent_word='    ', max_line_length=79,
                        aggressive=False, experimental=False, verbose=False):
    """Break up long line and return result.

    Do this by generating multiple reformatted candidates and then
    ranking the candidates to heuristically select the best option.

    """
    indent = _get_indentation(target)
    source = target[len(indent):]
    assert source.lstrip() == source

    # Check for partial multiline.
    tokens = list(generate_tokens(source))

    candidates = shorten_line(
        tokens, source, indent,
        indent_word,
        max_line_length,
        aggressive=aggressive,
        experimental=experimental,
        previous_line=previous_line)

    # Also sort alphabetically as a tie breaker (for determinism).
    candidates = sorted(
        sorted(set(candidates).union([target, original])),
        key=lambda x: line_shortening_rank(
            x,
            indent_word,
            max_line_length,
            experimental=experimental))

    if verbose >= 4:
        print(('-' * 79 + '\n').join([''] + candidates + ['']),
              file=wrap_output(sys.stderr, 'utf-8'))

    if candidates:
        best_candidate = candidates[0]

        # Don't allow things to get longer.
        if longest_line_length(best_candidate) > longest_line_length(original):
            return None
        else:
            return best_candidate
def longest_line_length(code):
    """Return length of the longest line in *code*.

    Returns 0 for empty input rather than letting max() raise ValueError
    on an empty sequence, so callers may pass '' safely.

    """
    if not code:
        return 0
    return max(len(line) for line in code.splitlines())
def join_logical_line(logical_line):
    """Return single line based on logical line input."""
    prefix = _get_indentation(logical_line)
    tokens = generate_tokens(logical_line.lstrip())
    return prefix + untokenize_without_newlines(tokens) + '\n'
def untokenize_without_newlines(tokens):
    """Return source code based on tokens."""
    text = ''
    last_row = 0
    last_column = -1

    for tok in tokens:
        string = tok[1]
        (start_row, start_column) = tok[2]
        (end_row, end_column) = tok[3]

        # A new physical row resets the column tracker.
        if start_row > last_row:
            last_column = 0

        needs_gap = (start_column > last_column or string == '\n')
        if needs_gap and not text.endswith(' '):
            text += ' '
        if string != '\n':
            text += string

        (last_row, last_column) = (end_row, end_column)

    return text.rstrip()
def _find_logical(source_lines):
    """Return (logical_start, logical_end) position lists for the source.

    Positions are (row, column) pairs with 0-indexed rows.
    """
    # Make a variable which is the index of all the starts of lines.
    logical_start = []
    logical_end = []
    last_newline = True
    parens = 0
    for t in generate_tokens(''.join(source_lines)):
        if t[0] in [tokenize.COMMENT, tokenize.DEDENT,
                    tokenize.INDENT, tokenize.NL,
                    tokenize.ENDMARKER]:
            continue
        if not parens and t[0] in [tokenize.NEWLINE, tokenize.SEMI]:
            last_newline = True
            logical_end.append((t[3][0] - 1, t[2][1]))
            continue
        if last_newline and not parens:
            logical_start.append((t[2][0] - 1, t[2][1]))
            last_newline = False
        # Track bracket depth; a NEWLINE inside brackets does not end a
        # logical line.
        if t[0] == tokenize.OP:
            if t[1] in '([{':
                parens += 1
            elif t[1] in '}])':
                parens -= 1
    return (logical_start, logical_end)
def _get_logical(source_lines, result, logical_start, logical_end):
"""Return the logical line corresponding to the result.
Assumes input is already E702-clean.
"""
row = result['line'] - 1
col = result['column'] - 1
ls = None
le = None
for i in range(0, len(logical_start), 1):
assert logical_end
x = logical_end[i]
if x[0] > row or (x[0] == row and x[1] > col):
le = x
ls = logical_start[i]
break
if ls is None:
return None
original = source_lines[ls[0]:le[0] + 1]
return ls, le, original
def get_item(items, index, default=None):
    """Return items[index], or *default* when index is out of range.

    Negative indexes are treated as out of range, not as wraparound.
    """
    if index < 0:
        return default
    try:
        return items[index]
    except IndexError:
        return default
def reindent(source, indent_size):
    """Reindent all lines."""
    return Reindenter(source).run(indent_size)
def code_almost_equal(a, b):
    """Return True if code is similar.

    Ignore whitespace when comparing specific line.

    """
    split_a = split_and_strip_non_empty_lines(a)
    split_b = split_and_strip_non_empty_lines(b)

    if len(split_a) != len(split_b):
        return False

    # Compare line by line, ignoring internal whitespace.
    return all(
        ''.join(line_a.split()) == ''.join(line_b.split())
        for (line_a, line_b) in zip(split_a, split_b))
def split_and_strip_non_empty_lines(text):
    """Return lines split by newline.

    Ignore empty lines.

    """
    stripped = (line.strip() for line in text.splitlines())
    return [line for line in stripped if line]
def fix_e265(source, aggressive=False):  # pylint: disable=unused-argument
    """Format block comments."""
    if '#' not in source:
        # Optimization.
        return source

    # Leave comments inside strings/docstrings and commented-out code alone.
    ignored_line_numbers = multiline_string_lines(
        source,
        include_docstrings=True) | set(commented_out_code_lines(source))

    fixed_lines = []
    sio = io.StringIO(source)
    for (line_number, line) in enumerate(sio.readlines(), start=1):
        if (
            line.lstrip().startswith('#') and
            line_number not in ignored_line_numbers
        ):
            indentation = _get_indentation(line)
            line = line.lstrip()

            # Normalize beginning if not a shebang.
            if len(line) > 1:
                # Position of the first non-'#' character.
                pos = next((index for index, c in enumerate(line)
                            if c != '#'))
                if (
                    # Leave multiple spaces like '#    ' alone.
                    (line[:pos].count('#') > 1 or line[1].isalnum()) and
                    # Leave stylistic outlined blocks alone.
                    not line.rstrip().endswith('#')
                ):
                    line = '# ' + line.lstrip('# \t')

            fixed_lines.append(indentation + line)
        else:
            fixed_lines.append(line)

    return ''.join(fixed_lines)
def refactor(source, fixer_names, ignore=None, filename=''):
    """Return refactored code using lib2to3.

    Skip if ignore string is produced in the refactored code.

    """
    from lib2to3 import pgen2
    try:
        new_text = refactor_with_2to3(source,
                                      fixer_names=fixer_names,
                                      filename=filename)
    except (pgen2.parse.ParseError,
            SyntaxError,
            UnicodeDecodeError,
            UnicodeEncodeError):
        # lib2to3 could not handle the input; leave it untouched.
        return source

    if ignore:
        # The refactoring introduced the ignored pattern; back out.
        if ignore in new_text and ignore not in source:
            return source

    return new_text
def code_to_2to3(select, ignore):
    """Return the set of 2to3 fixer names for the selected codes."""
    fixes = set()
    for (code, fix) in CODE_TO_2TO3.items():
        if code_match(code, select=select, ignore=ignore):
            fixes.update(fix)
    return fixes
def fix_2to3(source,
             aggressive=True, select=None, ignore=None, filename=''):
    """Fix various deprecated code (via lib2to3)."""
    if not aggressive:
        return source

    return refactor(source,
                    code_to_2to3(select=select or [],
                                 ignore=ignore or []),
                    filename=filename)
def fix_w602(source, aggressive=True):
    """Fix deprecated form of raising exception."""
    if not aggressive:
        return source

    # 'with_traceback' usages are left alone by the 'raise' fixer.
    return refactor(source, ['raise'], ignore='with_traceback')
def find_newline(source):
    """Return type of newline used in source.

    Input is a list of lines.

    """
    assert not isinstance(source, unicode)

    counter = collections.defaultdict(int)
    for line in source:
        # Check CRLF before CR and LF since it contains both.
        for candidate in (CRLF, CR, LF):
            if line.endswith(candidate):
                counter[candidate] += 1
                break

    return (sorted(counter, key=counter.get, reverse=True) or [LF])[0]
def _get_indentword(source):
    """Return indentation type."""
    try:
        for tok in generate_tokens(source):
            if tok[0] == token.INDENT:
                # Use the first indent found in the source.
                return tok[1]
    except (SyntaxError, tokenize.TokenError):
        pass
    # Default in case source has no indentation.
    return '    '
def _get_indentation(line):
"""Return leading whitespace."""
if line.strip():
non_whitespace_index = len(line) - len(line.lstrip())
return line[:non_whitespace_index]
else:
return ''
def get_diff_text(old, new, filename):
    """Return text of unified diff between old and new."""
    newline = '\n'
    diff = difflib.unified_diff(
        old, new,
        'original/' + filename,
        'fixed/' + filename,
        lineterm=newline)

    pieces = []
    for line in diff:
        pieces.append(line)

        # Work around missing newline (http://bugs.python.org/issue2142).
        if any(pieces) and not line.endswith(newline):
            pieces.append(newline + r'\ No newline at end of file' + newline)

    return ''.join(pieces)
def _priority_key(pep8_result):
"""Key for sorting PEP8 results.
Global fixes should be done first. This is important for things like
indentation.
"""
priority = [
# Fix multiline colon-based before semicolon based.
'e701',
# Break multiline statements early.
'e702',
# Things that make lines longer.
'e225', 'e231',
# Remove extraneous whitespace before breaking lines.
'e201',
# Shorten whitespace in comment before resorting to wrapping.
'e262'
]
middle_index = 10000
lowest_priority = [
# We need to shorten lines last since the logical fixer can get in a
# loop, which causes us to exit early.
'e501'
]
key = pep8_result['id'].lower()
try:
return priority.index(key)
except ValueError:
try:
return middle_index + lowest_priority.index(key) + 1
except ValueError:
return middle_index
def shorten_line(tokens, source, indentation, indent_word, max_line_length,
                 aggressive=False, experimental=False, previous_line=''):
    """Separate line at OPERATOR.

    Multiple candidates will be yielded.

    """
    for candidate in _shorten_line(tokens=tokens,
                                   source=source,
                                   indentation=indentation,
                                   indent_word=indent_word,
                                   aggressive=aggressive,
                                   previous_line=previous_line):
        yield candidate

    if aggressive:
        # Try additional break points at each operator group.
        for key_token_strings in SHORTEN_OPERATOR_GROUPS:
            shortened = _shorten_line_at_tokens(
                tokens=tokens,
                source=source,
                indentation=indentation,
                indent_word=indent_word,
                key_token_strings=key_token_strings,
                aggressive=aggressive)

            if shortened is not None and shortened != source:
                yield shortened

    if experimental:
        for shortened in _shorten_line_at_tokens_new(
                tokens=tokens,
                source=source,
                indentation=indentation,
                max_line_length=max_line_length):

            yield shortened
def _shorten_line(tokens, source, indentation, indent_word,
                  aggressive=False, previous_line=''):
    """Separate line at OPERATOR.

    The input is expected to be free of newlines except for inside multiline
    strings and at the end.
    Multiple candidates will be yielded.
    """
    for (token_type,
         token_string,
         start_offset,
         end_offset) in token_offsets(tokens):
        if (
            token_type == tokenize.COMMENT and
            not is_probably_part_of_multiline(previous_line) and
            not is_probably_part_of_multiline(source) and
            not source[start_offset + 1:].strip().lower().startswith(
                ('noqa', 'pragma:', 'pylint:'))
        ):
            # Move inline comments to previous line.
            first = source[:start_offset]
            second = source[start_offset:]
            yield (indentation + second.strip() + '\n' +
                   indentation + first.strip() + '\n')
        elif token_type == token.OP and token_string != '=':
            # Don't break on '=' after keyword as this violates PEP 8.
            assert token_type != token.INDENT
            first = source[:end_offset]
            # Choose the continuation indent: align one position past an
            # opening bracket when one is present mid-line, otherwise
            # indent by one unit.
            second_indent = indentation
            if first.rstrip().endswith('('):
                second_indent += indent_word
            elif '(' in first:
                second_indent += ' ' * (1 + first.find('('))
            else:
                second_indent += indent_word
            second = (second_indent + source[end_offset:].lstrip())
            if (
                not second.strip() or
                second.lstrip().startswith('#')
            ):
                continue
            # Do not begin a line with a comma.
            if second.lstrip().startswith(','):
                continue
            # Do not end a line with a dot.
            if first.rstrip().endswith('.'):
                continue
            if token_string in '+-*/':
                # Arithmetic operators need an explicit backslash
                # continuation when not inside brackets.
                fixed = first + ' \\' + '\n' + second
            else:
                fixed = first + '\n' + second
            # Only fix if syntax is okay.
            if check_syntax(normalize_multiline(fixed)
                            if aggressive else fixed):
                yield indentation + fixed
# A convenient way to handle tokens.
# The fields mirror the 5-tuples produced by tokenize.generate_tokens():
# token type, token text, (row, column) start position, (row, column) end
# position, and the physical line the token appears on.
Token = collections.namedtuple('Token', ['token_type', 'token_string',
                                         'spos', 'epos', 'line'])
class ReformattedLines(object):

    """The reflowed lines of atoms.

    Each part of the line is represented as an "atom." They can be moved
    around when need be to get the optimal formatting.
    """

    ###########################################################################
    # Private Classes

    class _Indent(object):

        """Represent an indentation in the atom stream."""

        def __init__(self, indent_amt):
            self._indent_amt = indent_amt

        def emit(self):
            """Render the indentation as spaces."""
            return ' ' * self._indent_amt

        @property
        def size(self):
            return self._indent_amt

    class _Space(object):

        """Represent a space in the atom stream."""

        def emit(self):
            return ' '

        @property
        def size(self):
            return 1

    class _LineBreak(object):

        """Represent a line break in the atom stream."""

        def emit(self):
            return '\n'

        @property
        def size(self):
            # A line break contributes nothing to the current line's width.
            return 0

    def __init__(self, max_line_length):
        self._max_line_length = max_line_length
        self._lines = []  # Stream of Atom/Container/_Indent/_Space/_LineBreak.
        self._bracket_depth = 0
        self._prev_item = None
        self._prev_prev_item = None

    def __repr__(self):
        return self.emit()

    ###########################################################################
    # Public Methods

    def add(self, obj, indent_amt, break_after_open_bracket):
        """Add an atom or (recursively) a container to the stream."""
        if isinstance(obj, Atom):
            self._add_item(obj, indent_amt)
            return

        self._add_container(obj, indent_amt, break_after_open_bracket)

    def add_comment(self, item):
        """Append a comment, preceded by exactly two spaces (PEP 8)."""
        num_spaces = 2
        if len(self._lines) > 1:
            if isinstance(self._lines[-1], self._Space):
                num_spaces -= 1
            if len(self._lines) > 2:
                if isinstance(self._lines[-2], self._Space):
                    num_spaces -= 1

        while num_spaces > 0:
            self._lines.append(self._Space())
            num_spaces -= 1
        self._lines.append(item)

    def add_indent(self, indent_amt):
        self._lines.append(self._Indent(indent_amt))

    def add_line_break(self, indent):
        self._lines.append(self._LineBreak())
        self.add_indent(len(indent))

    def add_line_break_at(self, index, indent_amt):
        """Insert a break plus indent at a position inside the stream."""
        self._lines.insert(index, self._LineBreak())
        self._lines.insert(index + 1, self._Indent(indent_amt))

    def add_space_if_needed(self, curr_text, equal=False):
        """Append a space before `curr_text` when PEP 8 spacing requires it."""
        if (
            not self._lines or isinstance(
                self._lines[-1], (self._LineBreak, self._Indent, self._Space))
        ):
            return

        prev_text = unicode(self._prev_item)
        prev_prev_text = (
            unicode(self._prev_prev_item) if self._prev_prev_item else '')

        if (
            # The previous item was a keyword or identifier and the current
            # item isn't an operator that doesn't require a space.
            ((self._prev_item.is_keyword or self._prev_item.is_string or
              self._prev_item.is_name or self._prev_item.is_number) and
             (curr_text[0] not in '([{.,:}])' or
              (curr_text[0] == '=' and equal))) or

            # Don't place spaces around a '.', unless it's in an 'import'
            # statement.
            ((prev_prev_text != 'from' and prev_text[-1] != '.' and
              curr_text != 'import') and

             # Don't place a space before a colon.
             curr_text[0] != ':' and

             # Don't split up ending brackets by spaces.
             ((prev_text[-1] in '}])' and curr_text[0] not in '.,}])') or

              # Put a space after a colon or comma.
              prev_text[-1] in ':,' or

              # Put space around '=' if asked to.
              (equal and prev_text == '=') or

              # Put spaces around non-unary arithmetic operators.
              ((self._prev_prev_item and
                (prev_text not in '+-' and
                 (self._prev_prev_item.is_name or
                  self._prev_prev_item.is_number or
                  self._prev_prev_item.is_string)) and
                prev_text in ('+', '-', '%', '*', '/', '//', '**', 'in')))))
        ):
            self._lines.append(self._Space())

    def previous_item(self):
        """Return the previous non-whitespace item."""
        return self._prev_item

    def fits_on_current_line(self, item_extent):
        return self.current_size() + item_extent <= self._max_line_length

    def current_size(self):
        """The size of the current line minus the indentation."""
        size = 0
        # Walk backwards until the most recent line break.
        for item in reversed(self._lines):
            size += item.size
            if isinstance(item, self._LineBreak):
                break

        return size

    def line_empty(self):
        return (self._lines and
                isinstance(self._lines[-1],
                           (self._LineBreak, self._Indent)))

    def emit(self):
        """Render the atom stream as text, trimming trailing whitespace."""
        string = ''
        for item in self._lines:
            if isinstance(item, self._LineBreak):
                string = string.rstrip()
            string += item.emit()

        return string.rstrip() + '\n'

    ###########################################################################
    # Private Methods

    def _add_item(self, item, indent_amt):
        """Add an item to the line.

        Reflow the line to get the best formatting after the item is
        inserted. The bracket depth indicates if the item is being
        inserted inside of a container or not.
        """
        if self._prev_item and self._prev_item.is_string and item.is_string:
            # Place consecutive string literals on separate lines.
            self._lines.append(self._LineBreak())
            self._lines.append(self._Indent(indent_amt))

        item_text = unicode(item)
        if self._lines and self._bracket_depth:
            # Adding the item into a container.
            self._prevent_default_initializer_splitting(item, indent_amt)

            if item_text in '.,)]}':
                self._split_after_delimiter(item, indent_amt)

        elif self._lines and not self.line_empty():
            # Adding the item outside of a container.
            if self.fits_on_current_line(len(item_text)):
                self._enforce_space(item)

            else:
                # Line break for the new item.
                self._lines.append(self._LineBreak())
                self._lines.append(self._Indent(indent_amt))

        self._lines.append(item)
        self._prev_item, self._prev_prev_item = item, self._prev_item

        if item_text in '([{':
            self._bracket_depth += 1

        elif item_text in '}])':
            self._bracket_depth -= 1
            assert self._bracket_depth >= 0

    def _add_container(self, container, indent_amt, break_after_open_bracket):
        """Place a container, breaking before it when it won't fit."""
        actual_indent = indent_amt + 1

        if (
            unicode(self._prev_item) != '=' and
            not self.line_empty() and
            not self.fits_on_current_line(
                container.size + self._bracket_depth + 2)
        ):

            if unicode(container)[0] == '(' and self._prev_item.is_name:
                # Don't split before the opening bracket of a call.
                break_after_open_bracket = True
                actual_indent = indent_amt + 4
            elif (
                break_after_open_bracket or
                unicode(self._prev_item) not in '([{'
            ):
                # If the container doesn't fit on the current line and the
                # current line isn't empty, place the container on the next
                # line.
                self._lines.append(self._LineBreak())
                self._lines.append(self._Indent(indent_amt))
                break_after_open_bracket = False
        else:
            actual_indent = self.current_size() + 1
            break_after_open_bracket = False

        if isinstance(container, (ListComprehension, IfExpression)):
            actual_indent = indent_amt

        # Increase the continued indentation only if recursing on a
        # container.
        container.reflow(self, ' ' * actual_indent,
                         break_after_open_bracket=break_after_open_bracket)

    def _prevent_default_initializer_splitting(self, item, indent_amt):
        """Prevent splitting between a default initializer.

        When there is a default initializer, it's best to keep it all on
        the same line. It's nicer and more readable, even if it goes
        over the maximum allowable line length. This goes back along the
        current line to determine if we have a default initializer, and,
        if so, to remove extraneous whitespaces and add a line
        break/indent before it if needed.
        """
        if unicode(item) == '=':
            # This is the assignment in the initializer. Just remove spaces for
            # now.
            self._delete_whitespace()
            return

        if (not self._prev_item or not self._prev_prev_item or
                unicode(self._prev_item) != '='):
            return

        self._delete_whitespace()
        prev_prev_index = self._lines.index(self._prev_prev_item)

        if (
            isinstance(self._lines[prev_prev_index - 1], self._Indent) or
            self.fits_on_current_line(item.size + 1)
        ):
            # The default initializer is already the only item on this line.
            # Don't insert a newline here.
            return

        # Replace the space with a newline/indent combo.
        if isinstance(self._lines[prev_prev_index - 1], self._Space):
            del self._lines[prev_prev_index - 1]

        self.add_line_break_at(self._lines.index(self._prev_prev_item),
                               indent_amt)

    def _split_after_delimiter(self, item, indent_amt):
        """Split the line only after a delimiter."""
        self._delete_whitespace()

        if self.fits_on_current_line(item.size):
            return

        # Scan backwards for the most recent space that directly follows a
        # delimiter (ignoring colons); that is where the break goes.
        last_space = None
        for item in reversed(self._lines):
            if (
                last_space and
                (not isinstance(item, Atom) or not item.is_colon)
            ):
                break
            else:
                last_space = None
            if isinstance(item, self._Space):
                last_space = item
            if isinstance(item, (self._LineBreak, self._Indent)):
                return

        if not last_space:
            return

        self.add_line_break_at(self._lines.index(last_space), indent_amt)

    def _enforce_space(self, item):
        """Enforce a space in certain situations.

        There are cases where we will want a space where normally we
        wouldn't put one. This just enforces the addition of a space.
        """
        if isinstance(self._lines[-1],
                      (self._Space, self._LineBreak, self._Indent)):
            return

        if not self._prev_item:
            return

        item_text = unicode(item)
        prev_text = unicode(self._prev_item)

        # Prefer a space around a '.' in an import statement, and between the
        # 'import' and '('.
        if (
            (item_text == '.' and prev_text == 'from') or
            (item_text == 'import' and prev_text == '.') or
            (item_text == '(' and prev_text == 'import')
        ):
            self._lines.append(self._Space())

    def _delete_whitespace(self):
        """Delete all whitespace from the end of the line."""
        while isinstance(self._lines[-1], (self._Space, self._LineBreak,
                                           self._Indent)):
            del self._lines[-1]
class Atom(object):

    """The smallest unbreakable unit that can be reflowed."""

    def __init__(self, atom):
        # `atom` is a Token namedtuple (token_type, token_string, spos,
        # epos, line).
        self._atom = atom

    def __repr__(self):
        return self._atom.token_string

    def __len__(self):
        return self.size

    def reflow(
        self, reflowed_lines, continued_indent, extent,
        break_after_open_bracket=False,
        is_list_comp_or_if_expr=False,
        next_is_dot=False
    ):
        """Emit this atom into `reflowed_lines`.

        Breaks the line first when this atom (measured by `extent`, i.e.
        the length of the whole element it starts) would overflow the
        maximum line length.
        """
        if self._atom.token_type == tokenize.COMMENT:
            reflowed_lines.add_comment(self)
            return

        total_size = extent if extent else self.size

        if self._atom.token_string not in ',:([{}])':
            # Some atoms will need an extra 1-sized space token after them.
            total_size += 1

        prev_item = reflowed_lines.previous_item()
        if (
            not is_list_comp_or_if_expr and
            not reflowed_lines.fits_on_current_line(total_size) and
            not (next_is_dot and
                 reflowed_lines.fits_on_current_line(self.size + 1)) and
            not reflowed_lines.line_empty() and
            not self.is_colon and
            not (prev_item and prev_item.is_name and
                 unicode(self) == '(')
        ):
            # Start a new line if there is already something on the line and
            # adding this atom would make it go over the max line length.
            reflowed_lines.add_line_break(continued_indent)
        else:
            reflowed_lines.add_space_if_needed(unicode(self))

        reflowed_lines.add(self, len(continued_indent),
                           break_after_open_bracket)

    def emit(self):
        return self.__repr__()

    @property
    def is_keyword(self):
        return keyword.iskeyword(self._atom.token_string)

    @property
    def is_string(self):
        return self._atom.token_type == tokenize.STRING

    @property
    def is_name(self):
        return self._atom.token_type == tokenize.NAME

    @property
    def is_number(self):
        return self._atom.token_type == tokenize.NUMBER

    @property
    def is_comma(self):
        return self._atom.token_string == ','

    @property
    def is_colon(self):
        return self._atom.token_string == ':'

    @property
    def size(self):
        return len(self._atom.token_string)
class Container(object):

    """Base class for all container types."""

    def __init__(self, items):
        # `items` is a list of Atom and (nested) Container objects.
        self._items = items

    def __repr__(self):
        """Render the container as normalized source text."""
        string = ''
        last_was_keyword = False

        for item in self._items:
            if item.is_comma:
                string += ', '
            elif item.is_colon:
                string += ': '
            else:
                item_string = unicode(item)
                if (
                    string and
                    (last_was_keyword or
                     (not string.endswith(tuple('([{,.:}]) ')) and
                      not item_string.startswith(tuple('([{,.:}])'))))
                ):
                    string += ' '
                string += item_string

            last_was_keyword = item.is_keyword

        return string

    def __iter__(self):
        for element in self._items:
            yield element

    def __getitem__(self, idx):
        return self._items[idx]

    def reflow(self, reflowed_lines, continued_indent,
               break_after_open_bracket=False):
        """Emit all items into `reflowed_lines`, inserting line breaks.

        Recurses into nested containers. Breaks after the opening bracket
        (when requested), after commas that follow a nested container, and
        before elements that would overflow the line.
        """
        last_was_container = False
        for (index, item) in enumerate(self._items):
            next_item = get_item(self._items, index + 1)

            if isinstance(item, Atom):
                is_list_comp_or_if_expr = (
                    isinstance(self, (ListComprehension, IfExpression)))
                item.reflow(reflowed_lines, continued_indent,
                            self._get_extent(index),
                            is_list_comp_or_if_expr=is_list_comp_or_if_expr,
                            next_is_dot=(next_item and
                                         unicode(next_item) == '.'))
                if last_was_container and item.is_comma:
                    reflowed_lines.add_line_break(continued_indent)
                last_was_container = False
            else:  # isinstance(item, Container)
                reflowed_lines.add(item, len(continued_indent),
                                   break_after_open_bracket)
                last_was_container = not isinstance(item, (ListComprehension,
                                                           IfExpression))

            if (
                break_after_open_bracket and index == 0 and
                # Prefer to keep empty containers together instead of
                # separating them.
                unicode(item) == self.open_bracket and
                (not next_item or unicode(next_item) != self.close_bracket) and
                (len(self._items) != 3 or not isinstance(next_item, Atom))
            ):
                reflowed_lines.add_line_break(continued_indent)
                break_after_open_bracket = False
            else:
                next_next_item = get_item(self._items, index + 2)
                if (
                    unicode(item) not in ['.', '%', 'in'] and
                    next_item and not isinstance(next_item, Container) and
                    unicode(next_item) != ':' and
                    next_next_item and (not isinstance(next_next_item, Atom) or
                                        unicode(next_item) == 'not') and
                    not reflowed_lines.line_empty() and
                    not reflowed_lines.fits_on_current_line(
                        self._get_extent(index + 1) + 2)
                ):
                    reflowed_lines.add_line_break(continued_indent)

    def _get_extent(self, index):
        """The extent of the full element.

        E.g., the length of a function call or keyword.
        """
        extent = 0
        prev_item = get_item(self._items, index - 1)
        seen_dot = prev_item and unicode(prev_item) == '.'
        while index < len(self._items):
            item = get_item(self._items, index)
            index += 1

            if isinstance(item, (ListComprehension, IfExpression)):
                break

            if isinstance(item, Container):
                if prev_item and prev_item.is_name:
                    # A call: after a dot only the bracket counts, otherwise
                    # the whole argument list does.
                    if seen_dot:
                        extent += 1
                    else:
                        extent += item.size

                    prev_item = item
                    continue
            elif (unicode(item) not in ['.', '=', ':', 'not'] and
                    not item.is_name and not item.is_string):
                # The element ends at the first operator-like atom.
                break

            if unicode(item) == '.':
                seen_dot = True

            extent += item.size
            prev_item = item

        return extent

    @property
    def is_string(self):
        return False

    @property
    def size(self):
        return len(self.__repr__())

    @property
    def is_keyword(self):
        return False

    @property
    def is_name(self):
        return False

    @property
    def is_comma(self):
        return False

    @property
    def is_colon(self):
        return False

    @property
    def open_bracket(self):
        return None

    @property
    def close_bracket(self):
        return None
class Tuple(Container):

    """Container specialization representing a parenthesized tuple."""

    @property
    def open_bracket(self):
        """The '(' that starts the tuple."""
        return '('

    @property
    def close_bracket(self):
        """The ')' that terminates the tuple."""
        return ')'
class List(Container):

    """Container specialization representing a square-bracketed list."""

    @property
    def open_bracket(self):
        """The '[' that starts the list."""
        return '['

    @property
    def close_bracket(self):
        """The ']' that terminates the list."""
        return ']'
class DictOrSet(Container):

    """Container specialization representing a dict or set literal."""

    @property
    def open_bracket(self):
        """The '{' that starts the dict/set."""
        return '{'

    @property
    def close_bracket(self):
        """The '}' that terminates the dict/set."""
        return '}'
class ListComprehension(Container):

    """A high-level representation of a list comprehension."""

    @property
    def size(self):
        # Count only the elements before any trailing if-expression; the
        # condition part does not contribute to the comprehension's width.
        total = 0
        for element in self._items:
            if isinstance(element, IfExpression):
                return total
            total += element.size
        return total
class IfExpression(Container):

    """Container specialization for a conditional (if/else) expression."""
def _parse_container(tokens, index, for_or_if=None):
    """Parse a high-level container, such as a list, tuple, etc.

    Starts at `index` (the opening bracket, or the 'for'/'if' keyword when
    `for_or_if` is set) and returns a (container, index) pair where `index`
    points at the container's last consumed token. Returns (None, None)
    when the token stream ends without closing the container.
    """
    # Store the opening bracket.
    items = [Atom(Token(*tokens[index]))]
    index += 1

    num_tokens = len(tokens)
    while index < num_tokens:
        tok = Token(*tokens[index])

        if tok.token_string in ',)]}':
            # First check if we're at the end of a list comprehension or
            # if-expression. Don't add the ending token as part of the list
            # comprehension or if-expression, because they aren't part of those
            # constructs.
            if for_or_if == 'for':
                return (ListComprehension(items), index - 1)

            elif for_or_if == 'if':
                return (IfExpression(items), index - 1)

            # Not inside a comprehension/if-expression: keep the delimiter.
            items.append(Atom(tok))

            # A closing bracket ends this container; a ',' just continues it.
            if tok.token_string == ')':
                # The end of a tuple.
                return (Tuple(items), index)

            elif tok.token_string == ']':
                # The end of a list.
                return (List(items), index)

            elif tok.token_string == '}':
                # The end of a dictionary or set.
                return (DictOrSet(items), index)

        elif tok.token_string in '([{':
            # A sub-container is being defined.
            (container, index) = _parse_container(tokens, index)
            items.append(container)

        elif tok.token_string == 'for':
            (container, index) = _parse_container(tokens, index, 'for')
            items.append(container)

        elif tok.token_string == 'if':
            (container, index) = _parse_container(tokens, index, 'if')
            items.append(container)

        else:
            items.append(Atom(tok))

        index += 1

    return (None, None)
def _parse_tokens(tokens):
    """Parse the tokens.

    This converts the tokens into a form where we can manipulate them
    more easily.
    """
    parsed_tokens = []
    position = 0
    end = len(tokens)
    while position < end:
        tok = Token(*tokens[position])
        assert tok.token_type != token.INDENT

        if tok.token_type == tokenize.NEWLINE:
            # There's only one newline and it's at the end.
            break

        if tok.token_string in '([{':
            # Recursively fold the bracketed region into a Container.
            (container, position) = _parse_container(tokens, position)
            if not container:
                return None
            parsed_tokens.append(container)
        else:
            parsed_tokens.append(Atom(tok))

        position += 1

    return parsed_tokens
def _reflow_lines(parsed_tokens, indentation, max_line_length,
                  start_on_prefix_line):
    """Reflow the lines so that it looks nice.

    `parsed_tokens` is the Atom/Container stream from _parse_tokens().
    When `start_on_prefix_line` is true, the first container stays on the
    same line as the leading tokens; otherwise we break right after the
    opening bracket. Returns the reformatted text, or None when alignment
    would be degenerate.
    """
    if unicode(parsed_tokens[0]) == 'def':
        # A function definition gets indented a bit more.
        continued_indent = indentation + ' ' * 2 * DEFAULT_INDENT_SIZE
    else:
        continued_indent = indentation + ' ' * DEFAULT_INDENT_SIZE

    break_after_open_bracket = not start_on_prefix_line

    lines = ReformattedLines(max_line_length)
    lines.add_indent(len(indentation.lstrip('\r\n')))
    if not start_on_prefix_line:
        # If splitting after the opening bracket will cause the first element
        # to be aligned weirdly, don't try it.
        first_token = get_item(parsed_tokens, 0)
        second_token = get_item(parsed_tokens, 1)

        if (
            first_token and second_token and
            unicode(second_token)[0] == '(' and
            len(indentation) + len(first_token) + 1 == len(continued_indent)
        ):
            return None

    for item in parsed_tokens:
        lines.add_space_if_needed(unicode(item), equal=True)

        save_continued_indent = continued_indent
        if start_on_prefix_line and isinstance(item, Container):
            # Align continuation lines just past the container's bracket,
            # but only for the first container encountered.
            start_on_prefix_line = False
            continued_indent = ' ' * (lines.current_size() + 1)

        item.reflow(lines, continued_indent, break_after_open_bracket)
        continued_indent = save_continued_indent

    return lines.emit()
def _shorten_line_at_tokens_new(tokens, source, indentation,
                                max_line_length):
    """Shorten the line taking its length into account.

    The input is expected to be free of newlines except for inside
    multiline strings and at the end.

    Yields candidate replacements (the original first, then up to two
    reflowed variants that pass the syntax check).
    """
    # Yield the original source so to see if it's a better choice than the
    # shortened candidate lines we generate here.
    yield indentation + source

    parsed_tokens = _parse_tokens(tokens)

    if parsed_tokens:
        # Perform two reflows. The first one starts on the same line as the
        # prefix. The second starts on the line after the prefix.
        fixed = _reflow_lines(parsed_tokens, indentation, max_line_length,
                              start_on_prefix_line=True)
        if fixed and check_syntax(normalize_multiline(fixed.lstrip())):
            yield fixed

        fixed = _reflow_lines(parsed_tokens, indentation, max_line_length,
                              start_on_prefix_line=False)
        if fixed and check_syntax(normalize_multiline(fixed.lstrip())):
            yield fixed
def _shorten_line_at_tokens(tokens, source, indentation, indent_word,
                            key_token_strings, aggressive):
    """Separate line by breaking at tokens in key_token_strings.

    The input is expected to be free of newlines except for inside
    multiline strings and at the end.

    Returns the shortened text, or None when the result does not pass the
    syntax check.
    """
    # Collect the character offsets at which the line may be split.
    offsets = []
    for (index, _t) in enumerate(token_offsets(tokens)):
        (token_type,
         token_string,
         start_offset,
         end_offset) = _t

        assert token_type != token.INDENT

        if token_string in key_token_strings:
            # Do not break in containers with zero or one items.
            unwanted_next_token = {
                '(': ')',
                '[': ']',
                '{': '}'}.get(token_string)
            if unwanted_next_token:
                if (
                    get_item(tokens,
                             index + 1,
                             default=[None, None])[1] == unwanted_next_token or
                    get_item(tokens,
                             index + 2,
                             default=[None, None])[1] == unwanted_next_token
                ):
                    continue

            if (
                index > 2 and token_string == '(' and
                tokens[index - 1][1] in ',(%['
            ):
                # Don't split after a tuple start, or before a tuple start if
                # the tuple is in a list.
                continue

            if end_offset < len(source) - 1:
                # Don't split right before newline.
                offsets.append(end_offset)
        else:
            # Break at adjacent strings. These were probably meant to be on
            # separate lines in the first place.
            previous_token = get_item(tokens, index - 1)
            if (
                token_type == tokenize.STRING and
                previous_token and previous_token[0] == tokenize.STRING
            ):
                offsets.append(start_offset)

    # Rejoin the pieces, indenting continuation lines and going one level
    # deeper after every opening bracket that ends a piece.
    current_indent = None
    fixed = None
    for line in split_at_offsets(source, offsets):
        if fixed:
            fixed += '\n' + current_indent + line

            for symbol in '([{':
                if line.endswith(symbol):
                    current_indent += indent_word
        else:
            # First line.
            fixed = line
            assert not current_indent
            current_indent = indent_word

    assert fixed is not None

    if check_syntax(normalize_multiline(fixed)
                    if aggressive > 1 else fixed):
        return indentation + fixed
    else:
        return None
def token_offsets(tokens):
    """Yield tokens and offsets.

    For each tokenize-style 5-tuple in `tokens`, yield
    (token_type, token_string, start_offset, end_offset), where the
    offsets are character positions within the reconstructed logical line.
    """
    offset = 0
    prev_end_row = 0
    prev_end_col = 0
    for tok in tokens:
        tok_type = tok[0]
        tok_string = tok[1]
        (start_row, start_col) = tok[2]
        (end_row, end_col) = tok[3]

        # Advance over the whitespace between tokens. When the previous
        # token ended on the same row, its end column was already counted,
        # so back it out.
        offset += start_col
        if prev_end_row == start_row:
            offset -= prev_end_col

        start = offset
        # Advance over the token text itself.
        offset += len(tok_string)

        yield (tok_type, tok_string, start, offset)

        prev_end_row = end_row
        prev_end_col = end_col
def normalize_multiline(line):
    """Normalize multiline-related code that will cause syntax error.

    This is for purposes of checking syntax.
    """
    if line.startswith('def ') and line.rstrip().endswith(':'):
        # Give the bodiless function a body.
        return line + ' pass'
    if line.startswith('return '):
        # Wrap the bare 'return' in a dummy function.
        return 'def _(): ' + line
    if line.startswith('@'):
        # Hang a dummy function off the decorator.
        return line + 'def _(): pass'
    if line.startswith('class '):
        return line + ' pass'
    if line.startswith(('if ', 'elif ', 'for ', 'while ')):
        return line + ' pass'
    return line
def fix_whitespace(line, offset, replacement):
    """Replace whitespace at offset and return fixed line."""
    # Strip escaped newlines along with ordinary whitespace on both sides
    # of the offset.
    junk = '\n\r \t\\'
    left = line[:offset].rstrip(junk)
    right = line[offset:].lstrip(junk)
    if right.startswith('#'):
        # Never merge code into a trailing comment.
        return line
    return left + replacement + right
def _execute_pep8(pep8_options, source):
    """Execute pep8 via python method calls.

    `source` is a list of lines; `pep8_options` is passed through to
    pep8.Checker. Returns the collected error dictionaries.
    """
    class QuietReport(pep8.BaseReport):

        """Version of checker that does not print."""

        def __init__(self, options):
            super(QuietReport, self).__init__(options)
            self.__full_error_results = []

        def error(self, line_number, offset, text, check):
            """Collect errors."""
            # The parent's error() returns the code only when the issue is
            # not ignored/expected.
            code = super(QuietReport, self).error(line_number,
                                                  offset,
                                                  text,
                                                  check)
            if code:
                self.__full_error_results.append(
                    {'id': code,
                     'line': line_number,
                     # pep8 columns are 0-based; report 1-based.
                     'column': offset + 1,
                     'info': text})

        def full_error_results(self):
            """Return error results in detail.

            Results are in the form of a list of dictionaries. Each
            dictionary contains 'id', 'line', 'column', and 'info'.
            """
            return self.__full_error_results

    checker = pep8.Checker('', lines=source,
                           reporter=QuietReport, **pep8_options)
    checker.check_all()
    return checker.report.full_error_results()
def _remove_leading_and_normalize(line):
    """Strip leading whitespace and normalize the line ending to one LF."""
    # CR and LF are module-level constants.
    stripped = line.lstrip()
    return stripped.rstrip(CR + LF) + '\n'
class Reindenter(object):

    """Reindents badly-indented code to uniformly use four-space indentation.

    Released to the public domain, by Tim Peters, 03 October 2000.
    """

    def __init__(self, input_text):
        sio = io.StringIO(input_text)
        source_lines = sio.readlines()

        # Lines inside multiline strings must be preserved verbatim.
        self.string_content_line_numbers = multiline_string_lines(input_text)

        # File lines, rstripped & tab-expanded. Dummy at start is so
        # that we can use tokenize's 1-based line numbering easily.
        # Note that a line is all-blank iff it is a newline.
        self.lines = []
        for line_number, line in enumerate(source_lines, start=1):
            # Do not modify if inside a multiline string.
            if line_number in self.string_content_line_numbers:
                self.lines.append(line)
            else:
                # Only expand leading tabs.
                self.lines.append(_get_indentation(line).expandtabs() +
                                  _remove_leading_and_normalize(line))

        self.lines.insert(0, None)
        self.index = 1  # index into self.lines of next line
        self.input_text = input_text

    def run(self, indent_size=DEFAULT_INDENT_SIZE):
        """Fix indentation and return modified line numbers.

        Line numbers are indexed at 1.
        """
        if indent_size < 1:
            return self.input_text

        try:
            stats = _reindent_stats(tokenize.generate_tokens(self.getline))
        except (SyntaxError, tokenize.TokenError):
            return self.input_text

        # Remove trailing empty lines.
        lines = self.lines

        # Sentinel.
        stats.append((len(lines), 0))

        # Map count of leading spaces to # we want.
        have2want = {}

        # Program after transformation.
        after = []

        # Copy over initial empty lines -- there's nothing to do until
        # we see a line with *something* on it.
        i = stats[0][0]
        after.extend(lines[1:i])
        for i in range(len(stats) - 1):
            thisstmt, thislevel = stats[i]
            nextstmt = stats[i + 1][0]
            have = _leading_space_count(lines[thisstmt])
            want = thislevel * indent_size
            if want < 0:
                # A comment line.
                if have:
                    # An indented comment line. If we saw the same
                    # indentation before, reuse what it most recently
                    # mapped to.
                    want = have2want.get(have, -1)
                    if want < 0:
                        # Then it probably belongs to the next real stmt.
                        for j in range(i + 1, len(stats) - 1):
                            jline, jlevel = stats[j]
                            if jlevel >= 0:
                                if have == _leading_space_count(lines[jline]):
                                    want = jlevel * indent_size
                                break
                    if want < 0:  # Maybe it's a hanging
                        # comment like this one,
                        # in which case we should shift it like its base
                        # line got shifted.
                        for j in range(i - 1, -1, -1):
                            jline, jlevel = stats[j]
                            if jlevel >= 0:
                                want = (have + _leading_space_count(
                                    after[jline - 1]) -
                                    _leading_space_count(lines[jline]))
                                break
                    if want < 0:
                        # Still no luck -- leave it alone.
                        want = have
                else:
                    want = 0
            assert want >= 0
            have2want[have] = want
            diff = want - have
            if diff == 0 or have == 0:
                after.extend(lines[thisstmt:nextstmt])
            else:
                for line_number, line in enumerate(lines[thisstmt:nextstmt],
                                                   start=thisstmt):
                    if line_number in self.string_content_line_numbers:
                        # Never reindent the interior of a multiline string.
                        after.append(line)
                    elif diff > 0:
                        if line == '\n':
                            after.append(line)
                        else:
                            after.append(' ' * diff + line)
                    else:
                        remove = min(_leading_space_count(line), -diff)
                        after.append(line[remove:])

        return ''.join(after)

    def getline(self):
        """Line-getter for tokenize."""
        if self.index >= len(self.lines):
            line = ''
        else:
            line = self.lines[self.index]
            self.index += 1
        return line
def _reindent_stats(tokens):
"""Return list of (lineno, indentlevel) pairs.
One for each stmt and comment line. indentlevel is -1 for comment lines, as
a signal that tokenize doesn't know what to do about them; indeed, they're
our headache!
"""
find_stmt = 1 # Next token begins a fresh stmt?
level = 0 # Current indent level.
stats = []
for t in tokens:
token_type = t[0]
sline = t[2][0]
line = t[4]
if token_type == tokenize.NEWLINE:
# A program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
find_stmt = 1
elif token_type == tokenize.INDENT:
find_stmt = 1
level += 1
elif token_type == tokenize.DEDENT:
find_stmt = 1
level -= 1
elif token_type == tokenize.COMMENT:
if find_stmt:
stats.append((sline, -1))
# But we're still looking for a new stmt, so leave
# find_stmt alone.
elif token_type == tokenize.NL:
pass
elif find_stmt:
# This is the first "real token" following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER.
find_stmt = 0
if line: # Not endmarker.
stats.append((sline, level))
return stats
def _leading_space_count(line):
"""Return number of leading spaces in line."""
i = 0
while i < len(line) and line[i] == ' ':
i += 1
return i
def refactor_with_2to3(source_text, fixer_names, filename=''):
    """Use lib2to3 to refactor the source.

    Return the refactored source code. On tokenization failure the source
    is returned unchanged.
    """
    from lib2to3.refactor import RefactoringTool
    fixers = ['lib2to3.fixes.fix_' + name for name in fixer_names]
    tool = RefactoringTool(fixer_names=fixers, explicit=fixers)

    from lib2to3.pgen2 import tokenize as lib2to3_tokenize
    try:
        # The name parameter is necessary particularly for the "import" fixer.
        return unicode(tool.refactor_string(source_text, name=filename))
    except lib2to3_tokenize.TokenError:
        return source_text
def check_syntax(code):
    """Return a truthy value (the compiled code object) if syntax is okay.

    Returns False when `code` does not compile.
    """
    try:
        compiled = compile(code, '<string>', 'exec')
    except (SyntaxError, TypeError, UnicodeDecodeError):
        return False
    return compiled
def filter_results(source, results, aggressive):
    """Filter out spurious reports from pep8.

    If aggressive is True, we allow possibly unsafe fixes (E711, E712).

    Yields the result dictionaries that survive filtering.
    """
    non_docstring_string_line_numbers = multiline_string_lines(
        source, include_docstrings=False)
    all_string_line_numbers = multiline_string_lines(
        source, include_docstrings=True)

    commented_out_code_line_numbers = commented_out_code_lines(source)

    has_e901 = any(result['id'].lower() == 'e901' for result in results)

    for r in results:
        issue_id = r['id'].lower()

        # Never reindent, re-wrap, or re-tab inside a multiline string.
        if r['line'] in non_docstring_string_line_numbers:
            if issue_id.startswith(('e1', 'e501', 'w191')):
                continue

        if r['line'] in all_string_line_numbers:
            if issue_id in ['e501']:
                continue

        # We must offset by 1 for lines that contain the trailing contents of
        # multiline strings.
        if not aggressive and (r['line'] + 1) in all_string_line_numbers:
            # Do not modify multiline strings in non-aggressive mode. Remove
            # trailing whitespace could break doctests.
            if issue_id.startswith(('w29', 'w39')):
                continue

        if aggressive <= 0:
            if issue_id.startswith(('e711', 'w6')):
                continue

        if aggressive <= 1:
            if issue_id.startswith(('e712', 'e713')):
                continue

        if r['line'] in commented_out_code_line_numbers:
            if issue_id.startswith(('e26', 'e501')):
                continue

        # Do not touch indentation if there is a token error caused by
        # incomplete multi-line statement. Otherwise, we risk screwing up the
        # indentation.
        if has_e901:
            if issue_id.startswith(('e1', 'e7')):
                continue

        yield r
def multiline_string_lines(source, include_docstrings=False):
    """Return line numbers that are within multiline strings.

    The line numbers are indexed at 1.
    Docstrings are ignored.
    """
    numbers = set()
    prev_type = ''

    try:
        for tok in generate_tokens(source):
            tok_type = tok[0]
            first_row = tok[2][0]
            last_row = tok[3][0]

            if tok_type == tokenize.STRING and first_row != last_row:
                # A string directly after INDENT is (probably) a docstring.
                if include_docstrings or prev_type != tokenize.INDENT:
                    # We increment by one since we want the contents of the
                    # string.
                    numbers |= set(range(1 + first_row, 1 + last_row))

            prev_type = tok_type
    except (SyntaxError, tokenize.TokenError):
        pass

    return numbers
def commented_out_code_lines(source):
    """Return line numbers of comments that are likely code.

    Commented-out code is bad practice, but modifying it just adds even more
    clutter.
    """
    suspects = []
    try:
        for tok in generate_tokens(source):
            tok_type = tok[0]
            tok_string = tok[1]
            row = tok[2][0]
            physical_line = tok[4]
            # Skip inline comments; only whole-line comments are candidates.
            if not physical_line.lstrip().startswith('#'):
                continue
            if tok_type != tokenize.COMMENT:
                continue
            candidate = tok_string.lstrip('#').strip()
            # Heuristic: multiple words, no nested '#', and it parses.
            if (' ' in candidate and
                    '#' not in candidate and
                    check_syntax(candidate)):
                suspects.append(row)
    except (SyntaxError, tokenize.TokenError):
        pass
    return suspects
def shorten_comment(line, max_line_length, last_comment=False):
    """Return trimmed or split long comment line.

    If there are no comments immediately following it, do a text wrap.
    Doing this wrapping on all comments in general would lead to jagged
    comment text.

    ``line`` must already exceed ``max_line_length`` (asserted below).
    ``last_comment`` is True when no comment line follows this one, which
    makes text-wrapping safe.  The return value always ends with a newline.
    """
    assert len(line) > max_line_length
    line = line.rstrip()
    # PEP 8 recommends 72 characters for comment text.
    indentation = _get_indentation(line) + '# '
    max_line_length = min(max_line_length,
                          len(indentation) + 72)
    MIN_CHARACTER_REPEAT = 5
    # A run of >= 5 identical non-alphanumeric trailing characters is
    # treated as a decorative banner and simply truncated.
    if (
        len(line) - len(line.rstrip(line[-1])) >= MIN_CHARACTER_REPEAT and
        not line[-1].isalnum()
    ):
        # Trim comments that end with things like ---------
        return line[:max_line_length] + '\n'
    elif last_comment and re.match(r'\s*#+\s*\w+', line):
        split_lines = textwrap.wrap(line.lstrip(' \t#'),
                                    initial_indent=indentation,
                                    subsequent_indent=indentation,
                                    width=max_line_length,
                                    break_long_words=False,
                                    break_on_hyphens=False)
        return '\n'.join(split_lines) + '\n'
    else:
        # Not safely wrappable: return it unchanged (plus newline).
        return line + '\n'
def normalize_line_endings(lines, newline):
    """Return fixed line endings.

    Every line is re-terminated with ``newline``, regardless of what it
    ended with before.
    """
    normalized = []
    for raw_line in lines:
        normalized.append(raw_line.rstrip('\n\r') + newline)
    return normalized
def mutual_startswith(a, b):
    """Return True if either string is a prefix of the other."""
    shorter, longer = sorted((a, b), key=len)
    return longer.startswith(shorter)
def code_match(code, select, ignore):
    """Return True if ``code`` is enabled by the select/ignore lists.

    ``select`` and ``ignore`` are iterables of code prefixes (e.g.
    ['E1', 'W6']); matching is case-insensitive and prefix-based in either
    direction.  ``ignore`` is checked first and wins.  The asserts guard
    against passing a bare string, which would be iterated char by char.
    (``unicode`` is expected to be aliased for Python 3 elsewhere in this
    module.)
    """
    if ignore:
        assert not isinstance(ignore, unicode)
        for ignored_code in [c.strip() for c in ignore]:
            if mutual_startswith(code.lower(), ignored_code.lower()):
                return False
    if select:
        assert not isinstance(select, unicode)
        for selected_code in [c.strip() for c in select]:
            if mutual_startswith(code.lower(), selected_code.lower()):
                return True
        # A non-empty select list is exclusive: anything not selected is off.
        return False
    return True
def fix_code(source, options=None, encoding=None, apply_config=False):
    """Return fixed source code.

    "encoding" will be used to decode "source" if it is a byte string.
    ``options`` may be None, a dict of overrides, or a parsed options
    object (see _get_options).
    """
    options = _get_options(options, apply_config)
    # Normalize to text internally; decode byte input up front.
    if not isinstance(source, unicode):
        source = source.decode(encoding or get_encoding())
    sio = io.StringIO(source)
    return fix_lines(sio.readlines(), options=options)
def _get_options(raw_options, apply_config):
    """Return parsed options.

    ``raw_options`` may be falsy (use defaults), a dict of option
    overrides (validated against the default option names and basic
    types), or an already parsed options object, returned unchanged.
    """
    if not raw_options:
        return parse_args([''], apply_config=apply_config)

    if not isinstance(raw_options, dict):
        return raw_options

    options = parse_args([''], apply_config=apply_config)
    for name, value in raw_options.items():
        if not hasattr(options, name):
            raise ValueError("No such option '{}'".format(name))

        # Check for very basic type errors.
        expected_type = type(getattr(options, name))
        if not isinstance(expected_type, (str, unicode)):
            if isinstance(value, (str, unicode)):
                raise ValueError(
                    "Option '{}' should not be a string".format(name))
        setattr(options, name, value)
    return options
def fix_lines(source_lines, options, filename=''):
    """Return fixed source code.

    Runs the one-shot global fixes, then applies FixPEP8 passes until the
    output stops changing (fixpoint) or --pep8-passes is exhausted.
    """
    # Transform everything to line feed. Then change them back to original
    # before returning fixed source code.
    original_newline = find_newline(source_lines)
    tmp_source = ''.join(normalize_line_endings(source_lines, '\n'))
    # Keep a history to break out of cycles.
    previous_hashes = set()
    if options.line_range:
        # Disable "apply_local_fixes()" for now due to issue #175.
        fixed_source = tmp_source
    else:
        # Apply global fixes only once (for efficiency).
        fixed_source = apply_global_fixes(tmp_source,
                                          options,
                                          filename=filename)
    passes = 0
    long_line_ignore_cache = set()
    # Iterate until the result's hash repeats (fixpoint or cycle).
    while hash(fixed_source) not in previous_hashes:
        if options.pep8_passes >= 0 and passes > options.pep8_passes:
            break
        passes += 1
        previous_hashes.add(hash(fixed_source))
        tmp_source = copy.copy(fixed_source)
        fix = FixPEP8(
            filename,
            options,
            contents=tmp_source,
            long_line_ignore_cache=long_line_ignore_cache)
        fixed_source = fix.fix()
    sio = io.StringIO(fixed_source)
    return ''.join(normalize_line_endings(sio.readlines(), original_newline))
def fix_file(filename, options=None, output=None, apply_config=False):
    """Fix one file.

    Depending on options: writes a diff to ``output`` (or returns it),
    rewrites the file in place, or returns/streams the fixed source.
    """
    if not options:
        options = parse_args([filename], apply_config=apply_config)
    original_source = readlines_from_file(filename)
    fixed_source = original_source
    # ``encoding`` is only needed when we write somewhere.
    if options.in_place or output:
        encoding = detect_encoding(filename)
    if output:
        output = LineEndingWrapper(wrap_output(output, encoding=encoding))
    fixed_source = fix_lines(fixed_source, options, filename=filename)
    if options.diff:
        new = io.StringIO(fixed_source)
        new = new.readlines()
        diff = get_diff_text(original_source, new, filename)
        if output:
            output.write(diff)
            output.flush()
        else:
            return diff
    elif options.in_place:
        fp = open_with_encoding(filename, encoding=encoding,
                                mode='w')
        fp.write(fixed_source)
        fp.close()
    else:
        if output:
            output.write(fixed_source)
            output.flush()
        else:
            return fixed_source
def global_fixes():
    """Yield multiple (code, function) tuples.

    Fixers are discovered by convention: any module-level function whose
    first parameter is named 'source' and whose name encodes a code
    (see extract_code_from_function) is a global fix.
    """
    for function in list(globals().values()):
        if inspect.isfunction(function):
            arguments = inspect.getargspec(function)[0]
            if arguments[:1] != ['source']:
                continue
            code = extract_code_from_function(function)
            if code:
                yield (code, function)
def apply_global_fixes(source, options, where='global', filename=''):
    """Run global fixes on source code.

    These are fixes that only need be done once (unlike those in
    FixPEP8, which are dependent on pep8).  ``where`` is only used in the
    verbose progress message.
    """
    # Reindentation covers both E101 and E111, so run it if either is on.
    if any(code_match(code, select=options.select, ignore=options.ignore)
           for code in ['E101', 'E111']):
        source = reindent(source,
                          indent_size=options.indent_size)
    for (code, function) in global_fixes():
        if code_match(code, select=options.select, ignore=options.ignore):
            if options.verbose:
                print('---> Applying {0} fix for {1}'.format(where,
                                                             code.upper()),
                      file=sys.stderr)
            source = function(source,
                              aggressive=options.aggressive)
    # 2to3-based fixes run last, after the regular global fixers.
    source = fix_2to3(source,
                      aggressive=options.aggressive,
                      select=options.select,
                      ignore=options.ignore,
                      filename=filename)
    return source
def extract_code_from_function(function):
    """Return code handled by function."""
    name = function.__name__
    if not name.startswith('fix_'):
        return None

    code = name[len('fix_'):]
    if not code:
        return None

    # Everything after the leading letter must be numeric (e.g. 'e501').
    try:
        int(code[1:])
    except ValueError:
        return None

    return code
def create_parser():
    """Return command-line parser.

    Defaults referenced here (DEFAULT_CONFIG, DEFAULT_IGNORE,
    DEFAULT_INDENT_SIZE) are module-level constants; cross-option
    validation happens later in parse_args().
    """
    # Do import locally to be friendly to those who use autopep8 as a library
    # and are supporting Python 2.6.
    import argparse
    parser = argparse.ArgumentParser(description=docstring_summary(__doc__),
                                     prog='autopep8')
    parser.add_argument('--version', action='version',
                        version='%(prog)s ' + __version__)
    parser.add_argument('-v', '--verbose', action='count',
                        default=0,
                        help='print verbose messages; '
                        'multiple -v result in more verbose messages')
    parser.add_argument('-d', '--diff', action='store_true',
                        help='print the diff for the fixed source')
    parser.add_argument('-i', '--in-place', action='store_true',
                        help='make changes to files in place')
    parser.add_argument('--global-config', metavar='filename',
                        default=DEFAULT_CONFIG,
                        help='path to a global pep8 config file; if this file '
                             'does not exist then this is ignored '
                             '(default: {0})'.format(DEFAULT_CONFIG))
    parser.add_argument('--ignore-local-config', action='store_true',
                        help="don't look for and apply local config files; "
                             'if not passed, defaults are updated with any '
                             "config files in the project's root directory")
    parser.add_argument('-r', '--recursive', action='store_true',
                        help='run recursively over directories; '
                        'must be used with --in-place or --diff')
    parser.add_argument('-j', '--jobs', type=int, metavar='n', default=1,
                        help='number of parallel jobs; '
                        'match CPU count if value is less than 1')
    parser.add_argument('-p', '--pep8-passes', metavar='n',
                        default=-1, type=int,
                        help='maximum number of additional pep8 passes '
                        '(default: infinite)')
    parser.add_argument('-a', '--aggressive', action='count', default=0,
                        help='enable non-whitespace changes; '
                        'multiple -a result in more aggressive changes')
    parser.add_argument('--experimental', action='store_true',
                        help='enable experimental fixes')
    parser.add_argument('--exclude', metavar='globs',
                        help='exclude file/directory names that match these '
                        'comma-separated globs')
    parser.add_argument('--list-fixes', action='store_true',
                        help='list codes for fixes; '
                        'used by --ignore and --select')
    parser.add_argument('--ignore', metavar='errors', default='',
                        help='do not fix these errors/warnings '
                        '(default: {0})'.format(DEFAULT_IGNORE))
    parser.add_argument('--select', metavar='errors', default='',
                        help='fix only these errors/warnings (e.g. E4,W)')
    parser.add_argument('--max-line-length', metavar='n', default=79, type=int,
                        help='set maximum allowed line length '
                        '(default: %(default)s)')
    parser.add_argument('--line-range', '--range', metavar='line',
                        default=None, type=int, nargs=2,
                        help='only fix errors found within this inclusive '
                        'range of line numbers (e.g. 1 99); '
                        'line numbers are indexed at 1')
    parser.add_argument('--indent-size', default=DEFAULT_INDENT_SIZE,
                        type=int, metavar='n',
                        help='number of spaces per indent level '
                             '(default %(default)s)')
    parser.add_argument('files', nargs='*',
                        help="files to format or '-' for standard in")
    return parser
def parse_args(arguments, apply_config=False):
    """Parse command-line options.

    Also performs all cross-option validation (stdin vs. files,
    --in-place/--diff exclusivity, --jobs requirements, --range sanity)
    and normalizes --select/--ignore/--exclude into sets.
    """
    parser = create_parser()
    args = parser.parse_args(arguments)
    if not args.files and not args.list_fixes:
        parser.error('incorrect number of arguments')
    args.files = [decode_filename(name) for name in args.files]
    if apply_config:
        # Re-parse with defaults updated from config files so explicit
        # command-line values still win.
        parser = read_config(args, parser)
        args = parser.parse_args(arguments)
        args.files = [decode_filename(name) for name in args.files]
    if '-' in args.files:
        if len(args.files) > 1:
            parser.error('cannot mix stdin and regular files')
        if args.diff:
            parser.error('--diff cannot be used with standard input')
        if args.in_place:
            parser.error('--in-place cannot be used with standard input')
        if args.recursive:
            parser.error('--recursive cannot be used with standard input')
    if len(args.files) > 1 and not (args.in_place or args.diff):
        parser.error('autopep8 only takes one filename as argument '
                     'unless the "--in-place" or "--diff" args are '
                     'used')
    if args.recursive and not (args.in_place or args.diff):
        parser.error('--recursive must be used with --in-place or --diff')
    if args.in_place and args.diff:
        parser.error('--in-place and --diff are mutually exclusive')
    if args.max_line_length <= 0:
        parser.error('--max-line-length must be greater than 0')
    if args.select:
        args.select = _split_comma_separated(args.select)
    if args.ignore:
        args.ignore = _split_comma_separated(args.ignore)
    elif not args.select:
        if args.aggressive:
            # Enable everything by default if aggressive.
            args.select = ['E', 'W']
        else:
            args.ignore = _split_comma_separated(DEFAULT_IGNORE)
    if args.exclude:
        args.exclude = _split_comma_separated(args.exclude)
    else:
        args.exclude = []
    if args.jobs < 1:
        # Do not import multiprocessing globally in case it is not supported
        # on the platform.
        import multiprocessing
        args.jobs = multiprocessing.cpu_count()
    if args.jobs > 1 and not args.in_place:
        parser.error('parallel jobs requires --in-place')
    if args.line_range:
        if args.line_range[0] <= 0:
            parser.error('--range must be positive numbers')
        if args.line_range[0] > args.line_range[1]:
            parser.error('First value of --range should be less than or equal '
                         'to the second')
    return args
def read_config(args, parser):
    """Read both user configuration and local configuration.

    Updates ``parser``'s defaults from the global config file and, unless
    --ignore-local-config was given, from the nearest project config found
    by walking up from the common prefix of the input files.
    """
    try:
        # Python 3 module name; fall back to the Python 2 spelling.
        from configparser import ConfigParser as SafeConfigParser
        from configparser import Error
    except ImportError:
        from ConfigParser import SafeConfigParser
        from ConfigParser import Error
    config = SafeConfigParser()
    try:
        config.read(args.global_config)
        if not args.ignore_local_config:
            parent = tail = args.files and os.path.abspath(
                os.path.commonprefix(args.files))
            # Walk up the directory tree until a project config is found.
            while tail:
                if config.read([os.path.join(parent, fn)
                                for fn in PROJECT_CONFIG]):
                    break
                (parent, tail) = os.path.split(parent)
        # '--foo-bar' style keys become 'foo_bar' argparse defaults.
        defaults = dict((k.lstrip('-').replace('-', '_'), v)
                        for k, v in config.items('pep8'))
        parser.set_defaults(**defaults)
    except Error:
        # Ignore for now.
        pass
    return parser
def _split_comma_separated(string):
"""Return a set of strings."""
return set(text.strip() for text in string.split(',') if text.strip())
def decode_filename(filename):
    """Return Unicode filename.

    NOTE(review): ``unicode`` is presumably aliased to ``str`` elsewhere
    in this module when running on Python 3 -- confirm.
    """
    if isinstance(filename, unicode):
        return filename
    else:
        # Byte string: decode using the filesystem encoding.
        return filename.decode(sys.getfilesystemencoding())
def supported_fixes():
    """Yield pep8 error codes that autopep8 fixes.

    Each item we yield is a tuple of the code followed by its
    description.  Sources, in order: reindent (E101), FixPEP8 methods
    named fix_eNNN/fix_wNNN, module-level global fixes, and 2to3 codes.
    """
    yield ('E101', docstring_summary(reindent.__doc__))
    instance = FixPEP8(filename=None, options=None, contents='')
    for attribute in dir(instance):
        code = re.match('fix_([ew][0-9][0-9][0-9])', attribute)
        if code:
            yield (
                code.group(1).upper(),
                re.sub(r'\s+', ' ',
                       docstring_summary(getattr(instance, attribute).__doc__))
            )
    # Pad short codes to 4 characters so the listing lines up.
    for (code, function) in sorted(global_fixes()):
        yield (code.upper() + (4 - len(code)) * ' ',
               re.sub(r'\s+', ' ', docstring_summary(function.__doc__)))
    for code in sorted(CODE_TO_2TO3):
        yield (code.upper() + (4 - len(code)) * ' ',
               re.sub(r'\s+', ' ', docstring_summary(fix_2to3.__doc__)))
def docstring_summary(docstring):
    """Return summary of docstring."""
    summary, _, _ = docstring.partition('\n')
    return summary
def line_shortening_rank(candidate, indent_word, max_line_length,
                         experimental=False):
    """Return rank of candidate.

    This is for sorting candidates.  Lower is better; the result is a
    heuristic score built from penalties (long lines, lonely operators,
    ugly break points) and small bonuses (breaking at '%([{' or 'for ').
    """
    if not candidate.strip():
        return 0
    rank = 0
    lines = candidate.rstrip().split('\n')
    offset = 0
    if (
        not lines[0].lstrip().startswith('#') and
        lines[0].rstrip()[-1] not in '([{'
    ):
        for (opening, closing) in ('()', '[]', '{}'):
            # Don't penalize empty containers that aren't split up. Things like
            # this "foo(\n    )" aren't particularly good.
            opening_loc = lines[0].find(opening)
            closing_loc = lines[0].find(closing)
            if opening_loc >= 0:
                if closing_loc < 0 or closing_loc != opening_loc + 1:
                    offset = max(offset, 1 + opening_loc)
    current_longest = max(offset + len(x.strip()) for x in lines)
    rank += 4 * max(0, current_longest - max_line_length)
    rank += len(lines)
    # Too much variation in line length is ugly.
    rank += 2 * standard_deviation(len(line) for line in lines)
    # (sic "staring": the closing symbol that would badly start line 2.)
    bad_staring_symbol = {
        '(': ')',
        '[': ']',
        '{': '}'}.get(lines[0][-1])
    if len(lines) > 1:
        if (
            bad_staring_symbol and
            lines[1].lstrip().startswith(bad_staring_symbol)
        ):
            rank += 20
    for lineno, current_line in enumerate(lines):
        current_line = current_line.strip()
        if current_line.startswith('#'):
            continue
        for bad_start in ['.', '%', '+', '-', '/']:
            if current_line.startswith(bad_start):
                rank += 100
            # Do not tolerate operators on their own line.
            if current_line == bad_start:
                rank += 1000
        if (
            current_line.endswith(('.', '%', '+', '-', '/')) and
            "': " in current_line
        ):
            rank += 1000
        if current_line.endswith(('(', '[', '{', '.')):
            # Avoid lonely opening. They result in longer lines.
            if len(current_line) <= len(indent_word):
                rank += 100
            # Avoid the ugliness of ", (\n".
            if (
                current_line.endswith('(') and
                current_line[:-1].rstrip().endswith(',')
            ):
                rank += 100
            # Also avoid the ugliness of "foo.\nbar"
            if current_line.endswith('.'):
                rank += 100
            if has_arithmetic_operator(current_line):
                rank += 100
        # Avoid breaking at unary operators.
        if re.match(r'.*[(\[{]\s*[\-\+~]$', current_line.rstrip('\\ ')):
            rank += 1000
        if re.match(r'.*lambda\s*\*$', current_line.rstrip('\\ ')):
            rank += 1000
        if current_line.endswith(('%', '(', '[', '{')):
            rank -= 20
        # Try to break list comprehensions at the "for".
        if current_line.startswith('for '):
            rank -= 50
        if current_line.endswith('\\'):
            # If a line ends in \-newline, it may be part of a
            # multiline string. In that case, we would like to know
            # how long that line is without the \-newline. If it's
            # longer than the maximum, or has comments, then we assume
            # that the \-newline is an okay candidate and only
            # penalize it a bit.
            total_len = len(current_line)
            lineno += 1
            while lineno < len(lines):
                total_len += len(lines[lineno])
                if lines[lineno].lstrip().startswith('#'):
                    total_len = max_line_length
                    break
                if not lines[lineno].endswith('\\'):
                    break
                lineno += 1
            if total_len < max_line_length:
                rank += 10
            else:
                rank += 100 if experimental else 1
        # Prefer breaking at commas rather than colon.
        if ',' in current_line and current_line.endswith(':'):
            rank += 10
        # Avoid splitting dictionaries between key and value.
        if current_line.endswith(':'):
            rank += 100
        rank += 10 * count_unbalanced_brackets(current_line)
    return max(0, rank)
def standard_deviation(numbers):
    """Return standard deviation (population) of *numbers*.

    Returns 0 for empty input.  The iterable is materialized first
    because it is traversed twice.
    """
    numbers = list(numbers)
    if not numbers:
        return 0
    # float() keeps the mean exact even on Python 2 interpreters without
    # true division (this module otherwise still supports Python 2);
    # on Python 3 the result is unchanged.
    mean = sum(numbers) / float(len(numbers))
    return (sum((n - mean) ** 2 for n in numbers) /
            len(numbers)) ** .5
def has_arithmetic_operator(line):
    """Return True if line contains any arithmetic operators."""
    # pep8.ARITHMETIC_OP holds the operator strings to look for.
    return any(operator in line
               for operator in pep8.ARITHMETIC_OP)
def count_unbalanced_brackets(line):
    """Return number of unmatched open/close brackets."""
    return sum(abs(line.count(left) - line.count(right))
               for left, right in ('()', '[]', '{}'))
def split_at_offsets(line, offsets):
    """Split line at offsets.

    Return list of strings.  Offsets at or beyond the end of the line are
    skipped; each kept piece (except the tail) is stripped.
    """
    pieces = []
    start = 0
    offset = 0
    for offset in sorted(offsets):
        if offset >= len(line) or start == offset:
            continue
        pieces.append(line[start:offset].strip())
        start = offset
    # The tail runs from the last offset seen (0 if none) to the end.
    pieces.append(line[offset:])
    return pieces
class LineEndingWrapper(object):

    r"""Replace line endings to work with sys.stdout.

    It seems that sys.stdout expects only '\n' as the line ending, no matter
    the platform. Otherwise, we get repeated line endings.
    """

    def __init__(self, output):
        self._target = output

    def write(self, s):
        # Collapse '\r\n' first so the lone-'\r' pass cannot double up.
        translated = s.replace('\r\n', '\n').replace('\r', '\n')
        self._target.write(translated)

    def flush(self):
        self._target.flush()
def match_file(filename, exclude):
    """Return True if file is okay for modifying/recursing."""
    base_name = os.path.basename(filename)
    # Hidden files and directories are always skipped.
    if base_name.startswith('.'):
        return False

    # Exclusion globs may match either the base name or the full path.
    if any(fnmatch.fnmatch(base_name, pattern) or
           fnmatch.fnmatch(filename, pattern)
           for pattern in exclude):
        return False

    if not os.path.isdir(filename) and not is_python_file(filename):
        return False

    return True
def find_files(filenames, recursive, exclude):
    """Yield filenames.

    Consumes ``filenames`` as a work queue.  With ``recursive``, a
    directory is expanded via os.walk (filtered by match_file) instead of
    being yielded itself.
    """
    while filenames:
        name = filenames.pop(0)
        if recursive and os.path.isdir(name):
            for root, directories, children in os.walk(name):
                filenames += [os.path.join(root, f) for f in children
                              if match_file(os.path.join(root, f),
                                            exclude)]
                # Prune excluded directories in place so os.walk skips them.
                directories[:] = [d for d in directories
                                  if match_file(os.path.join(root, d),
                                                exclude)]
        else:
            yield name
def _fix_file(parameters):
    """Helper function for optionally running fix_file() in parallel.

    ``parameters`` is a (filename, options[, output]) tuple so the call
    can be shipped through multiprocessing.Pool.map, which passes a
    single argument.  IOErrors are reported but not propagated.
    """
    if parameters[1].verbose:
        print('[file:{0}]'.format(parameters[0]), file=sys.stderr)
    try:
        fix_file(*parameters)
    except IOError as error:
        print(unicode(error), file=sys.stderr)
def fix_multiple_files(filenames, options, output=None):
    """Fix list of files.

    Optionally fix files recursively.
    """
    filenames = find_files(filenames, options.recursive, options.exclude)
    if options.jobs > 1:
        import multiprocessing
        pool = multiprocessing.Pool(options.jobs)
        # `output` is intentionally not forwarded here: parse_args()
        # enforces --in-place whenever --jobs > 1, so workers write the
        # files directly.
        pool.map(_fix_file,
                 [(name, options) for name in filenames])
    else:
        for name in filenames:
            _fix_file((name, options, output))
def is_python_file(filename):
    """Return True if filename is Python file."""
    if filename.endswith('.py'):
        return True

    # No .py extension: sniff the first line for a python shebang.
    try:
        with open_with_encoding(filename) as f:
            first_line = f.readlines(1)[0]
    except (IOError, IndexError):
        return False

    return bool(PYTHON_SHEBANG_REGEX.match(first_line))
def is_probably_part_of_multiline(line):
    """Return True if line is likely part of a multiline string.

    When multiline strings are involved, pep8 reports the error as being
    at the start of the multiline string, which doesn't work for us.
    """
    if '"""' in line or "'''" in line:
        return True
    return line.rstrip().endswith('\\')
def wrap_output(output, encoding):
    """Return output with specified encoding."""
    # Write to the underlying binary buffer when one exists (Python 3
    # text streams expose .buffer); otherwise wrap the stream itself.
    stream = getattr(output, 'buffer', output)
    return codecs.getwriter(encoding)(stream)
def get_encoding():
    """Return preferred encoding."""
    preferred = locale.getpreferredencoding()
    if preferred:
        return preferred
    return sys.getdefaultencoding()
def main(argv=None, apply_config=True):
    """Command-line entry.

    Returns 0 after --list-fixes, 1 on KeyboardInterrupt, otherwise falls
    through (implicit None) after fixing stdin or the given files.
    """
    if argv is None:
        argv = sys.argv
    try:
        # Exit on broken pipe.
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    except AttributeError: # pragma: no cover
        # SIGPIPE is not available on Windows.
        pass
    try:
        args = parse_args(argv[1:], apply_config=apply_config)
        if args.list_fixes:
            for code, description in sorted(supported_fixes()):
                print('{code} - {description}'.format(
                    code=code, description=description))
            return 0
        if args.files == ['-']:
            # stdin mode; parse_args already rejected --in-place/--diff here.
            assert not args.in_place
            encoding = sys.stdin.encoding or get_encoding()
            # LineEndingWrapper is unnecessary here due to the symmetry between
            # standard in and standard out.
            wrap_output(sys.stdout, encoding=encoding).write(
                fix_code(sys.stdin.read(), args, encoding=encoding))
        else:
            if args.in_place or args.diff:
                # De-duplicate so a file is never fixed twice in one run.
                args.files = list(set(args.files))
            else:
                assert len(args.files) == 1
                assert not args.recursive
            fix_multiple_files(args.files, args, sys.stdout)
    except KeyboardInterrupt:
        return 1 # pragma: no cover
class CachedTokenizer(object):

    """A one-element cache around tokenize.generate_tokens().

    Original code written by Ned Batchelder, in coverage.py.
    """

    def __init__(self):
        self.last_text = None
        self.last_tokens = None

    def generate_tokens(self, text):
        """A stand-in for tokenize.generate_tokens()."""
        if text == self.last_text:
            # Cache hit: same source as last call.
            return self.last_tokens

        readline = io.StringIO(text).readline
        self.last_tokens = list(tokenize.generate_tokens(readline))
        self.last_text = text
        return self.last_tokens
# Shared one-element tokenizer cache; `generate_tokens` below shadows the
# tokenize-module function for every caller in this module, so repeated
# tokenization of identical source during the fix passes is cheap.
_cached_tokenizer = CachedTokenizer()
generate_tokens = _cached_tokenizer.generate_tokens
if __name__ == '__main__':
    sys.exit(main())
| lgpl-3.0 |
nagyistoce/odoo-dev-odoo | addons/procurement_jit/__init__.py | 374 | 1078 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import procurement_jit
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
01org/iris-panel | iris/packagedb/views/scm.py | 8 | 2713 | # -*- coding: utf-8 -*-
# This file is part of IRIS: Infrastructure and Release Information System
#
# Copyright (C) 2013 Intel Corporation
#
# IRIS is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 2.0 as published by the Free Software Foundation.
"""
This is the importing view file for the iris-packagedb application.
Views for importing data from scm/meta/git.
"""
# pylint: disable=C0111,W0622
import os
import logging
from django.contrib.auth.decorators import login_required, permission_required
from django.db.transaction import atomic
from django.core.cache import cache
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view
from iris.etl import scm
from iris.etl.check import check_scm
log = logging.getLogger(__name__)
@api_view(['POST'])
@permission_required('core.scm_update', raise_exception=True)
@atomic
def update(request):
    """
    Import scm data from the uploaded 'domains' and 'gittrees' files.

    The upload is validated with check_scm() first; only a clean check
    triggers the import (atomic via the decorator) and a cache flush.
    Responds 200 on success, 406 on validation failure or missing files.
    """
    domains = request.FILES.get('domains')
    gittrees = request.FILES.get('gittrees')
    if domains and gittrees:
        domains_str, gittrees_str = domains.read(), gittrees.read()
        detail = check_scm(domains_str, gittrees_str)
        if not detail:
            log.info('Importing scm data...')
            # scm.from_string expects both sections in one blob, separated
            # by a blank line.
            scm_str = ''.join([domains_str, os.linesep, os.linesep,
                               gittrees_str])
            scm.from_string(scm_str)
            cache.clear()
            detail = 'Successful!'
            code = status.HTTP_200_OK
        else:
            code = status.HTTP_406_NOT_ACCEPTABLE
            detail = ','.join(detail)
            log.error(detail)
    else:
        detail = 'Can not find data files!'
        code = status.HTTP_406_NOT_ACCEPTABLE
        log.error(detail)
    content = {'detail': detail}
    return Response(content, status=code)
@api_view(['POST'])
@permission_required('core.scm_check', raise_exception=True)
def check(request):
    """
    Validate uploaded scm data ('domains' and 'gittrees' files) without
    importing anything.

    Responds 200 when check_scm() reports no problems, 406 with the
    joined error list (or a missing-files message) otherwise.
    """
    domains = request.FILES.get('domains')
    gittrees = request.FILES.get('gittrees')
    if domains and gittrees:
        log.info('Checking scm data...')
        detail = check_scm(domains.read(), gittrees.read())
        if not detail:
            detail = 'Successful!'
            code = status.HTTP_200_OK
        else:
            code = status.HTTP_406_NOT_ACCEPTABLE
            detail = ','.join(detail)
            log.error(detail)
    else:
        detail = 'Can not find data files!'
        code = status.HTTP_406_NOT_ACCEPTABLE
        log.error(detail)
    content = {'detail': detail}
    return Response(content, status=code)
| gpl-2.0 |
0x7678/wireless-ids | wids.py | 2 | 199163 | #! /usr/bin/python
#
# This was written for educational purpose only. Use it at your own risk.
# Author will be not responsible for any damage!
# Written By SY Chua, syworks@gmail.com
#
appver="1.0, R.9"
apptitle="WIDS"
appDesc="- The Wireless Intrusion Detection System"
appcreated="07 Jan 2014"
appupdated="26 Feb 2014"
appnote="by SY Chua, " + appcreated + ", Updated " + appupdated
import sys,os
import subprocess
import random
import curses
from subprocess import call
import termios
import tty
import time
import signal
import select
import datetime
import ssl
import os.path
import binascii, re
import commands
from subprocess import Popen, PIPE
import threading
##################################
# Global Variables Declaration #
##################################
global RTY
RTY=""
def CheckAdmin():
    """Abort (exit status 1) unless the process is running as root."""
    # os.getuid() is 0 only for root; compare directly instead of the old
    # `flag == False` pattern.
    if os.getuid() != 0:
        printc ("!!!","Application required admin rights in-order to work properly !","")
        # sys.exit instead of the site-provided exit() builtin, which is
        # not guaranteed to exist (e.g. under python -S).
        sys.exit(1)
class fcolor:
    """ANSI escape-sequence constants for terminal text styling.

    Prefix naming (derived from the codes below): C*=attributes,
    plain/S*=normal foreground, B*=bold, U*=underline, BU*=bold+underline,
    I*=high-intensity foreground, BI*=bold high-intensity,
    BG*=background, BGI*=high-intensity background.  ``CReset`` restores
    the terminal's default attributes.
    """
    # Attribute controls
    CReset='\033[0m'
    CBold='\033[1m'
    CDim='\033[2m'
    CUnderline='\033[4m'
    CBlink='\033[5m'
    CInvert='\033[7m'
    CHidden='\033[8m'
    CDebugB='\033[1;90m'
    CDebug='\033[0;90m'
    # Plain foreground colors
    Black='\033[30m'
    Red='\033[31m'
    Green='\033[32m'
    Yellow='\033[33m'
    Blue='\033[34m'
    Pink='\033[35m'
    Cyan='\033[36m'
    White='\033[37m'
    # Normal-weight foreground colors
    SBlack='\033[0;30m'
    SRed='\033[0;31m'
    SGreen='\033[0;32m'
    SYellow='\033[0;33m'
    SBlue='\033[0;34m'
    SPink='\033[0;35m'
    SCyan='\033[0;36m'
    SWhite='\033[0;37m'
    # Bold foreground colors
    BBlack='\033[1;30m'
    BRed='\033[1;31m'
    BBlue='\033[1;34m'
    BYellow='\033[1;33m'
    BGreen='\033[1;32m'
    BPink='\033[1;35m'
    BCyan='\033[1;36m'
    BWhite='\033[1;37m'
    # Underlined foreground colors
    UBlack='\033[4;30m'
    URed='\033[4;31m'
    UGreen='\033[4;32m'
    UYellow='\033[4;33m'
    UBlue='\033[4;34m'
    UPink='\033[4;35m'
    UCyan='\033[4;36m'
    UWhite='\033[4;37m'
    # Bold + underlined foreground colors
    BUBlack=CBold + '\033[4;30m'
    BURed=CBold + '\033[4;31m'
    BUGreen=CBold + '\033[4;32m'
    BUYellow=CBold + '\033[4;33m'
    BUBlue=CBold + '\033[4;34m'
    BUPink=CBold + '\033[4;35m'
    BUCyan=CBold + '\033[4;36m'
    BUWhite=CBold + '\033[4;37m'
    # High-intensity foreground colors
    IGray='\033[0;90m'
    IRed='\033[0;91m'
    IGreen='\033[0;92m'
    IYellow='\033[0;93m'
    IBlue='\033[0;94m'
    IPink='\033[0;95m'
    ICyan='\033[0;96m'
    IWhite='\033[0;97m'
    # Bold high-intensity foreground colors
    BIGray='\033[1;90m'
    BIRed='\033[1;91m'
    BIGreen='\033[1;92m'
    BIYellow='\033[1;93m'
    BIBlue='\033[1;94m'
    BIPink='\033[1;95m'
    BICyan='\033[1;96m'
    BIWhite='\033[1;97m'
    # Background colors
    BGBlack='\033[40m'
    BGRed='\033[41m'
    BGGreen='\033[42m'
    BGYellow='\033[43m'
    BGBlue='\033[44m'
    BGPink='\033[45m'
    BGCyan='\033[46m'
    BGWhite='\033[47m'
    # High-intensity background colors
    BGIBlack='\033[100m'
    BGIRed='\033[101m'
    BGIGreen='\033[102m'
    BGIYellow='\033[103m'
    BGIBlue='\033[104m'
    BGIPink='\033[105m'
    BGICyan='\033[106m'
    BGIWhite='\033[107m'
def read_a_key():
    """Block until a single keypress is read from stdin (raw mode)."""
    fd = sys.stdin.fileno()
    saved_attrs = termios.tcgetattr(fd)
    try:
        # Raw mode delivers the key immediately, without waiting for Enter.
        tty.setraw(fd)
        sys.stdin.read(1)
    finally:
        # Always restore the terminal, even if the read is interrupted.
        termios.tcsetattr(fd, termios.TCSADRAIN, saved_attrs)
def printc(ptype, ptext,ptext2):
    """
    Function : Displaying text with pre-defined icon and color
    Usage of printc:
        ptype - Type of Icon to display
        ptext - First sentence to display
        ptext2 - Second sentence, "?" as reply text, "@"/"@^" as time in seconds
    Examples : Lookup DemoOnPrintC() for examples
    """
    ScriptName=os.path.basename(__file__)
    printd("PType - " + str(ptype) + "\n " + "PText = " + str(ptext) + "\n " + "PText2 = " + str(ptext2))
    ReturnOut=""
    # Map the icon type to bracket/icon/text colors.  Multi-character
    # types ("!!", "!!!", "--", "..") collapse to their single-char icon
    # but keep their own color scheme.
    bcolor=fcolor.SWhite
    pcolor=fcolor.BGreen
    tcolor=fcolor.SGreen
    if ptype=="i":
        pcolor=fcolor.BBlue
        tcolor=fcolor.BWhite
    if ptype=="H":
        pcolor=fcolor.BBlue
        tcolor=fcolor.BWhite
        hcolor=fcolor.BUBlue
    if ptype=="!":
        pcolor=fcolor.BRed
        tcolor=fcolor.BYellow
    if ptype=="!!":
        ptype="!"
        pcolor=fcolor.BRed
        tcolor=fcolor.SRed
    if ptype=="!!!":
        ptype="!"
        pcolor=fcolor.BRed
        tcolor=fcolor.BRed
    if ptype==".":
        pcolor=fcolor.BGreen
        tcolor=fcolor.SGreen
    if ptype=="-":
        pcolor=fcolor.SWhite
        tcolor=fcolor.SWhite
    if ptype=="--":
        ptype="-"
        pcolor=fcolor.BWhite
        tcolor=fcolor.BWhite
    if ptype=="..":
        ptype="."
        pcolor=fcolor.BGreen
        tcolor=fcolor.BGreen
    if ptype==">" or ptype=="+":
        pcolor=fcolor.BCyan
        tcolor=fcolor.BCyan
    if ptype==" ":
        pcolor=fcolor.BYellow
        tcolor=fcolor.Green
    if ptype=="  ":
        pcolor=fcolor.BYellow
        tcolor=fcolor.BGreen
    if ptype=="?":
        pcolor=fcolor.BYellow
        tcolor=fcolor.BGreen
    if ptype=="x":
        pcolor=fcolor.BRed
        tcolor=fcolor.BBlue
    if ptype=="*":
        pcolor=fcolor.BYellow
        tcolor=fcolor.BPink
    if ptype=="@" or ptype=="@^":
        pcolor=fcolor.BRed
        tcolor=fcolor.White
    firstsixa=""
    if ptext!="":
        # Expand the %dt/%t/%d timestamp, %an (script name) and %cs
        # placeholders inside the message text.
        tscolor=fcolor.Blue
        ts = time.time()
        DateTimeStamp=datetime.datetime.fromtimestamp(ts).strftime('%d/%m/%Y %H:%M:%S')
        TimeStamp=datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')
        DateStamp=datetime.datetime.fromtimestamp(ts).strftime('%d/%m/%Y')
        ptext=ptext.replace("%dt -",tscolor + DateTimeStamp + " -" + tcolor)
        ptext=ptext.replace("%dt",tscolor + DateTimeStamp + tcolor)
        ptext=ptext.replace("%t -",tscolor + TimeStamp + " -" + tcolor)
        ptext=ptext.replace("%t",tscolor + TimeStamp + tcolor)
        ptext=ptext.replace("%d -",tscolor + DateStamp + " -" + tcolor)
        ptext=ptext.replace("%d",tscolor + DateStamp + tcolor)
        ptext=ptext.replace("%an",tscolor + ScriptName + tcolor)
        if "%cs" in ptext:
            ptext=ptext.replace("%cs",tscolor + ptext2 + tcolor)
            ptext2=""
        # A "<$rs$>" prefix means "return the string instead of printing".
        lptext=len(ptext)
        if lptext>6:
            firstsix=ptext[:6].lower()
            firstsixa=firstsix
            if firstsix=="<$rs$>":
                ReturnOut="1"
                lptext=lptext-6
                ptext=ptext[-lptext:]
    # Mirror the (color-stripped) message to the log file, except for the
    # interactive/transient types.
    if PrintToFile=="1" and ptype!="@" and ptype!="x" and ptype!="@^" and firstsixa!="<$rs$>":
        ptypep=ptype
        if ptypep==" " or ptypep=="  ":
            ptypep="    "
        else:
            ptypep="[" + ptype + "] "
        open(LogFile,"a+b").write(RemoveColor(ptypep) + RemoveColor(str(ptext.lstrip().rstrip())) + "\n")
    if ptype=="x":
        # Press-any-key prompt: blocks on a single raw keypress.
        if ptext=="":
            ptext="Press Any Key To Continue..."
        c1=bcolor + "[" + pcolor + ptype + bcolor + "] " + tcolor + ptext
        print c1,
        sys.stdout.flush()
        read_a_key()
        print ""
        return
    if ptype=="H":
        c1=bcolor + "[" + pcolor + "i" + bcolor + "] " + hcolor + ptext + fcolor.CReset
        if ReturnOut!="1":
            print c1
            return c1
        else:
            return c1
    if ptype=="@" or ptype=="@^":
        # Countdown display; "@^" additionally aborts early if the user
        # types a line on stdin while counting down.
        if ptext2=="":
            ptext2=5
        t=int(ptext2)
        while t!=0:
            s=bcolor + "[" + pcolor + str(t) + bcolor + "] " + tcolor + ptext + "\r"
            s=s.replace("%s",pcolor+str(ptext2)+tcolor)
            sl=len(s)
            print s,
            sys.stdout.flush()
            time.sleep(1)
            s=""
            ss="\r"
            print "" + s.ljust(sl+2) + ss,
            sys.stdout.flush()
            if ptype=="@^":
                t=t-1
                while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
                    line = sys.stdin.readline()
                    if line:
                        print bcolor + "[" + fcolor.BRed + "!" + bcolor + "] " + fcolor.Red + "Interupted by User.." + fcolor.Green
                        return
            else:
                t=t-1
        c1=bcolor + "[" + pcolor + "-" + bcolor + "] " + tcolor + ptext + "\r"
        c1=c1.replace("%s",pcolor+str(ptext2)+tcolor)
        print c1,
        sys.stdout.flush()
        return
    if ptype=="?":
        # Interactive prompt; ptext2 (if any) is shown as the reply hint.
        if ptext2!="":
            usr_resp=raw_input(bcolor + "[" + pcolor + ptype + bcolor + "] " + tcolor + ptext + " ( " + pcolor + ptext2 + tcolor + " ) : " + fcolor.BWhite)
            return usr_resp;
        else:
            usr_resp=raw_input(bcolor + "[" + pcolor + ptype + bcolor + "] " + tcolor + ptext + " : " + fcolor.BWhite)
            return usr_resp;
    if ptype==" " or ptype=="  ":
        if ReturnOut!="1":
            print bcolor + "    " + tcolor + ptext + ptext2
        else:
            return bcolor + "    " + tcolor + ptext + ptext2
    else:
        if ReturnOut!="1":
            print bcolor + "[" + pcolor + ptype + bcolor + "] " + tcolor + ptext + ptext2
        else:
            return bcolor + "[" + pcolor + ptype + bcolor + "] " + tcolor + ptext + ptext2
def AskQuestion(QuestionText, ReplyText,ReplyType,DefaultReply,DisplayReply):
	"""
	Function : Question for user input. Quite similar to printc("?") function
	Usage of AskQuestion:
		QuestionText - Question Text to ask
		ReplyText    - The reply hint shown in parentheses. Ex : "Y/n"
		ReplyType    - Validation / transformation applied to the answer:
			"U" / "L" - force upper / lower case
			"N"       - digits only (re-asks otherwise)
			"FN"      - must be an existing file (re-asks otherwise)
			"PN"      - must be an existing directory; a trailing "/" is appended
			"FP"      - must be an existing file or directory
			"MA"      - answer is mandatory (re-asks on empty input)
			"MN"      - mandatory and numeric
		DefaultReply - value returned when the user just presses Enter ("" = no default)
		DisplayReply - "1" echoes the accepted value back to the user
	Returns the (possibly transformed) user response.
	Examples : Lookup DemoOnPrintC() for examples
	"""
	if DisplayReply=="":
		DisplayReply=1
	bcolor=fcolor.SWhite
	pcolor=fcolor.BYellow
	tcolor=fcolor.BGreen
	# Prompt once; the reply hint is only shown when ReplyText is supplied.
	if ReplyText!="":
		usr_resp=raw_input(bcolor + "[" + pcolor + "?" + bcolor + "] " + tcolor + QuestionText + " ( " + pcolor + ReplyText + tcolor + " ) : " + fcolor.BWhite)
	else:
		usr_resp=raw_input(bcolor + "[" + pcolor + "?" + bcolor + "] " + tcolor + QuestionText + " : " + fcolor.BWhite)
	# --- Branch 1: a default value is available ------------------------------
	if DefaultReply!="":
		if usr_resp=="":
			# Empty answer falls back to the default.
			if DisplayReply=="1":
				printc (" ",fcolor.SWhite + "Default Selected ==> " + fcolor.BYellow + str(DefaultReply),"")
			return DefaultReply
		else:
			if ReplyType=="U":
				if DisplayReply=="1":
					printc (" ",fcolor.SWhite + "Selected ==> " + fcolor.BYellow + str(usr_resp.upper()),"")
				return usr_resp.upper()
			if ReplyType=="FN":
				if os.path.isfile(usr_resp)==True:
					if DisplayReply=="1":
						printc (" ",fcolor.SWhite + "Filename ==> " + fcolor.BYellow + str(usr_resp),"")
					return usr_resp
				else:
					# Invalid path: warn and recursively re-ask.
					printc ("!!","Filename [" + fcolor.SYellow + usr_resp + fcolor.SRed + "] does not exist !.","")
					usr_resp=AskQuestion(QuestionText, ReplyText,ReplyType,DefaultReply,DisplayReply)
					return usr_resp;
			if ReplyType=="FP":
				if os.path.exists(usr_resp)==True:
					if DisplayReply=="1":
						printc (" ",fcolor.SWhite + "Path ==> " + fcolor.BYellow + str(usr_resp),"")
					return usr_resp
				else:
					printc ("!!","Filename/Pathname [" + fcolor.SYellow + usr_resp + fcolor.SRed + "] does not exist !.","")
					usr_resp=AskQuestion(QuestionText, ReplyText,ReplyType,DefaultReply,DisplayReply)
					return usr_resp;
			if ReplyType=="PN":
				if os.path.isdir(usr_resp)==True:
					if usr_resp[-1:]!="/":
						usr_resp=usr_resp + "/"
					if DisplayReply=="1":
						printc (" ",fcolor.SWhite + "Path ==> " + fcolor.BYellow + str(usr_resp),"")
					return usr_resp
				else:
					printc ("!!","Path [" + fcolor.SYellow + usr_resp + fcolor.SRed + "] does not exist !.","")
					usr_resp=AskQuestion(QuestionText, ReplyText,ReplyType,DefaultReply,DisplayReply)
					return usr_resp;
			if ReplyType=="L":
				if DisplayReply=="1":
					printc (" ",fcolor.SWhite + "Selected ==> " + fcolor.BYellow + str(usr_resp.lower()),"")
				return usr_resp.lower()
			if ReplyType=="N":
				if usr_resp.isdigit()==True:
					if DisplayReply=="1":
						printc (" ",fcolor.SWhite + "Selected ==> " + fcolor.BYellow + str(usr_resp),"")
					return usr_resp;
				else:
					usr_resp=AskQuestion(QuestionText, ReplyText,ReplyType,DefaultReply,DisplayReply)
					return usr_resp;
	# --- Branch 2: no default value ------------------------------------------
	if DefaultReply=="":
		if usr_resp=="":
			if ReplyText!="":
				# NOTE(review): this second prompt is returned without any
				# ReplyType validation — looks intentional but unverified.
				usr_resp=raw_input(bcolor + "[" + pcolor + "?" + bcolor + "] " + tcolor + QuestionText + " ( " + pcolor + ReplyText + tcolor + " ) : " + fcolor.BWhite)
				return usr_resp;
			else:
				if ReplyType=="MA" or ReplyType=="FN" or ReplyType=="PN" or ReplyType=="FP":
					# Mandatory/path answers may not be empty: re-ask.
					usr_resp=AskQuestion(QuestionText, ReplyText,ReplyType,DefaultReply,DisplayReply)
					return usr_resp;
				else:
					if DisplayReply=="1":
						printc (" ",fcolor.SWhite + "Selected ==> " + fcolor.BYellow + str("Nothing"),"")
					return usr_resp;
		else:
			if ReplyType=="MN":
				if usr_resp.isdigit()==True:
					if DisplayReply=="1":
						printc (" ",fcolor.SWhite + "Selected ==> " + fcolor.BYellow + str(usr_resp),"")
					return usr_resp;
				else:
					usr_resp=AskQuestion(QuestionText, ReplyText,ReplyType,DefaultReply,DisplayReply)
					return usr_resp;
			if ReplyType=="FN":
				if os.path.isfile(usr_resp)==True:
					if DisplayReply=="1":
						printc (" ",fcolor.SWhite + "Filename ==> " + fcolor.BYellow + str(usr_resp),"")
					return usr_resp
				else:
					printc ("!!","Filename [" + fcolor.SYellow + usr_resp + fcolor.SRed + "] does not exist !.","")
					usr_resp=AskQuestion(QuestionText, ReplyText,ReplyType,DefaultReply,DisplayReply)
					return usr_resp;
			if ReplyType=="PN":
				if os.path.isdir(usr_resp)==True:
					if usr_resp[-1:]!="/":
						usr_resp=usr_resp + "/"
					if DisplayReply=="1":
						printc (" ",fcolor.SWhite + "Path ==> " + fcolor.BYellow + str(usr_resp),"")
					return usr_resp
				else:
					printc ("!!","Path [" + fcolor.SYellow + usr_resp + fcolor.SRed + "] does not exist !.","")
					usr_resp=AskQuestion(QuestionText, ReplyText,ReplyType,DefaultReply,DisplayReply)
					return usr_resp;
			if ReplyType=="FP":
				if os.path.exists(usr_resp)==True:
					# Existing path: report as file or directory accordingly.
					if os.path.isfile(usr_resp)==True:
						if DisplayReply=="1":
							printc (" ",fcolor.SWhite + "Filename ==> " + fcolor.BYellow + str(usr_resp),"")
						return usr_resp
					if os.path.isdir(usr_resp)==True:
						if usr_resp[-1:]!="/":
							usr_resp=usr_resp + "/"
						if DisplayReply=="1":
							printc (" ",fcolor.SWhite + "Path ==> " + fcolor.BYellow + str(usr_resp),"")
						return usr_resp
					return usr_resp
				else:
					printc ("!!","Filename/Pathname [" + fcolor.SYellow + usr_resp + fcolor.SRed + "] does not exist !.","")
					usr_resp=AskQuestion(QuestionText, ReplyText,ReplyType,DefaultReply,DisplayReply)
					return usr_resp;
			if ReplyType=="U":
				if DisplayReply=="1":
					printc (" ",fcolor.SWhite + "Selected ==> " + fcolor.BYellow + str(usr_resp.upper()),"")
				return usr_resp.upper()
			if ReplyType=="L":
				if DisplayReply=="1":
					printc (" ",fcolor.SWhite + "Selected ==> " + fcolor.BYellow + str(usr_resp.lower()),"")
				return usr_resp.lower()
			if ReplyType=="N":
				if usr_resp.isdigit()==True:
					if DisplayReply=="1":
						printc (" ",fcolor.SWhite + "Selected ==> " + fcolor.BYellow + str(usr_resp),"")
					return usr_resp;
				else:
					usr_resp=AskQuestion(QuestionText, ReplyText,ReplyType,DefaultReply,DisplayReply)
					return usr_resp;
			if usr_resp=="":
				if DisplayReply=="1":
					printc (" ",fcolor.SWhite + "Selected ==> " + fcolor.BYellow + str("Nothing"),"")
				return usr_resp;
			else:
				if DisplayReply=="1":
					printc (" ",fcolor.SWhite + "Selected ==> " + fcolor.BYellow + str(usr_resp),"")
				return usr_resp;
def printl (DisplayText,ContinueBack,PrevIconCount):
	"""
	Function : Displaying text on the same line
	Usage of printl:
		DisplayText - Text to Display
		ContinueBack = "0" - Start DisplayText on beginning of line.
		ContinueBack = "1" - Start from the back of the previous DisplayText
		ContinueBack = "2" - Start DisplayText on beginning of line with Icon,PrevIconCount need to contain value
		PrevIconCount - Value of last icon count
	Returns the icon counter as a string; pass it back in on the next call
	to keep the spinner turning.
	Examples : Lookup DemoOnPrintl() for examples
	"""
	icolor=fcolor.BGreen
	bcolor=fcolor.SWhite
	# Spinner frames indexed by the 0..7 counter (two revolutions of | / - \).
	IconFrames=["|","/","-","\\","|","/","-","\\"]
	if ContinueBack=="":
		ContinueBack="0"
	if PrevIconCount=="":
		PrevIconCount="0"
	else:
		# Advance the spinner, wrapping back to frame 0 after the last one.
		PrevIconCount=int(PrevIconCount)+1
		if PrevIconCount>=8:
			PrevIconCount=0
		PrevIconCount=str(PrevIconCount)
	IconDisplay=IconFrames[int(PrevIconCount)]
	if ContinueBack=="0" or ContinueBack=="2":
		# Blank the whole terminal line first, then rewrite it from column 0.
		curses.setupterm()
		TWidth=curses.tigetnum('cols')-1
		sys.stdout.write("\r")
		sys.stdout.flush()
		sys.stdout.write (" " * TWidth + "\r")
		sys.stdout.flush()
		if ContinueBack=="0":
			sys.stdout.write(DisplayText)
		else:
			sys.stdout.write(bcolor + "[" + icolor + str(IconDisplay) + bcolor + "] " + DisplayText)
		sys.stdout.flush()
	if ContinueBack=="1":
		# Continue writing on the same line without clearing it.
		sys.stdout.write(DisplayText)
		sys.stdout.flush()
	return str(PrevIconCount);
def DrawLine(LineChr,LineColor,LineCount):
"""
Function : Drawing of Line with various character type, color and count
Usage of DrawLine:
LineChr - Character to use as line
LineColor - Color of the line
LineCount - Number of character to print. "" is print from one end to another
Examples : Lookup DemoDrawLine for examples
"""
printd(fcolor.CDebugB + "DrawLine Function\n" + fcolor.CDebug + " LineChr - " + str(LineChr) + "\n " + "LineColor = " + str(LineColor) + "\n " + "LineCount = " + str(LineCount))
if LineColor=="":
LineColor=fcolor.SBlack
if LineChr=="":
LineChr="_"
if LineCount=="":
curses.setupterm()
TWidth=curses.tigetnum('cols')
TWidth=TWidth-1
else:
TWidth=LineCount
print LineColor + LineChr * TWidth
def MoveInstallationFiles(srcPath,dstPath):
import shutil
listOfFiles = os.listdir(srcPath)
listOfFiles.sort()
for f in listOfFiles:
if f!=".git" and f!=".gitignore":
srcfile = srcPath + f
dstfile = dstPath + f
if f==ScriptName:
shutil.copy2(srcfile, "/usr/sbin/" + str(ScriptName))
printd("Copy to " + "/usr/sbin/" + str(ScriptName))
result=os.system("chmod +x /usr/sbin/" + ScriptName + " > /dev/null 2>&1")
printd("chmod +x " + "/usr/sbin/" + str(ScriptName))
if os.path.exists(dstfile):
os.remove(dstfile)
shutil.move(srcfile, dstfile)
print fcolor.SGreen + " Moving " + fcolor.CUnderline + f + fcolor.CReset + fcolor.SGreen + " to " + dstfile
if f==ScriptName:
result=os.system("chmod +x " + dstfile + " > /dev/null 2>&1")
printd("chmod +x " + str(dstfile))
def GetScriptVersion(cmdScriptName):
	"""
	Return the version string embedded in a script file.

	Scans for the first line containing  appver="..."  and returns the value
	with quotes and newline stripped; returns "" when the file or the marker
	is missing.
	cmdScriptName - script to inspect; "" means the running script itself.
	"""
	if cmdScriptName=="":
		cmdScriptName=str(os.path.realpath(os.path.dirname(sys.argv[0]))) + "/" + str(os.path.basename(__file__))
	VerStr=""
	findstr="appver=\""
	printd ("Get Version : " + cmdScriptName)
	if os.path.exists(cmdScriptName)==True:
		# Read the file directly instead of shelling out to cat|grep|sed:
		# avoids a subprocess and shell-quoting problems with odd paths.
		with open(cmdScriptName, "r") as fh:
			for line in fh:
				if findstr in line:
					VerStr=line
					break
		VerStr=VerStr.replace("appver=\"","")
		VerStr=VerStr.replace("\"","")
		VerStr=VerStr.replace("\n","")
	return VerStr;
def GetUpdate(ExitMode):
	"""
	Check the GitHub repository for a newer release and optionally install it.

	ExitMode "1" (default) exits the process when done; anything else returns.
	Clones the repo into /tmp/git-update/, compares the embedded appver
	strings and, after user confirmation, moves the new files into appdir.
	"""
	if ExitMode=="":
		ExitMode="1"
	github="https://github.com/SYWorks/wireless-ids.git"
	Updatetmpdir="/tmp/git-update/"
	DownloadedScriptLocation=Updatetmpdir + ScriptName
	dstPath=os.getcwd() + "/"
	# appdir overrides the working directory as the install destination.
	dstPath=appdir
	dstScript=dstPath + ScriptName
	CurVersion=GetScriptVersion(dstScript)
	printc (".","Retrieving update details ....","")
	# Start from a clean temp dir, then clone quietly.
	result=RemoveTree(Updatetmpdir,"")
	result=os.system("git clone " + github + " " + Updatetmpdir + " > /dev/null 2>&1")
	if result==0:
		printc (" ",fcolor.SGreen + "Package downloaded..","")
		NewVersion=GetScriptVersion(DownloadedScriptLocation)
		if CurVersion!=NewVersion:
			printc ("i","Current Version\t: " + fcolor.BRed + str(CurVersion),"")
			printc (" ",fcolor.BWhite + "New Version\t: " + fcolor.BRed + str(NewVersion),"")
			Ask=AskQuestion ("Do you want to update ?","Y/n","","Y","")
			if Ask=="y" or Ask=="Y" or Ask=="":
				srcPath=Updatetmpdir
				result=MoveInstallationFiles(srcPath,dstPath)
				result=os.system("chmod +x " + dstScript + " > /dev/null 2>&1")
				result=RemoveTree(Updatetmpdir,"")
				print ""
				printc ("i",fcolor.BGreen + "Application updated !!","")
				printc (" ",fcolor.SGreen + "Re-run the updated application on [ " + fcolor.BYellow + dstScript + fcolor.SGreen + " ]..","")
				if ExitMode=="1":
					exit(0)
				else:
					return
			else:
				printc ("i",fcolor.BWhite + "Update aborted..","")
				result=RemoveTree(Updatetmpdir,"")
		else:
			# Same version string on both sides: nothing to do.
			printc ("i","Your already have the latest version [ " + fcolor.BRed + str(CurVersion) + fcolor.BWhite + " ].","")
			printc (" ",fcolor.BWhite + "Update aborted..","")
			result=RemoveTree(Updatetmpdir,"")
			if ExitMode=="1":
				exit(0)
			else:
				return
	else:
		# git clone failed (network, missing git, ...).
		printd ("Unknown Error : " + str(result))
		printc ("!!!","Unable to retrieve update !!","")
		if ExitMode=="1":
			exit(1)
		else:
			return
def GetDir(LookupPath):
	"""
	Function : Return one of several well-known directories, always with a
	           single trailing "/".  LookupPath (case-insensitive) selects:
	           curdir / appdir (default) / exedir / relativedir / scriptdir /
	           sysdir / pypath / homedir / tmpdir / userset
	Example  :
	"""
	import os
	import tempfile
	pathname, scriptname = os.path.split(sys.argv[0])
	if LookupPath=="":
		LookupPath="appdir"
	selector=LookupPath.lower()
	if selector=="curdir":
		result=os.getcwd()
	elif selector=="appdir":
		result=os.path.realpath(os.path.dirname(sys.argv[0]))
	elif selector=="exedir":
		result=os.path.dirname(sys.executable)
	elif selector=="relativedir":
		result=pathname
	elif selector=="scriptdir":
		result=os.path.abspath(pathname)
	elif selector=="sysdir":
		result=sys.path[0]
	elif selector=="pypath":
		result=sys.path[1]
	elif selector=="homedir":
		result=os.environ['HOME']
	elif selector=="tmpdir":
		result=tempfile.gettempdir()
	elif selector=="userset":
		result=appdir
	# Append the separator, then collapse a doubled trailing slash.
	result=result + "/"
	if result[-2:]=="//":
		result=result[:len(str(result))-1]
	return result;
def CheckLinux():
	"""
	Function : Check the current operating system; exit when it is not Linux.
	"""
	from platform import system
	# Bind to a distinct name: the original assigned to `os`, shadowing the
	# globally imported os module inside this function.  The unused
	# `from subprocess import call` was also dropped.
	current_os = system()
	printd ("Operating System : " + current_os)
	if current_os != 'Linux':
		printc ("!!!","This application only works on Linux.","")
		exit(1)
def CheckPyVersion(MinPyVersion):
	"""
	Function : Check the running Python version.
	           Warn and exit when it is older than MinPyVersion.
	"""
	import platform
	PyVersion = platform.python_version()
	printd ("Python Version : " + PyVersion)
	if MinPyVersion!="":
		def _version_tuple(ver):
			# Keep only the leading digits of each dotted component so
			# suffixed releases like "2.7.13+" still parse.
			parts=[]
			for piece in ver.split("."):
				digits=""
				for ch in piece:
					if ch.isdigit():
						digits=digits+ch
					else:
						break
				parts.append(int(digits or "0"))
			return tuple(parts)
		# Compare numerically, not as strings: "2.7.10" < "2.7.9" as text.
		# Exit only when current < minimum, so an exact match is accepted
		# (the old ">=" string test rejected the minimum version itself).
		if _version_tuple(PyVersion) < _version_tuple(MinPyVersion):
			printc ("!!!",fcolor.BGreen + "Your Python version " + fcolor.BRed + str(PyVersion) + fcolor.BGreen + " may be outdated.","")
			printc (" ",fcolor.BWhite + "Minimum version required for this application is " + fcolor.BRed + str(MinPyVersion) + fcolor.BWhite + ".","")
			exit(0)
def GetAppName():
	"""
	Function : Resolve the script's file names into globals.
	           ScriptName     = bare script file name
	           DScriptName    = display form ("./<name>")
	           FullScriptName = absolute path to the script
	"""
	global ScriptName
	global FullScriptName
	global DScriptName
	ScriptName=os.path.basename(__file__)
	DScriptName="./" + ScriptName
	# Renamed the local (was `appdir`) so it no longer shadows the
	# module-level appdir used elsewhere.
	script_dir=os.path.realpath(os.path.dirname(sys.argv[0]))
	FullScriptName=str(script_dir) + "/" + str(ScriptName)
	printd("FullScriptName : " + FullScriptName)
	printd("ScriptName : " + str(ScriptName))
def DisplayAppDetail():
	"""Print the ASCII-art banner plus the application title, version and description."""
	print fcolor.SBlue + " $$$$$ $ $$ $ $$ $$$$$ $$$$$ $$ $ $$$$$" + fcolor.SYellow + " / \\"
	print fcolor.SBlue + " $ $ $$ $ $ $$ $ $$ $ $$ $$ $$ $ " + fcolor.SYellow + " ( R )"
	print fcolor.SBlue + " $$$$ $$$$ $ $$ $ $ $$ $$$$$ $$$$ $$$$" + fcolor.SYellow + " \\_/"
	print fcolor.SBlue + " $ $$ $ $ $ $$ $ $$ $ $$ $ $$ $$"
	print fcolor.SBlue + " $ $$ $ $ $$$ $$ $$ $ $$ $ $$ $"
	print fcolor.SBlue + " $$$$$ $$ $$ $$ $$$$$ $ $$ $ $$ $$$$$ "
	print ""
	print fcolor.BGreen + apptitle + " " + appver + fcolor.SGreen + " " + appDesc
	print fcolor.CReset + fcolor.White + appnote
	print ""
def DisplayDisclaimer():
	"""Print the legal disclaimer shown at startup."""
	printc ("!!!","Legal Disclaimer :- " + fcolor.Red + "FOR EDUCATIONAL PURPOSES ONLY !!","")
	print fcolor.SWhite + " Usage of this application for attacking target without prior mutual consent is illegal. It is the"
	print fcolor.SWhite + " end user's responsibility to obey all applicable local, state and federal laws. Author assume no"
	print fcolor.SWhite + " liability and are not responsible for any misuse or damage caused by this application."
	print ""
def DisplayFullDescription():
	"""Print the long-form description of what the sniffer detects."""
	print fcolor.BRed + " Description : "
	print fcolor.SGreen + " This a a beta release and reliablity of the information might not be totally accurate.."
	print fcolor.SWhite + " This application sniff the surrounding wireless network for any suspicious packets detected such as high amount of"
	print fcolor.SWhite + " association/authentication packets, suspicious data sent via broadcast address, unreasonable high amount of deauth"
	print fcolor.SWhite + " packets or EAP association packets which in the other way indicated possible way indicated possible WEP/WPA/WPS"
	print fcolor.SWhite + " attacks found.."
	print fcolor.BWhite + " New !! " + fcolor.SWhite + "Detecting connected client for possible Rogue AP"
	print ""
def DisplayDescription():
	"""Print the short description of the application."""
	print fcolor.BRed + "Description : "
	print fcolor.SWhite + " This application sniff your surrounding wireless traffic and analyse for suspicious packets such as"
	print fcolor.SWhite + " WEP/WPA/WPS attacks, wireless client switched to another access point, detection of possible Rogue AP,"
	print fcolor.SWhite + " displaying AP with the same name and much more.. "
	print ""
def DisplayDetailHelp():
	"""Print the advanced command-line usage (all options, incl. update/remove/logging)."""
	print fcolor.BGreen + "Usage : " + fcolor.BYellow + "" + DScriptName + fcolor.BWhite + " [options] " + fcolor.BBlue + "<args>"
	print fcolor.CReset + fcolor.Black + " Running application without parameter will fire up the interactive mode."
	print ""
	print fcolor.BIPink + "Options:" + fcolor.CReset
	print fcolor.BWhite + " -h --help\t\t" + fcolor.CReset + fcolor.White + "- Show basic help message and exit"
	print fcolor.BWhite + " -hh \t\t" + fcolor.CReset + fcolor.White + "- Show advanced help message and exit"
	print fcolor.BWhite + " --update\t" + fcolor.CReset + fcolor.White + "- Check for updates"
	print fcolor.BWhite + " --remove\t" + fcolor.CReset + fcolor.White + "- Uninstall application"
	print ""
	print fcolor.BWhite + " -l --loop" + fcolor.BBlue + " <arg>\t" + fcolor.CReset + fcolor.White + "- Run the number of loop before exiting"
	print fcolor.BWhite + " -i --iface" + fcolor.BBlue + " <arg>\t" + fcolor.CReset + fcolor.White + "- Set Interface to use"
	print fcolor.BWhite + " -t --timeout" + fcolor.BBlue + " <arg>\t" + fcolor.CReset + fcolor.White + "- Duration to capture before analysing the captured data"
	print fcolor.BWhite + " -hp --hidepropbe" + fcolor.BBlue + "\t" + fcolor.CReset + fcolor.White + "- Hide displaying of Probing devices."
	print fcolor.BWhite + " -la --log-a" + fcolor.BBlue + " \t" + fcolor.CReset + fcolor.White + "- Append to current scanning log detail"
	print fcolor.BWhite + " -lo --log-o" + fcolor.BBlue + " \t" + fcolor.CReset + fcolor.White + "- Overwrite existing scanning logs"
	print fcolor.BWhite + " --log" + fcolor.BBlue + "\t\t" + fcolor.CReset + fcolor.White + "- Similar to --log-o"
	print ""
	print fcolor.BGreen + "Examples: " + fcolor.BYellow + "" + DScriptName + fcolor.BWhite + " --update"
	print fcolor.BGreen + " " + fcolor.BYellow + "" + DScriptName + fcolor.BWhite + " -i " + fcolor.BBlue + "wlan0" + fcolor.BWhite + " -t " + fcolor.BBlue + "120"+ fcolor.BWhite
	print fcolor.BGreen + " " + fcolor.BYellow + "" + DScriptName + fcolor.BWhite + " --loop " + fcolor.BBlue + "10" + fcolor.BWhite + " --timeout " + fcolor.BBlue + "30"+ fcolor.BWhite
	print fcolor.BGreen + " " + fcolor.BYellow + "" + DScriptName + fcolor.BWhite + " --iface " + fcolor.BBlue + "wlan1" + fcolor.BWhite + " --timeout " + fcolor.BBlue + "20"+ fcolor.BWhite
	print ""
	DrawLine("-",fcolor.CReset + fcolor.Black,"")
	print ""
def DisplayHelp():
	"""Print the basic command-line usage summary."""
	print fcolor.BGreen + "Usage : " + fcolor.BYellow + "" + DScriptName + fcolor.BWhite + " [options] " + fcolor.BBlue + "<args>"
	print fcolor.CReset + fcolor.Black + " Running application without parameter will fire up the interactive mode."
	print ""
	print fcolor.BIPink + "Options:" + fcolor.CReset
	print fcolor.BWhite + " -h --help\t\t" + fcolor.CReset + fcolor.White + "- Show basic help message and exit"
	print fcolor.BWhite + " -hh \t\t" + fcolor.CReset + fcolor.White + "- Show advanced help message and exit"
	print ""
	print fcolor.BWhite + " -i --iface" + fcolor.BBlue + " <arg>\t" + fcolor.CReset + fcolor.White + "- Set Interface to use"
	print fcolor.BWhite + " -t --timeout" + fcolor.BBlue + " <arg>\t" + fcolor.CReset + fcolor.White + "- Duration to capture before analysing the captured data"
	print fcolor.BWhite + " -hp --hidepropbe" + fcolor.BBlue + "\t" + fcolor.CReset + fcolor.White + "- Hide displaying of Probing devices."
	print ""
	print fcolor.BGreen + "Examples: " + fcolor.BYellow + "" + DScriptName + fcolor.BWhite + " --update"
	print fcolor.BGreen + " " + fcolor.BYellow + "" + DScriptName + fcolor.BWhite + " -i " + fcolor.BBlue + "wlan0" + fcolor.BWhite + " -t " + fcolor.BBlue + "120"+ fcolor.BWhite
	print fcolor.BGreen + " " + fcolor.BYellow + "" + DScriptName + fcolor.BWhite + " --iface " + fcolor.BBlue + "wlan1" + fcolor.BWhite + " --timeout " + fcolor.BBlue + "20"+ fcolor.BWhite
	print ""
	DrawLine("-",fcolor.CReset + fcolor.Black,"")
	print ""
def GetParameter(cmdDisplay):
	"""
	Parse sys.argv by hand and load the results into module globals
	(SELECTED_IFACE, TIMEOUT, LoopCount, PRINTTOFILE, SPOOF_MAC, ...).

	cmdDisplay = "0" : Does not display help if not specified
		     "1" : Display help even not specified
		     "2" : Display Help, exit if error
	Options taking a value (-m/-t/-l/-i) consume the following argv token.
	"""
	global DebugMode
	global AllArguments
	global SELECTED_IFACE
	global PRINTTOFILE
	global ReadPacketOnly
	global LoopCount
	global TEMP_HIDEPROBE
	TEMP_HIDEPROBE="0"
	ReadPacketOnly=""
	LoopCount=99999999
	SELECTED_IFACE=""
	global SELECTED_MON
	SELECTED_MON=""
	PRINTTOFILE=""
	global TIMEOUT
	TIMEOUT=20
	global ASSIGNED_MAC
	ASSIGNED_MAC=""
	global SPOOF_MAC
	SPOOF_MAC=""
	AllArguments=""
	import sys, getopt
	if cmdDisplay=="":
		cmdDisplay="0"
	Err=0
	totalarg=len(sys.argv)
	printd ("Argument Len : " + str(totalarg))
	printd ("Argument String : " + str(sys.argv))
	if totalarg>1:
		i=1
		while i < totalarg:
			Err=""
			if i>0:
				# i2/i2str look ahead to the option's value (the next argv
				# token); value-taking options advance i past it via i=i2.
				i2=i+1
				if i2 >= len(sys.argv):
					i2=i
					i2str=""
				else:
					i2str=str(sys.argv[i2])
				argstr=("Argument %d : %s" % (i, str(sys.argv[i])))
				printd (argstr)
				arg=str(sys.argv[i])
				if arg=="-h" or arg=="--help":
					DisplayHelp()
					Err=0
					exit()
					break;
				elif arg=="-hh":
					DisplayDetailHelp()
					Err=0
					exit()
				elif arg=="-ro":
					Err=0
					ReadPacketOnly="1"
				elif arg=="--update":
					Err=0
					GetUpdate("1")
					exit()
				elif arg=="--remove":
					Err=0
					UninstallApplication()
					exit()
				elif arg=="--spoof":
					AllArguments=AllArguments + fcolor.BWhite + "Spoof MAC\t\t: " + fcolor.BRed + "Enabled\n"
					SPOOF_MAC="1"
					Err=0
				elif arg=="-m" or arg=="--mac":
					i=i2
					if i2str=="":
						printc("!!!","Invalid MAC Address set !","")
						Err=1
					else:
						Err=0
						# A value starting with "-" is the next option, not a MAC.
						if i2str[:1]!="-":
							if len(i2str)==17:
								Result=CheckMAC(i2str)
								if Result!="":
									ASSIGNED_MAC=i2str
									AllArguments=AllArguments + fcolor.BWhite + "Selected MAC\t\t: " + fcolor.BRed + i2str + "\n"
									SPOOF_MAC="1"
								else:
									printc("!!!","Invalid MAC Address set [ " + fcolor.BWhite + i2str + fcolor.BRed + " ] !","")
									Err=1
							else:
								printc("!!!","Invalid MAC Address set [ " + fcolor.BWhite + i2str + fcolor.BRed + " ] !","")
								Err=1
						else:
							printc("!!!","Invalid MAC Address set [ " + fcolor.BWhite + i2str + fcolor.BRed + " ] !","")
							Err=1
				elif arg=="-t" or arg=="--timeout":
					i=i2
					if i2str=="":
						printc("!!!","Invalid timeout variable set !","")
						Err=1
					else:
						Err=0
						if i2str[:1]!="-":
							if i2str.isdigit():
								TIMEOUT=i2str
								AllArguments=AllArguments + fcolor.BWhite + "Timeout (Seconds)\t: " + fcolor.BRed + str(TIMEOUT) + "\n"
								if float(TIMEOUT)<20:
									AllArguments=AllArguments + fcolor.SWhite + "\t\t\t: Timeout second set may be to low for detection.\n"
							else:
								printc("!!!","Invalid timeout variable set [ " + fcolor.BWhite + i2str + fcolor.BRed + " ] !","")
								Err=1
						else:
							printc("!!!","Invalid timeout variable set [ " + fcolor.BWhite + i2str + fcolor.BRed + " ] !","")
							Err=1
				elif arg=="-l" or arg=="--loop":
					i=i2
					if i2str=="":
						printc("!!!","Invalid loopcount variable set !","")
						Err=1
					else:
						Err=0
						if i2str[:1]!="-":
							if i2str.isdigit():
								LoopCount=i2str
								# Loop count below 1 is clamped up to 1.
								if float(LoopCount)<1:
									AllArguments=AllArguments + fcolor.SWhite + "\t\t\t: Minimum loop count is 1.\n"
									LoopCount=1
								AllArguments=AllArguments + fcolor.BWhite + "Loop Count\t\t: " + fcolor.BRed + str(LoopCount) + "\n"
							else:
								printc("!!!","Invalid loop count variable set [ " + fcolor.BWhite + i2str + fcolor.BRed + " ] !","")
								Err=1
						else:
							printc("!!!","Invalid loop count variable set [ " + fcolor.BWhite + i2str + fcolor.BRed + " ] !","")
							Err=1
				elif arg=="-i" or arg=="--iface":
					i=i2
					if i2str=="":
						printc("!!!","Invalid Interface variable set !","")
						Err=1
					else:
						Err=0
						if i2str[:1]!="-":
							SELECTED_IFACE=i2str
							AllArguments=AllArguments + fcolor.BWhite + "Selected interface\t: " + fcolor.BRed + i2str + "\n"
						else:
							printc("!!!","Invalid Interface variable set [ " + fcolor.BWhite + i2str + fcolor.BRed + " ] !","")
							Err=1
				elif arg=="--hideprobe" or arg=="-hp":
					TEMP_HIDEPROBE="1"
					AllArguments=AllArguments + fcolor.BWhite + "Probing Devices\t\t: " + fcolor.BRed + "Hide\n"
					Err=0
				elif arg=="--log-a" or arg=="-la":
					PRINTTOFILE="1"
					AllArguments=AllArguments + fcolor.BWhite + "Result Logging\t\t: " + fcolor.BRed + "Append\n"
					Err=0
				elif arg=="--log-o" or arg=="-lo" or arg=="--log":
					PRINTTOFILE="1"
					AllArguments=AllArguments + fcolor.BWhite + "Result Logging\t\t: " + fcolor.BRed + "Overwrite\n"
					# Truncate the previous log before the new run.
					open(LogFile,"wb").write("")
					Err=0
				elif Err=="":
					# Unrecognised option: show help and bail out.
					DisplayHelp()
					printc("!!!","Invalid option set ! [ " + fcolor.BGreen + arg + fcolor.BRed + " ]","")
					Err=1
					exit(0)
				if Err==1:
					if cmdDisplay=="2":
						print ""
						DisplayHelp()
						exit(0)
			i=i+1
		if AllArguments!="":
			print fcolor.BYellow + "Parameter set:"
			print AllArguments
		else:
			print ""
			DisplayHelp()
			print ""
			printc ("i", fcolor.BCyan + "Entering Semi-Interactive Mode..","")
			result=DisplayTimeStamp("start","")
			print ""
	else:
		# No arguments supplied at all.
		if cmdDisplay=="1":
			DisplayHelp()
		if cmdDisplay=="2":
			DisplayHelp()
			exit(0)
		else:
			printc ("i", fcolor.BCyan + "Entering Interactive Mode..","")
			result=DisplayTimeStamp("start","")
			print ""
def GetFileLine(filename,omitblank):
	"""
	Count lines in *filename* and return the count.

	omitblank "" / "0" - count every line
	          "1"      - count non-blank lines only
	          "2"      - count lines whose length (sans newline) is 8-63
	                     characters; TotalLine then counts non-empty lines
	Side effects: updates globals TotalLine and UsableLine.
	"""
	global TotalLine
	global UsableLine
	TotalLine=0
	UsableLine=0
	if omitblank=="":
		omitblank="0"
	if omitblank=="1":
		with open(filename, 'r') as fh:
			lines = sum(1 for row in fh if row.strip())
		TotalLine=lines
		UsableLine=lines
	if omitblank=="0":
		with open(filename) as fh:
			lines = sum(1 for _row in fh)
		TotalLine=lines
		UsableLine=lines
	if omitblank=="2":
		lines=0
		with open(filename,"r") as fh:
			for row in fh:
				row_len=len(row.replace("\n",""))
				if row_len>0:
					TotalLine=TotalLine+1
					if 8<=row_len<=63:
						lines=lines+1
		UsableLine=lines
	return lines
def CheckMAC(MACAddr):
	"""
	Validate a MAC address string.

	Returns MACAddr upper-cased when it contains exactly 12 hexadecimal
	digits (separators such as ":" or "-" are ignored); returns "" otherwise.
	"""
	import string
	result=""
	# Keep only hex digits; this replaces the Python-2-only two-argument
	# str.translate(table, deletechars) call and works the same way.
	hex_only = "".join(ch for ch in MACAddr if ch in string.hexdigits)
	# The stray `print "mac result = ..."` debug statements were removed:
	# they wrote noise to stdout on every validation (the module's debug
	# convention is printd, and callers only inspect the return value).
	if len(hex_only) != 12:
		return result;
	else:
		result=MACAddr.upper()
		return result;
def CheckAppLocation():
	"""
	Ensure the application is installed in its designated locations.

	Creates appdir (and every path listed in PathList) when missing, and
	copies the running script into appdir and /usr/sbin, marking both
	copies executable.
	"""
	import shutil
	# cpath flags that at least one directory had to be created.
	cpath=0
	if os.path.exists(appdir)==True:
		printd ("[" + appdir + "] exist..")
	else:
		printd ("[" + appdir + "] does not exist..")
		result=MakeTree(appdir,"")
		cpath=1
	curdir=os.getcwd() + "/"
	printd ("Current Path : " + str(curdir))
	CurFileLocation=curdir + ScriptName
	AppFileLocation=appdir + ScriptName
	printd("Current File : " + str(CurFileLocation))
	printd("Designated File : " + str(AppFileLocation))
	if os.path.exists(AppFileLocation)==False:
		# Install a copy of the script into the application directory.
		printd("File Not found in " + str(AppFileLocation))
		printd("Copy file from [" + str(CurFileLocation) + "] to [" + str(AppFileLocation) + " ]")
		shutil.copy2(CurFileLocation, AppFileLocation)
		result=os.system("chmod +x " + AppFileLocation + " > /dev/null 2>&1")
	if os.path.exists("/usr/sbin/" + ScriptName)==False:
		# Keep a launcher copy on the system path as well.
		printd("File Not found in " + "/usr/sbin/" + str(ScriptName))
		printd("Copy file from [" + str(CurFileLocation) + "] to [" + "/usr/sbin/" + str(ScriptName) + " ]")
		shutil.copy2(CurFileLocation, "/usr/sbin/" + str(ScriptName))
		result=os.system("chmod +x " + "/usr/sbin/" + str(ScriptName) + " > /dev/null 2>&1")
	if PathList!="":
		printd("PathList : " + str(PathList))
		for path in PathList:
			newPath=appdir + path
			printd("Checking : " + str(newPath))
			if os.path.exists(newPath)==False:
				printd("Path [ " + str(newPath) + " ] not found.")
				cpath=1
				result=MakeTree(newPath,"")
	if cpath==1:
		print ""
def DisplayTimeStamp(cmdDisplayType,cmdTimeFormat):
	"""
	Record and/or display start/stop timestamps and the elapsed time.

	cmdDisplayType : "start" / "stop"     - record and print the timestamp
	                 "start-h" / "stop-h" - record silently (hidden)
	                 "summary"            - print elapsed time only
	                 "summary-a"          - print start, stop and elapsed time
	cmdTimeFormat  : strftime format; "" uses "%Y-%m-%d %H:%M:%S"
	Side effects   : updates globals TimeStart/TimeStop (datetime objects)
	                 and DTimeStart/DTimeStop (formatted strings).
	Returns the formatted timestamp, or the elapsed-time string for summaries.
	"""
	global TimeStart
	global TimeStop
	global DTimeStart
	global DTimeStop
	lblColor=fcolor.BGreen
	txtColor=fcolor.SGreen
	cmdDisplayType=cmdDisplayType.lower()
	if cmdTimeFormat=="":
		timefmt="%Y-%m-%d %H:%M:%S"
	else:
		timefmt=cmdTimeFormat
	if cmdDisplayType=="start":
		# Starting a new measurement clears any previous stop time.
		TimeStop=""
		DTimeStop=""
		DTimeStart=time.strftime(timefmt)
		printc (" ",lblColor + "Started\t: " + txtColor + str(DTimeStart),"")
		TimeStart=datetime.datetime.now()
		return DTimeStart;
	if cmdDisplayType=="start-h":
		TimeStop=""
		DTimeStop=""
		DTimeStart=time.strftime(timefmt)
		TimeStart=datetime.datetime.now()
		return DTimeStart;
	if cmdDisplayType=="stop":
		DTimeStop=time.strftime(timefmt)
		printc (" ",lblColor + "Stopped\t: " + txtColor + str(DTimeStop),"")
		TimeStop=datetime.datetime.now()
		return DTimeStop;
	if cmdDisplayType=="stop-h":
		DTimeStop=time.strftime(timefmt)
		TimeStop=datetime.datetime.now()
		return DTimeStop;
	if TimeStart!="":
		if cmdDisplayType=="summary" or cmdDisplayType=="summary-a":
			# A summary without an explicit stop uses "now" as the stop time.
			if TimeStop=="":
				TimeStop=datetime.datetime.now()
				DTimeStop=time.strftime(timefmt)
			ElapsedTime = TimeStop - TimeStart
			ElapsedTime=str(ElapsedTime)
			# Trim the microseconds down to two digits for display.
			ElapsedTime=ElapsedTime[:-4]
			if cmdDisplayType=="summary-a":
				printc (" ",lblColor + "Started\t: " + txtColor + str(DTimeStart),"")
				printc (" ",lblColor + "Stopped\t: " + txtColor + str(DTimeStop),"")
				printc (" ",lblColor + "Time Spent\t: " + fcolor.BRed + str(ElapsedTime),"")
			if cmdDisplayType=="summary":
				printc (" ",lblColor + "Time Spent\t: " + fcolor.BRed + str(ElapsedTime),"")
			return ElapsedTime;
class GracefulInterruptHandler(object):
	"""
	Context manager that swaps in a SIGINT (Ctrl-C) handler for the duration
	of a ``with`` block.  Instead of raising KeyboardInterrupt, the first
	signal restores the previous handler and sets ``interrupted`` so the
	loop inside the block can finish cleanly.
	"""
	def __init__(self, sig=signal.SIGINT):
		self.sig = sig
	def __enter__(self):
		self.interrupted = False
		self.released = False
		self.original_handler = signal.getsignal(self.sig)
		# Bound method instead of a closure: same behavior, reusable object.
		signal.signal(self.sig, self._on_signal)
		return self
	def _on_signal(self, signum, frame):
		# First interrupt: restore the old handler, then flag the caller.
		self.release()
		self.interrupted = True
	def __exit__(self, type, value, tb):
		self.release()
	def release(self):
		"""Restore the original handler; return False when already restored."""
		if self.released:
			return False
		signal.signal(self.sig, self.original_handler)
		self.released = True
		return True
def printd(ptext):
if DebugMode=="1":
print fcolor.CDebugB + "[DBG] " + fcolor.CDebug + ptext + fcolor.CReset
if DebugMode=="2":
print fcolor.CDebugB + "[DBG] " + fcolor.CDebug + ptext + fcolor.CReset
print fcolor.CReset + fcolor.White + " [Break - Press Any Key To Continue]" + fcolor.CReset
read_a_key()
def GetInterfaceList(cmdMode):
    """Scan `ifconfig -a` output and fill the global interface tables.

    Each qualifying interface appends one entry to every parallel list
    (IFaceList, ModeList, IEEEList, MACList, IPList, IPv6List, BCastList,
    MaskList, StatusList, UpDownList, ISerialList).

    cmdMode selects which interfaces are recorded:
      ""/"ALL"  every interface
      "MAN"/"MON"/"MAS"/"ADH"  by wireless mode (Managed/Monitor/Master/Ad-Hoc)
      "IP"      interfaces holding an IPv4/IPv6 address and a broadcast
      "CON"     interfaces with an IP, broadcast and a default gateway
      "WLAN"/"LAN"/"LOOP"  by link type
    Returns the number of interfaces recorded.
    """
    global IFaceList
    global IEEEList
    global ModeList
    global MACList
    global IPList
    global BCastList
    global MaskList
    global UpDownList
    global StatusList
    global ISerialList
    global IPv6List

    def _sh(cmd, quiet=False):
        # Run a shell pipeline and return its stdout with newlines removed.
        err = open(os.devnull, 'w') if quiet else None
        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=err)
        return p.stdout.read().replace("\n", "")

    if cmdMode == "":
        cmdMode = "ALL"
    proc = Popen("ifconfig -a", shell=True, stdout=subprocess.PIPE, stderr=open(os.devnull, 'w'))
    IFACE = ""
    IEEE = ""
    MODE = ""
    MODEN = ""
    MACADDR = ""
    IPADDR = ""
    IPV6ADDR = ""
    BCAST = ""
    MASK = ""
    STATUS = ""
    IFUP = ""
    LANMODE = ""
    GATEWAY = ""
    IFaceCount = 0
    IFaceList = []
    IEEEList = []
    ModeList = []
    MACList = []
    IPList = []
    IPv6List = []
    BCastList = []
    MaskList = []
    StatusList = []
    UpDownList = []
    ISerialList = []
    for line in proc.communicate()[0].split('\n'):
        if not line:
            continue
        if line[0] != ' ':          # interface header lines start at column 0
            printd ("Line : " + str(line))
            IFACE = line[:line.find(' ')]
            IFACE2 = IFACE[:2].upper()
            printd ("IFACE : " + str(IFACE))
            printd ("IFACE2 : " + str(IFACE2))
            if IFACE2 not in ("ET", "LO", "VM", "PP", "AT"):
                # Wireless candidate: ask iwconfig for its mode and standard.
                MODEN = _sh("iwconfig " + str(IFACE) + "| grep -i 'Mode:' | tr -s ' ' | egrep -o 'Mode:..................' | cut -d ' ' -f1 | cut -d ':' -f2", True)
                MODE = MODEN.upper()
                IEEE = _sh("iwconfig " + str(IFACE) + "| grep -o 'IEEE..........................' | cut -d ' ' -f2").upper().replace("802.11", "802.11 ")
                LANMODE = "WLAN"
            else:
                MODE = "NIL"
                MODEN = "Nil"
                IEEE = "802.3"
                LANMODE = "LAN"
            if IFACE2 == "LO":
                MODE = "LO"
                MODEN = "Loopback"
                IEEE = "Nil"
                LANMODE = "LO"
            printd ("MODE : " + str(MODE))
            printd ("MODEN : " + str(MODEN))
            MACADDR = _sh("ifconfig " + str(IFACE) + " | grep 'HWaddr' | tr -s ' ' | cut -d ' ' -f5").upper().replace("-", ":")[:17]
            IPADDR = _sh("ifconfig " + str(IFACE) + " | egrep -o '([0-9]{1,3}\.){3}[0-9]{1,3}' | sed -n '1p'").upper()
            IPV6ADDR = _sh("ifconfig " + str(IFACE) + " | grep -a -i 'inet6 addr:' | tr -s ' ' | sed -n '1p' | cut -d ' ' -f4").upper()
            BCAST = _sh("ifconfig " + str(IFACE) + " | grep '\<Bcast\>' | sed -n '1p' | tr -s ' ' | cut -d ' ' -f4 | cut -d ':' -f2").upper()
            MASK = _sh("ifconfig " + str(IFACE) + " | grep '\<Mask\>' | sed -n '1p' | tr -s ' ' | cut -d ' ' -f5 | cut -d ':' -f2").upper()
            if cmdMode == "CON":
                GATEWAY = _sh("netstat -r | grep -a -i '" + str(IFACE) + "' | awk '{print $2}' | egrep -o '([0-9]{1,3}\.){3}[0-9]{1,3}' | sed -n '1p'").upper()
            else:
                GATEWAY = ""
            printd ("GATEWAY : " + GATEWAY)
            STATUS = _sh("ifconfig " + str(IFACE) + " | grep 'MTU:' | sed -n '1p' | tr -s ' ' | grep -o '.\{0,100\}MTU'").upper().replace(" MTU", "").lstrip().rstrip()
            Result = _sh("ifconfig " + str(IFACE) + " | grep 'MTU:' | sed -n '1p' | tr -s ' ' | grep -o '.\{0,100\}MTU' | cut -d ' ' -f2 | grep 'UP'").upper().lstrip().rstrip()
            IFUP = "Up" if Result == "UP" else "Down"
            printd ("STATUS : " + str(STATUS))
            printd ("line " + line)
            printd ("IEEE : " + IEEE)
            printd ("MACADDR : " + str(MACADDR))
            printd ("IPADDR : " + str(IPADDR))
            printd ("MASK : " + str(MASK))
            printd ("IFUP : " + str(IFUP))
            printd ("cmdMode := " + str(cmdMode))
            # The filters are mutually exclusive because cmdMode holds a
            # single value, so at most one clause can match per interface.
            wanted = (cmdMode == "ALL"
                or (cmdMode == "MAN" and MODE == "MANAGED")
                or (cmdMode == "MON" and MODE == "MONITOR")
                or (cmdMode == "MAS" and MODE == "MASTER")
                or (cmdMode == "ADH" and MODE == "AD-HOC")
                or (cmdMode == "IP" and BCAST != "" and (IPV6ADDR != "" or IPADDR != ""))
                or (cmdMode == "CON" and IPADDR != "" and GATEWAY != "" and BCAST != "")
                or (cmdMode == "WLAN" and LANMODE == "WLAN")
                or (cmdMode == "LAN" and LANMODE == "LAN")
                or (cmdMode == "LOOP" and LANMODE == "LO"))
            if wanted:
                IFaceCount = IFaceCount + 1
                ModeList.append(str(MODEN))
                IFaceList.append(IFACE)
                IEEEList.append(IEEE)
                MACList.append(MACADDR)
                IPList.append(IPADDR)
                IPv6List.append(IPV6ADDR)
                BCastList.append(BCAST)
                MaskList.append(MASK)
                StatusList.append(STATUS)
                UpDownList.append(IFUP)
                ISerialList.append(str(IFaceCount))
    return IFaceCount;
def RemoveColor(InText):
    """Strip ANSI SGR escape sequences (colours/attributes) from InText.

    The original enumerated ~75 individual codes with chained
    .replace() calls; a single regular expression removes every
    ``ESC [ ... m`` sequence in one pass, including codes the hand-written
    list missed (e.g. plain bright colours such as "\\033[91m").

    Returns InText unchanged when it is empty.
    """
    import re
    if InText!="":
        InText = re.sub('\033\[[0-9;]*m', '', InText)
    return InText;
def CombineListing(List1, List2, List3, List4, List5, List6, List7, List8):
    """Merge up to eight parallel column lists into the global MergedList.

    Column widths (longest colour-stripped entry + 4) are recorded in
    MergedSpaceList (preceded by a fixed serial width of 5); List1 is
    separated from the remaining columns by the "<#&!#>" marker.  A list
    argument passed as "" is treated as absent.  Returns the row count.
    """
    global MergedList
    global MergedSpaceList
    global TitleList
    MergedList = []
    MergedSpaceList = []
    TitleList = []
    columns = [List1, List2, List3, List4, List5, List6, List7, List8]
    widths = []
    # Pass 1: widest colour-stripped entry per column, plus 4 for padding.
    for pos, col in enumerate(columns):
        width = 0
        if str(col) != "":
            for entry in col:
                if str(entry) != "":
                    plain = RemoveColor(str(entry))
                    if len(plain) > width:
                        width = len(plain)
            printd ("ListMax" + str(pos + 1) + " : " + str(width))
            width = width + 4
        widths.append(width)
    for pos, width in enumerate(widths):
        printd ("ListMax" + str(pos + 1) + " - After + 4 : " + str(width))
    MergedSpaceList.append(5)
    for width in widths:
        MergedSpaceList.append(width)
    # Pass 2: build one padded row per List1 entry.
    row = 0
    while row < len(List1):
        pad = widths[0] - len(RemoveColor(List1[row]))
        merged = List1[row] + "<#&!#>" + " " * pad
        for col, width in zip(columns[1:], widths[1:]):
            if str(col) != "":
                if str(col[row]) != "":
                    merged = merged + col[row] + " " * (width - len(RemoveColor(col[row])))
                else:
                    merged = merged + " " * width
        merged = merged.strip()
        MergedList.append(str(merged))
        row = row + 1
    return row;
def QuestionFromList(ListTitle,ListTitleSpace,ListUse,AskQuestion,RtnType):
    """Render a numbered menu of MergedList-style rows and prompt for a pick.

    ListTitle/ListTitleSpace : column headers and their display widths.
    ListUse      : rows to display ("<#&!#>" markers are stripped on screen).
    AskQuestion  : prompt text; the user answers 1..n, or 0 to cancel.
    RtnType      : "1" - return the first column of the chosen row and set
                   the global ListingIndex to the zero-based index;
                   otherwise return the raw reply string.
    """
    global ListingIndex
    ListingIndex=""
    # Colour palette for the menu parts (border/prompt/title/list/serial/text).
    bcolor=fcolor.SWhite
    pcolor=fcolor.BYellow
    ttcolor=fcolor.BBlue
    lcolor=fcolor.SYellow
    scolor=fcolor.BRed
    tcolor=fcolor.BGreen
    x=0
    sn=0
    CombineTitle=""
    totallen=0
    # Build the single header line, padding each title to its column width.
    while x < len(ListTitle):
        xlen=len(ListTitle[x])
        remainspc=ListTitleSpace[x] - xlen
        if x==8:
            # The ninth column is rendered 4 characters narrower.
            remainspc = remainspc - 4
        if remainspc<1:
            remainspc=1
        CombineTitle=CombineTitle + ListTitle[x] + " " * remainspc
        x = x +1
    totallen=len(CombineTitle) + 1
    printl(" ","1","")
    DrawLine("=",fcolor.SWhite,totallen)
    print bcolor + "[" + pcolor + "*" + bcolor + "] " + ttcolor + str(CombineTitle) + fcolor.CReset
    printl(" ","1","")
    DrawLine("=",fcolor.SWhite,totallen)
    # Number and print each selectable row.
    for i, showtext in enumerate(ListUse):
        sn=i + 1
        remainspc = 4 - len(str(sn))
        showtext=showtext.replace("<#&!#>","")
        print " " +scolor + str(sn) + "." + " " * remainspc + lcolor+ showtext
    printl(" ","1","")
    DrawLine("^",fcolor.SWhite,totallen)
    usr_resp=raw_input (bcolor + "[" + pcolor + "?" + bcolor + "] " + tcolor + str(AskQuestion) + " [ " + scolor + "1" + tcolor + "-" + scolor + str(sn) + tcolor + " / " + scolor + "0" + fcolor.SWhite + " = Cancel" + tcolor + " ] : " + fcolor.BWhite)
    # Invalid answer: redraw the whole menu by recursing, then bubble the
    # eventual answer up (the loop body always returns on its first pass).
    while not usr_resp.isdigit() or int(usr_resp) < 0 or int(usr_resp) > len(ListUse):
        print ""
        Result=QuestionFromList(ListTitle,ListTitleSpace,ListUse,AskQuestion,RtnType)
        return str(Result)
    if RtnType=="1":
        # Convert to a zero-based index; "0" (cancel) becomes -1.
        usr_resp = int(usr_resp) - 1
        ListingIndex=usr_resp
        SelList=ListUse[int(usr_resp)]
        SelList=SelList.replace("<#&!#>","\t")
        SelList=RemoveColor(SelList)
        POS=SelList.find("\t", 2) +1
        SelList=SelList[:POS]
        Rtn=SelList
        # Extract the first tab-delimited field as the return value.
        ps=subprocess.Popen("echo " + str(SelList) + " | cut -d '\t' -f1" , shell=True, stdout=subprocess.PIPE)
        Rtn=ps.stdout.read()
        Rtn=Rtn.replace("\n","")
        if usr_resp==-1:
            # The user cancelled with 0.
            usr_resp=0
            Rtn="0"
        return Rtn;
    else:
        usr_resp=usr_resp.replace("\n","")
        ListingIndex=usr_resp
        return usr_resp;
def DelFile(strFileName,ShowDisplay):
    """Delete a single file, or every file matching a glob pattern.

    ShowDisplay "1" prints progress via printc.  Returns True/False for a
    single file; for a wildcard pattern returns True after deleting the
    matches (the count is only displayed, not returned).
    """
    import glob, os
    if ShowDisplay == "":
        ShowDisplay = 0
    wildcard = "*" in strFileName or "?" in strFileName
    if not wildcard:
        deleted = False
        if IsFileDirExist(strFileName) == "F":
            os.remove(strFileName)
            deleted = True
            if ShowDisplay == "1":
                printc (" ",fcolor.SGreen + "File [ " + fcolor.SRed + strFileName + fcolor.SGreen + " ] deleted.","")
        else:
            if ShowDisplay == "1":
                printc ("!!",fcolor.SRed + "File [ " + fcolor.SYellow + strFileName + fcolor.SRed + " ] does not exist.","")
        return deleted
    count = 0
    for name in glob.glob(strFileName):
        if ShowDisplay == "1":
            printc (" ",fcolor.SGreen + "Deleting [ " + fcolor.SRed + str(name) + fcolor.SGreen + " ]...","")
        os.remove(name)
        count = count + 1
    if ShowDisplay == "1":
        printc (" ",fcolor.SGreen + "Total [ " + fcolor.BRed + str(count) + fcolor.SGreen + " ] files deleted.","")
    return True
def IsFileDirExist(strFilePath):
    """
    Function : Check if a file/path exist
    Return : "F" - Exist File
           : "D" - Exist Directory
           : "E" - Does not exist
    """
    # isfile/isdir both imply existence, so no separate exists() check needed.
    if os.path.isfile(strFilePath):
        return "F"
    if os.path.isdir(strFilePath):
        return "D"
    return "E"
def MakeTree(dirName,ShowDisplay):
    """Create dirName and any missing parent directories (like mkdir -p).

    ShowDisplay "1" prints each intermediate directory as it is created.
    Returns True in every case (whether created or already present).
    """
    if ShowDisplay == "":
        ShowDisplay = 0
    printd ("Make Tree - " + dirName)
    printd ("Check Exists : " + str(os.path.exists(dirName)))
    printd ("IsFileDirExist : " + str(IsFileDirExist(dirName)))
    if os.path.exists(dirName) and IsFileDirExist(dirName) != "E":
        printd ("Tree - " + dirName + " Found")
        printc ("!!",fcolor.SRed + "Path [ " + fcolor.SYellow + dirName + fcolor.SRed + " ] already exist.","")
        return True
    printd ("Tree - " + dirName + " not found")
    # Walk the absolute path component by component, creating as we go.
    partial = "/"
    for part in dirName.split("/")[1:]:
        partial = partial + part + "/"
        if not os.path.exists(partial):
            if ShowDisplay == "1":
                printc (" ",fcolor.SGreen + "Creating path [ " + fcolor.SRed + partial + fcolor.SGreen + " ] ...","")
            os.mkdir(partial, 0o755)
    printc (" ",fcolor.SGreen + "Path [ " + fcolor.SRed + dirName + fcolor.SGreen + " ] created...","")
    return True
def RemoveTree(dirName,ShowDisplay):
    """Recursively delete the directory tree at dirName.

    ShowDisplay "1" prints progress via printc.  Returns False when the
    path does not exist, True after the tree has been removed.
    """
    import shutil
    if ShowDisplay == "":
        ShowDisplay = "0"
    if not os.path.exists(dirName):
        if ShowDisplay == "1":
            printc ("!!",fcolor.SRed + "Path [ " + fcolor.SYellow + dirName + fcolor.SRed + " ] does not exist..","")
        return False
    if ShowDisplay == "1":
        printc (" ",fcolor.SGreen + "Removing Tree [ " + fcolor.SRed + dirName + fcolor.SGreen + " ] ...","")
    shutil.rmtree(dirName)
    # Confirm the tree is gone before reporting success.
    if IsFileDirExist(dirName) == "E":
        if ShowDisplay == "1":
            printc (" ",fcolor.SGreen + "Tree [ " + fcolor.SRed + dirName + fcolor.SGreen + " ] Removed...","")
        return True
    return True
def CopyFile(RootSrcPath,RootDstPath, strFileName,ShowDisplay):
    """Copy strFileName (or a wildcard pattern) from RootSrcPath to RootDstPath.

    Both root paths get a trailing "/" appended when missing, the
    destination tree is created on demand, and pre-existing destination
    files are removed before copying.  ShowDisplay "1" prints progress.

    Returns True/False for a single file; for a wildcard pattern, the
    number of files copied.
    """
    import shutil
    import glob, os
    RtnResult=False
    if ShowDisplay=="":
        ShowDisplay=0
    # Normalise both roots to end with a path separator.
    if RootSrcPath[-1:]!="/":
        RootSrcPath=RootSrcPath + "/"
    if RootDstPath[-1:]!="/":
        RootDstPath=RootDstPath + "/"
    if strFileName.find("*")==-1 and strFileName.find("?")==-1:
        # Single, literal file name.
        Result=IsFileDirExist(RootSrcPath + strFileName)
        if Result=="F":
            if not os.path.exists(RootDstPath):
                if ShowDisplay=="1":
                    printc (" ",fcolor.SGreen + " Making Directory [ " + fcolor.SRed + RootDstPath + fcolor.SGreen + " ] ....","")
                Result=MakeTree(RootDstPath,ShowDisplay)
            if os.path.exists(RootDstPath + strFileName):
                os.remove(RootDstPath + strFileName)
                if ShowDisplay=="1":
                    printc (" ",fcolor.SGreen + " Removing Existing Destination File [ " + fcolor.SRed + RootDstPath + strFileName + fcolor.SGreen + " ] ....","")
            if ShowDisplay=="1":
                printc (" ",fcolor.SGreen + " Copying [ " + fcolor.SWhite + RootSrcPath + strFileName + fcolor.SGreen + " ] to [ " + fcolor.SRed + RootDstPath + strFileName + fcolor.SGreen + " ] ....","")
            shutil.copy(RootSrcPath + strFileName, RootDstPath + strFileName)
            if os.path.exists(RootDstPath + strFileName):
                if ShowDisplay=="1":
                    printc (" ",fcolor.SGreen + " File copied to [ " + fcolor.SRed + RootDstPath + strFileName + fcolor.SGreen + " ] ....","")
                RtnResult=True
                return RtnResult;
            else:
                if ShowDisplay=="1":
                    printc ("!!",fcolor.SRed + " File copying [ " + fcolor.SRed + RootDstPath + strFileName + fcolor.SGreen + " ] failed....","")
                return RtnResult;
        else:
            if ShowDisplay=="1":
                printc ("!!",fcolor.SRed + "Source File [ " + fcolor.SRed + RootSrcPath + strFileName + fcolor.SGreen + " ] not found !!","")
            return RtnResult;
    else:
        # Wildcard pattern: copy every match.
        if not os.path.exists(RootDstPath):
            if ShowDisplay=="1":
                printc (" ",fcolor.SGreen + " Making Directory [ " + fcolor.SRed + RootDstPath + fcolor.SGreen + " ] ....","")
            Result=MakeTree(RootDstPath,ShowDisplay)
        if ShowDisplay=="1":
            printc (" ",fcolor.SGreen + " Listing File...." + RootSrcPath + strFileName,"")
        filelist = glob.glob(RootSrcPath + strFileName)
        fc=0
        for SrcHit in filelist:
            # BUG FIX: the destination path must be derived from the glob hit
            # BEFORE testing for an existing copy.  The original tested
            # os.path.exists(RootDstPath + SrcHit) -- destination root plus
            # the FULL source path -- which never matched, so stale
            # destination files were never explicitly removed.
            DstFile=SrcHit.replace(RootSrcPath,RootDstPath)
            if os.path.exists(DstFile):
                os.remove(DstFile)
                if ShowDisplay=="1":
                    printc (" ",fcolor.SGreen + " Removing Existing Destination File [ " + fcolor.SRed + DstFile + fcolor.SGreen + " ] ....","")
            if ShowDisplay=="1":
                printc (" ",fcolor.SGreen + " Moving [ " + fcolor.SWhite + SrcHit + fcolor.SGreen + " ] to [ " + fcolor.SRed + DstFile + fcolor.SGreen + " ] ....","")
            shutil.copy(SrcHit, DstFile)
            if os.path.exists(DstFile):
                fc=fc+1
                if ShowDisplay=="1":
                    printc (" ",fcolor.SGreen + " File copied to [ " + fcolor.SRed + DstFile + fcolor.SGreen + " ] ....","")
            else:
                if ShowDisplay=="1":
                    printc ("!!",fcolor.SRed + " File copying [ " + fcolor.SRed + DstFile + fcolor.SGreen + " ] failed....","")
        if ShowDisplay=="1":
            printc (" ",fcolor.BGreen + "Total [ " + fcolor.BRed + str(fc) + fcolor.BGreen + " ] files copied.","")
        RtnResult=fc
        return RtnResult
def MoveFile(RootSrcPath,RootDstPath, strFileName,ShowDisplay):
    """Move strFileName (or a wildcard pattern) from RootSrcPath to RootDstPath.

    Both root paths get a trailing "/" appended when missing, the
    destination tree is created on demand, and pre-existing destination
    files are removed before moving.  ShowDisplay "1" prints progress.

    Returns True/False for a single file; for a wildcard pattern, the
    number of files moved.
    """
    import shutil
    import glob, os
    RtnResult=False
    if ShowDisplay=="":
        ShowDisplay=0
    # Normalise both roots to end with a path separator.
    if RootSrcPath[-1:]!="/":
        RootSrcPath=RootSrcPath + "/"
    if RootDstPath[-1:]!="/":
        RootDstPath=RootDstPath + "/"
    if strFileName.find("*")==-1 and strFileName.find("?")==-1:
        # Single, literal file name.
        Result=IsFileDirExist(RootSrcPath + strFileName)
        if Result=="F":
            if not os.path.exists(RootDstPath):
                if ShowDisplay=="1":
                    printc (" ",fcolor.SGreen + " Making Directory [ " + fcolor.SRed + RootDstPath + fcolor.SGreen + " ] ....","")
                Result=MakeTree(RootDstPath,ShowDisplay)
            if os.path.exists(RootDstPath + strFileName):
                os.remove(RootDstPath + strFileName)
                if ShowDisplay=="1":
                    printc (" ",fcolor.SGreen + " Removing Existing Destination File [ " + fcolor.SRed + RootDstPath + strFileName + fcolor.SGreen + " ] ....","")
            if ShowDisplay=="1":
                printc (" ",fcolor.SGreen + " Moving [ " + fcolor.SWhite + RootSrcPath + strFileName + fcolor.SGreen + " ] to [ " + fcolor.SRed + RootDstPath + strFileName + fcolor.SGreen + " ] ....","")
            shutil.move(RootSrcPath + strFileName, RootDstPath + strFileName)
            if os.path.exists(RootDstPath + strFileName):
                if ShowDisplay=="1":
                    printc (" ",fcolor.SGreen + " File moved to [ " + fcolor.SRed + RootDstPath + strFileName + fcolor.SGreen + " ] ....","")
                RtnResult=True
                return RtnResult;
            else:
                if ShowDisplay=="1":
                    printc ("!!",fcolor.SRed + " File moving [ " + fcolor.SRed + RootDstPath + strFileName + fcolor.SGreen + " ] failed....","")
                return RtnResult;
        else:
            if ShowDisplay=="1":
                printc ("!!",fcolor.SRed + "Source File [ " + fcolor.SRed + RootSrcPath + strFileName + fcolor.SGreen + " ] not found !!","")
            return RtnResult;
    else:
        # Wildcard pattern: move every match.
        if not os.path.exists(RootDstPath):
            if ShowDisplay=="1":
                printc (" ",fcolor.SGreen + " Making Directory [ " + fcolor.SRed + RootDstPath + fcolor.SGreen + " ] ....","")
            Result=MakeTree(RootDstPath,ShowDisplay)
        if ShowDisplay=="1":
            printc (" ",fcolor.SGreen + " Listing File...." + RootSrcPath + strFileName,"")
        filelist = glob.glob(RootSrcPath + strFileName)
        fc=0
        for SrcHit in filelist:
            # BUG FIX: derive the destination path from the glob hit BEFORE
            # testing for an existing copy.  The original tested
            # os.path.exists(RootDstPath + SrcHit) -- destination root plus
            # the FULL source path -- which never matched, so stale
            # destination files were never explicitly removed.
            DstFile=SrcHit.replace(RootSrcPath,RootDstPath)
            if os.path.exists(DstFile):
                os.remove(DstFile)
                if ShowDisplay=="1":
                    printc (" ",fcolor.SGreen + " Removing Existing Destination File [ " + fcolor.SRed + DstFile + fcolor.SGreen + " ] ....","")
            if ShowDisplay=="1":
                printc (" ",fcolor.SGreen + " Moving [ " + fcolor.SWhite + SrcHit + fcolor.SGreen + " ] to [ " + fcolor.SRed + DstFile + fcolor.SGreen + " ] ....","")
            shutil.move(SrcHit, DstFile)
            if os.path.exists(DstFile):
                fc=fc+1
                if ShowDisplay=="1":
                    printc (" ",fcolor.SGreen + " File moved to [ " + fcolor.SRed + DstFile + fcolor.SGreen + " ] ....","")
            else:
                if ShowDisplay=="1":
                    printc ("!!",fcolor.SRed + " File moving [ " + fcolor.SRed + DstFile + fcolor.SGreen + " ] failed....","")
        if ShowDisplay=="1":
            printc (" ",fcolor.BGreen + "Total [ " + fcolor.BRed + str(fc) + fcolor.BGreen + " ] files moved.","")
        RtnResult=fc
        return RtnResult
def MoveTree(RootSrcDir,RootDstDir,ShowDisplay):
    """Move every file and sub-directory under RootSrcDir into RootDstDir,
    pruning the emptied source sub-directories afterwards.

    ShowDisplay "1" prints per-item progress.  Returns the number of files
    moved, as a string.
    """
    import shutil
    if ShowDisplay=="":
        ShowDisplay="0"
    moved_files=0
    moved_dirs=0
    for Src_Dir, dirs, files in os.walk(RootSrcDir):
        Dst_Dir = Src_Dir.replace(RootSrcDir, RootDstDir)
        if Src_Dir!=RootSrcDir and Dst_Dir!=RootDstDir:
            moved_dirs=moved_dirs+1
            if ShowDisplay=="1":
                print(fcolor.SGreen + " Moving Directory " + "[ " + fcolor.SWhite + Src_Dir + fcolor.CReset + fcolor.SGreen + " ] to [ " + fcolor.SRed + Dst_Dir + fcolor.CReset + fcolor.SGreen + " ] ...")
        if not os.path.exists(Dst_Dir):
            os.mkdir(Dst_Dir)
        for file_ in files:
            SrcFile = os.path.join(Src_Dir, file_)
            DstFile = os.path.join(Dst_Dir, file_)
            if os.path.exists(DstFile):
                os.remove(DstFile)
            if ShowDisplay=="1":
                print(fcolor.SGreen + " Moving File " + "[ " + fcolor.SWhite + SrcFile + fcolor.CReset + fcolor.SGreen + " ] to [ " + fcolor.SRed + DstFile + fcolor.CReset + fcolor.SGreen + " ] ...")
            shutil.move(SrcFile, Dst_Dir)
            moved_files=moved_files+1
            if os.path.exists(Dst_Dir):
                if ShowDisplay=="1":
                    printc (" ",fcolor.SGreen + " File moved to [ " + fcolor.SRed + DstFile + fcolor.SGreen + " ] ....","")
        # Remove the now-empty source sub-directory (never the root itself).
        if IsFileDirExist(Src_Dir)=="D":
            if Src_Dir!=RootSrcDir:
                print(fcolor.SGreen + " Removing Directory " + "[ " + fcolor.SWhite + Src_Dir + fcolor.CReset + fcolor.SGreen + " ] ....")
                Result=os.rmdir(Src_Dir)
    if ShowDisplay=="1":
        print(fcolor.BGreen + " Total [ " + fcolor.BRed + str(moved_dirs) + fcolor.BGreen + " ] director(ies) and [ " + fcolor.BRed + str(moved_files) + fcolor.BGreen + " ] file(s) transfered..")
    return str(moved_files);
def CopyTree(RootSrcDir,RootDstDir,ShowDisplay):
    """Recursively copy the tree at RootSrcDir into RootDstDir, replacing
    destination files that already exist.

    ShowDisplay "1" prints per-item progress.  Returns the number of files
    copied, as a string.
    """
    import shutil
    if ShowDisplay=="":
        ShowDisplay="0"
    copied_files=0
    copied_dirs=0
    for Src_Dir, dirs, files in os.walk(RootSrcDir):
        Dst_Dir = Src_Dir.replace(RootSrcDir, RootDstDir)
        if Src_Dir!=RootSrcDir and Dst_Dir!=RootDstDir:
            copied_dirs=copied_dirs+1
            if ShowDisplay=="1":
                print(fcolor.SGreen + " Copying Directory " + "[ " + fcolor.SWhite + Src_Dir + fcolor.CReset + fcolor.SGreen + " ] to [ " + fcolor.SRed + Dst_Dir + fcolor.CReset + fcolor.SGreen + " ] ...")
        if not os.path.exists(Dst_Dir):
            os.mkdir(Dst_Dir)
        for file_ in files:
            SrcFile = os.path.join(Src_Dir, file_)
            DstFile = os.path.join(Dst_Dir, file_)
            if os.path.exists(DstFile):
                if ShowDisplay=="1":
                    print(fcolor.SGreen + " Replacing File " + fcolor.SRed + DstFile + fcolor.CReset + fcolor.SGreen + " ] ...")
                os.remove(DstFile)
                shutil.copy(SrcFile, Dst_Dir)
            else:
                if ShowDisplay=="1":
                    print(fcolor.SGreen + " Copy File " + "[ " + fcolor.SWhite + SrcFile + fcolor.CReset + fcolor.SGreen + " ] to [ " + fcolor.SRed + DstFile + fcolor.CReset + fcolor.SGreen + " ] ...")
                shutil.copy(SrcFile, Dst_Dir)
            copied_files=copied_files+1
            if os.path.exists(Dst_Dir):
                if ShowDisplay=="1":
                    printc (" ",fcolor.SGreen + " File copied to [ " + fcolor.SRed + DstFile + fcolor.SGreen + " ] ....","")
    if ShowDisplay=="1":
        print(fcolor.BGreen + " Total [ " + fcolor.BRed + str(copied_dirs) + fcolor.BGreen + " ] director(ies) and [ " + fcolor.BRed + str(copied_files) + fcolor.BGreen + " ] file(s) copied..")
    return str(copied_files);
def Explore(DirUrlName,ShowDisplay):
    """Open DirUrlName (a directory or URL) with the desktop's default
    handler via xdg-open.

    ShowDisplay "1" prints what is being opened.  Returns -1 when
    DirUrlName is empty, otherwise the launcher's exit code (0 = success).
    """
    if ShowDisplay=="":
        ShowDisplay=0
    Result=-1
    if DirUrlName!="":
        if ShowDisplay=="1":
            printc (" ",fcolor.SGreen + "Opening location [ " + fcolor.SRed + DirUrlName + fcolor.SGreen + " ] ...","")
        # SECURITY/ROBUSTNESS FIX: the original built a shell string
        # ("xdg-open " + name), which broke on paths containing spaces or
        # quotes and allowed shell injection.  The list form bypasses the
        # shell entirely; output is still discarded.
        devnull = open(os.devnull, 'w')
        try:
            Result = subprocess.call(["xdg-open", str(DirUrlName)], stdout=devnull, stderr=devnull)
        except OSError:
            # xdg-open not installed; mirror a shell "command not found".
            Result = 127
        finally:
            devnull.close()
    return Result
def UninstallApplication():
    """Prompt for confirmation, then delete the script (current-directory
    and /usr/sbin copies) plus its application directory, and exit."""
    Ask=AskQuestion ("Are you sure you want to remove this application ?","y/N","","N","")
    if Ask not in ("y", "Y"):
        printc ("i",fcolor.BWhite + "Uninstall aborted..","")
        exit(0)
    CurFileLocation=os.getcwd() + "/" + ScriptName
    if os.path.exists(CurFileLocation):
        printd("Delete File : " + CurFileLocation)
        os.remove(CurFileLocation)
    SbinCopy="/usr/sbin/" + ScriptName
    if os.path.exists(SbinCopy):
        printd("Delete File : " + "/usr/sbin/" + str(ScriptName))
        os.remove(SbinCopy)
    if os.path.exists(appdir):
        printd("Remove Path : " + appdir)
        RemoveTree(appdir,"")
    printc ("i", "Application successfully removed !!","")
    exit(0)
def GetIWList(cmdMode,SELECTED_IFACE,RETRY):
global AP_BSSIDList
global AP_FREQList
global AP_QUALITYList
global AP_SIGNALList
global AP_ENCKEYList
global AP_ESSIDList
global AP_MODEList
global AP_CHANNELList
global AP_ENCTYPEList
if RETRY=="":
AP_BSSIDList=[]
AP_FREQList=[]
AP_QUALITYList=[]
AP_SIGNALList=[]
AP_ENCKEYList=[]
AP_ESSIDList=[]
AP_MODEList=[]
AP_CHANNELList=[]
AP_ENCTYPEList=[]
POPULATE=0
if len(AP_BSSIDList)>0:
Result=AskQuestion(fcolor.SGreen + "An existing list with [ " + fcolor.BRed + str(len(AP_BSSIDList)) + fcolor.SGreen + " ] records were found, " + fcolor.BGreen + "populate existing ?","Y/n","U","Y","1")
if Result=="Y":
POPULATE=1
else:
AP_BSSIDList=[]
AP_FREQList=[]
AP_QUALITYList=[]
AP_SIGNALList=[]
AP_ENCKEYList=[]
AP_ESSIDList=[]
AP_MODEList=[]
AP_CHANNELList=[]
AP_ENCTYPEList=[]
cmdMode=cmdMode.upper()
if cmdMode=="":
cmdMode="ALL"
Result=Run("ifconfig " + SELECTED_IFACE + " up","1")
Result=printc (".","<$rs$>" + "Scanning for Access Point..Please wait..","")
printl(Result,"1","")
iwlistfile=appdir + "tmp/scan.lst"
Result=Run("iwlist " + SELECTED_IFACE + " scanning > " + iwlistfile ,"0")
printl(fcolor.BGreen + " [Completed]","1","")
print ""
statinfo = os.stat(iwlistfile)
if statinfo.st_size==0:
printc ("@",fcolor.SRed + "Scanning failed to get any access point..Retrying in 5 seconds..","5")
GetIWList(cmdMode,SELECTED_IFACE,"1")
return
f = open( iwlistfile, "r" )
AP_BSSID=""
AP_FREQ=""
AP_QUALITY=""
AP_SIGNAL=""
AP_ENCKEY=""
AP_ESSID=""
AP_MODE=""
AP_CHANNEL=""
AP_ENCTYPE=""
if POPULATE=="1":
printc (".","Populating current list...","")
for line in f:
line=line.replace("\n","").lstrip().rstrip()
if line.find("Cell ")!=-1:
if AP_BSSID!="" and AP_MODE!="":
if AP_ENCTYPE=="" and AP_ENCKEY=="ON":
AP_ENCTYPE="WEP"
if AP_ENCTYPE=="" and AP_ENCKEY=="OFF":
AP_ENCTYPE="OPEN"
if AP_ENCTYPE=="WPA2/WPA":
AP_ENCTYPE=="WPA/WPA2"
ADD=""
if cmdMode=="ALL-S" and AP_ESSID.find("\\x")==-1 and AP_ESSID!="":
ADD="1"
if cmdMode=="ALL":
ADD="1"
if cmdMode=="WPA-S" and AP_ENCTYPE.find("WPA")!=-1 and AP_ESSID.find("\\x")==-1 and AP_ESSID!="" and len(AP_ESSID)>2:
ADD="1"
if cmdMode=="WPA" and AP_ENCTYPE.find("WPA")!=-1:
ADD="1"
if cmdMode=="WEP-S" and AP_ENCTYPE.find("WEP")!=-1 and AP_ESSID.find("\\x")==-1 and AP_ESSID!="" and len(AP_ESSID)>2:
ADD="1"
if cmdMode=="WEP" and AP_ENCTYPE.find("WEP")!=-1:
ADD="1"
if cmdMode=="OPN-S" and AP_ENCTYPE.find("OPEN")!=-1 and AP_ESSID.find("\\x")==-1 and AP_ESSID!="" and len(AP_ESSID)>2:
ADD="1"
if cmdMode=="OPN" and AP_ENCTYPE.find("OPEN")!=-1:
ADD="1"
if str(POPULATE)=="1":
if any(AP_BSSID in s for s in AP_BSSIDList):
ADD="0"
if ADD=="1":
if int(AP_QUALITY[:2])<=35:
SNLColor=fcolor.IRed
BSNLColor=fcolor.BIRed
if int(AP_QUALITY[:2])>35 and int(AP_QUALITY[:2])<55:
SNLColor=fcolor.IYellow
BSNLColor=fcolor.BIYellow
if int(AP_QUALITY[:2])>=55:
SNLColor=fcolor.IGreen
BSNLColor=fcolor.BIGreen
if AP_ENCTYPE.find("WPA")!=-1:
AP_ENCTYPE=fcolor.IPink + AP_ENCTYPE
AP_BSSID=SNLColor + AP_BSSID
if AP_ENCTYPE.find("OPEN")!=-1:
AP_ENCTYPE=fcolor.IBlue + AP_ENCTYPE
AP_BSSID=SNLColor + AP_BSSID
if AP_ENCTYPE.find("WEP")!=-1:
AP_ENCTYPE=fcolor.ICyan + AP_ENCTYPE
AP_BSSID=SNLColor + AP_BSSID
AP_BSSIDList.append(str(AP_BSSID))
AP_FREQList.append(str(AP_FREQ))
AP_QUALITYList.append(SNLColor + str(AP_QUALITY))
AP_SIGNALList.append(SNLColor + str(AP_SIGNAL))
AP_ENCKEYList.append(str(AP_ENCKEY))
AP_ESSIDList.append(str(BSNLColor + AP_ESSID))
AP_MODEList.append(str(AP_MODE))
AP_CHANNELList.append(str(AP_CHANNEL))
AP_ENCTYPEList.append(str(AP_ENCTYPE))
AP_BSSID=""
AP_FREQ=""
AP_QUALITY=""
AP_CHANNEL=""
AP_SIGNAL=""
AP_ENCKEY=""
AP_ESSID=""
AP_MODE=""
AP_ENCTYPE=""
POS=line.index('Address:')
if POS>-1:
POS=POS+9
AP_BSSID=str(line[POS:])
if AP_BSSID!="" and line.find("Channel:")!=-1:
POS=line.index('Channel:')
if POS>-1:
POS=POS+8
AP_CHANNEL=str(line[POS:])
if AP_BSSID!="" and line.find("Frequency:")!=-1:
POS=line.index('Frequency:')
if POS>-1:
POS=POS+10
AP_FREQ=str(line[POS:])
POS=AP_FREQ.index(' (')
if POS>-1:
AP_FREQ=str(AP_FREQ[:POS])
if AP_BSSID!="" and line.find("Quality=")!=-1:
POS=line.index('Quality=')
if POS>-1:
POS=POS+8
AP_QUALITY=str(line[POS:])
POS=AP_QUALITY.index(' ')
if POS>-1:
AP_QUALITY=str(AP_QUALITY[:POS])
if AP_BSSID!="" and line.find("Signal level=")!=-1:
POS=line.index('Signal level=')
if POS>-1:
POS=POS+13
AP_SIGNAL=str(line[POS:])
if AP_BSSID!="" and line.find("Encryption key:")!=-1:
POS=line.index('Encryption key:')
if POS>-1:
POS=POS+15
AP_ENCKEY=str(line[POS:]).upper()
if AP_BSSID!="" and line.find("ESSID:")!=-1:
POS=line.index('ESSID:')
if POS>-1:
POS=POS+6
AP_ESSID=str(line[POS:])
if AP_BSSID!="" and line.find("Mode:")!=-1:
POS=line.index('Mode:')
if POS>-1:
POS=POS+5
AP_MODE=str(line[POS:])
if AP_BSSID!="" and line.find("WPA2 Version")!=-1:
if AP_ENCTYPE!="":
if AP_ENCTYPE.find("WPA2")==-1:
AP_ENCTYPE=AP_ENCTYPE + "/WPA2"
else:
AP_ENCTYPE=AP_ENCTYPE + "WPA2"
if AP_BSSID!="" and line.find("WPA Version")!=-1:
if AP_ENCTYPE!="":
AP_ENCTYPE=AP_ENCTYPE + "/WPA"
else:
AP_ENCTYPE=AP_ENCTYPE + "WPA"
AP_ENCTYPE=AP_ENCTYPE.replace("\n","")
if AP_ENCTYPE=="WPA2/WPA":
AP_ENCTYPE="WPA/WPA2"
f.close()
if AP_BSSID!="" and AP_MODE!="":
if AP_ENCTYPE=="" and AP_ENCKEY=="ON":
AP_ENCTYPE="WEP"
if AP_ENCTYPE=="" and AP_ENCKEY=="OFF":
AP_ENCTYPE="OPEN"
if AP_ENCTYPE=="WPA2/WPA":
AP_ENCTYPE=="WPA/WPA2"
ADD=""
if cmdMode=="ALL-S" and AP_ESSID.find("\\x")==-1 and AP_ESSID!="":
ADD="1"
if cmdMode=="ALL":
ADD="1"
if cmdMode=="WPA-S" and AP_ENCTYPE.find("WPA")!=-1 and AP_ESSID.find("\\x")==-1 and AP_ESSID!="" and len(AP_ESSID)>2:
ADD="1"
if cmdMode=="WPA" and AP_ENCTYPE.find("WPA")!=-1:
ADD="1"
if cmdMode=="WEP-S" and AP_ENCTYPE.find("WEP")!=-1 and AP_ESSID.find("\\x")==-1 and AP_ESSID!="" and len(AP_ESSID)>2:
ADD="1"
if cmdMode=="WEP" and AP_ENCTYPE.find("WEP")!=-1:
ADD="1"
if cmdMode=="OPN-S" and AP_ENCTYPE.find("OPEN")!=-1 and AP_ESSID.find("\\x")==-1 and AP_ESSID!="" and len(AP_ESSID)>2:
ADD="1"
if cmdMode=="OPN" and AP_ENCTYPE.find("OPEN")!=-1:
ADD="1"
if ADD=="1":
if int(AP_QUALITY[:2])<=35:
SNLColor=fcolor.IRed
BSNLColor=fcolor.BIRed
if int(AP_QUALITY[:2])>35 and int(AP_QUALITY[:2])<55:
SNLColor=fcolor.IYellow
BSNLColor=fcolor.BIYellow
if int(AP_QUALITY[:2])>=55:
SNLColor=fcolor.IGreen
BSNLColor=fcolor.BIGreen
if AP_ENCTYPE.find("WPA")!=-1:
AP_ENCTYPE=fcolor.IPink + AP_ENCTYPE
AP_BSSID=SNLColor + AP_BSSID
if AP_ENCTYPE.find("OPEN")!=-1:
AP_ENCTYPE=fcolor.IBlue + AP_ENCTYPE
AP_BSSID=SNLColor + AP_BSSID
if AP_ENCTYPE.find("WEP")!=-1:
AP_ENCTYPE=fcolor.ICyan + AP_ENCTYPE
AP_BSSID=SNLColor + AP_BSSID
AP_BSSIDList.append(str(AP_BSSID))
AP_FREQList.append(str(AP_FREQ))
AP_QUALITYList.append(SNLColor + str(AP_QUALITY))
AP_SIGNALList.append(SNLColor + str(AP_SIGNAL))
AP_ENCKEYList.append(str(AP_ENCKEY))
AP_ESSIDList.append(str(BSNLColor + AP_ESSID))
AP_MODEList.append(str(AP_MODE))
AP_CHANNELList.append(str(AP_CHANNEL))
AP_ENCTYPEList.append(str(AP_ENCTYPE))
AP_BSSID=""
AP_FREQ=""
AP_QUALITY=""
AP_CHANNEL=""
AP_SIGNAL=""
AP_ENCKEY=""
AP_ESSID=""
AP_MODE=""
AP_ENCTYPE=""
def SelectInterfaceToUse():
    """Prompt the user to choose a wireless interface; return its name."""
    printc ("i", fcolor.BRed + "Wireless Adapter Selection","")
    IfaceCount = GetInterfaceList("MAN")
    if IfaceCount == 0:
        printc ("!", fcolor.SRed + "No wireless adapter adapter found !!","")
        exit()
    IfaceCount = CombineListing(IFaceList, MACList,UpDownList,IEEEList,StatusList,ModeList,"","")
    # A single interface needs no menu.
    if int(IfaceCount) <= 1:
        return IFaceList[0]
    Headers = ['Sel','Iface','MAC Address','Up ?', 'IEEE','Status','Mode','','']
    Choice = QuestionFromList(Headers, MergedSpaceList, MergedList, "Select the interface from the list","0")
    if Choice == "0":
        # Nothing picked: offer a retry, otherwise bail out.
        Retry = AskQuestion(fcolor.SGreen + "You need to select a interface to use," + fcolor.BGreen + " retry ?","Y/n","U","Y","1")
        if Retry == "Y":
            return SelectInterfaceToUse()
        exit(0)
    # Menu entries are 1-based; the list is 0-based.
    return IFaceList[int(Choice) - 1]
def Run(cmdRun, Suppress):
    """Run a shell command and return its stdout as a string.

    cmdRun   -- the shell command line; "" returns -1 without running anything.
    Suppress -- "" or "1": discard output via /dev/null; "0": capture stdout.
                Commands containing their own redirection are never suppressed.
    Returns the captured stdout text (possibly "") or -1 for an empty command.
    """
    if Suppress == "":
        Suppress = "1"
    if cmdRun == "":
        return -1
    # A command carrying its own redirection must not be redirected again.
    # (The original also tested ">>", which ">" already covers.)
    if cmdRun.find(">") != -1:
        Suppress = "0"
    cmdExt = ""
    if Suppress == "1":
        cmdExt = " > /dev/null 2>&1"
    devnull = open(os.devnull, 'w')
    try:
        ps = subprocess.Popen(str(cmdRun) + str(cmdExt), shell=True,
                              stdout=subprocess.PIPE,
                              stderr=devnull,
                              preexec_fn=os.setsid)
        # BUGFIX: communicate() both reads stdout and reaps the child; the
        # original only read stdout and left a zombie process per call.
        readout = ps.communicate()[0]
    finally:
        # BUGFIX: the original leaked the devnull file descriptor.
        devnull.close()
    if not isinstance(readout, str):
        readout = readout.decode("utf-8", "replace")
    return str(readout)
def SelectMonitorToUse():
    """Prompt the user to choose a monitor-mode interface; return its name."""
    time.sleep (0)
    MonCount = GetInterfaceList("MON")
    if MonCount == 0:
        # No monitor interface yet: announce and probe once more.
        printc ("i", fcolor.BRed + "Monitoring Adapter Selection","")
        MonCount = GetInterfaceList("MON")
        if MonCount == 0:
            printc ("!", fcolor.SRed + "No monitoring adapter found !!","")
            exit()
    MonCount = CombineListing(IFaceList, MACList,UpDownList,IEEEList,StatusList,"","","")
    # A single interface needs no menu.
    if int(MonCount) <= 1:
        return IFaceList[0]
    Headers = ['Sel','Iface','MAC Address','Up ?', 'IEEE','Status','','','']
    Choice = QuestionFromList(Headers, MergedSpaceList, MergedList, "Select the monitoring interface from the list","0")
    if Choice == "0":
        # Nothing picked: offer a retry, otherwise bail out.
        Retry = AskQuestion(fcolor.SGreen + "You need to select a monitoring interface to use," + fcolor.BGreen + " retry ?","Y/n","U","Y","1")
        if Retry == "Y":
            return SelectMonitorToUse()
        exit(0)
    # Menu entries are 1-based; the list is 0-based.
    return IFaceList[int(Choice) - 1]
def CheckRequiredFiles():
    """Verify the external tool dependencies (aircrack-ng, tshark) and the
    MAC OUI database; exit when a required tool is missing."""
    FCheck = Run("locate -n 3 aircrack-ng | sed -n '1p'", "0")
    if FCheck == "":
        printc("!!!", "Aircrack-NG suite must be installed inorder to use the Wireless IDS !", "")
        exit(0)
    FCheck = Run("locate -n 3 tshark | sed -n '1p'", "0")
    if FCheck == "":
        # BUGFIX: the original printed the aircrack-ng message here too
        # (copy-paste error) even though the missing tool is tshark.
        printc("!!!", "Tshark (Wireshark) must be installed inorder to use the Wireless IDS !", "")
        exit(0)
    if IsFileDirExist(macoui) != "F":
        printc("!!!", "MAC OUI Database not found !", "")
        # BUGFIX: the original URL was missing the colon ("https//...").
        printc(" ", fcolor.SGreen + "You can download it @ " + fcolor.SBlue + "https://raw2.github.com/SYWorks/wireless-ids/master/mac-oui.db\n", "")
def AddTime(tm, secs):
    """Return *tm* advanced by *secs* seconds, with microseconds dropped."""
    # Rebuild from whole-second components (year..second), discarding
    # any microsecond part, exactly as the original did field-by-field.
    base = datetime.datetime(*tm.timetuple()[:6])
    return base + datetime.timedelta(seconds=secs)
def Percent(val, digits):
    """Format the ratio *val* as a percentage string with *digits* decimal
    places, truncating (not rounding) the extra precision."""
    scaled = floor(val * 10 ** (digits + 2))
    truncated = scaled / float(10 ** digits)
    return '{0:.{1}f} %'.format(truncated, digits)
class Command(object):
    """Run a shell command in a worker thread, terminating it when it is
    still alive after *timeout* seconds."""
    def __init__(self, cmd):
        self.cmd = cmd          # shell command line executed by run()
        self.process = None     # Popen handle, populated once run() starts
    def run(self, timeout):
        def worker():
            printd ("Thread started")
            self.process = subprocess.Popen(self.cmd, shell=True)
            self.process.communicate()
            printd ("Thread Finish")
        runner = threading.Thread(target=worker)
        runner.start()
        runner.join(timeout)
        if runner.is_alive():
            # Still running past the deadline: kill it and wait for exit.
            printd ("Terminating process..")
            self.process.terminate()
            runner.join()
            printd ("Process Terminated")
def ChangeHex(n):
    """Return *n* as a two-digit uppercase hexadecimal string.

    Values above 0xFF keep only their last two hex digits, matching the
    original 26-line recursive implementation (which truncated at every
    recursion level). Callers use it for random MAC address octets (0-254).
    """
    # "%02X" zero-pads to at least two digits; [-2:] keeps the last two,
    # reproducing the original's truncation behaviour for larger values.
    return ("%02X" % n)[-2:]
def SpoofMAC(SELECTED_IFACE,ASSIGNED_MAC):
 """Spoof the MAC address of *SELECTED_IFACE* via ifconfig.

 SELECTED_IFACE -- interface whose hardware address is changed.
 ASSIGNED_MAC   -- target MAC; "" generates a random 00:xx:xx:xx:xx:xx.
 Returns the new MAC on success, the old/current MAC when aborted or
 failed.  NOTE(review): on the successful-change path (NEWADDR matches)
 the function falls through without an explicit return, yielding None —
 looks unintended; verify against callers.
 """
 if ASSIGNED_MAC=="":
  # Build a random locally-assigned-looking MAC with a 00: prefix.
  H1="00"
  H2=ChangeHex(randrange(255))
  H3=ChangeHex(randrange(255))
  H4=ChangeHex(randrange(255))
  H5=ChangeHex(randrange(255))
  H6=ChangeHex(randrange(255))
  ASSIGNED_MAC=str(H1) + ":" + str(H2) + ":" + str(H3) + ":" + str(H4) + ":" + str(H5) + ":" + str(H6)
 Result=""
 # Read the interface's current hardware address from ifconfig output.
 ps=subprocess.Popen("ifconfig " + str(SELECTED_IFACE) + " | grep 'HWaddr' | tr -s ' ' | cut -d ' ' -f5" , shell=True, stdout=subprocess.PIPE, stderr=open(os.devnull, 'w'))
 MACADDR=ps.stdout.read().replace("\n","").upper().replace("-",":")
 MACADDR=MACADDR[:17]
 if str(MACADDR)!=ASSIGNED_MAC:
  printc ("i",fcolor.BRed + "Spoofing [ " + str(SELECTED_IFACE) + " ] MAC Address","")
  printc (" ",fcolor.BBlue + "Existing MAC\t: " + fcolor.BWhite + str(MACADDR),"")
  printc (" ",fcolor.BBlue + "Spoof MAC\t\t: " + fcolor.BWhite + str(ASSIGNED_MAC),"")
  Result=MACADDR
  Ask=AskQuestion("Continue to spoof the MAC Address ?","Y/n","U","Y","0")
  if Ask=="Y":
   # Take the interface down, set the new address, bring it back up.
   ps=subprocess.Popen("ifconfig " + str(SELECTED_IFACE) + " down hw ether " + str(ASSIGNED_MAC) + " > /dev/null 2>&1" , shell=True, stdout=subprocess.PIPE,stderr=open(os.devnull, 'w'))
   ps=subprocess.Popen("ifconfig " + str(SELECTED_IFACE) + " up > /dev/null 2>&1" , shell=True, stdout=subprocess.PIPE,stderr=open(os.devnull, 'w'))
   time.sleep(1)
   # Re-read the address to confirm the change took effect.
   ps=subprocess.Popen("ifconfig " + str(SELECTED_IFACE) + " | grep 'HWaddr' | tr -s ' ' | cut -d ' ' -f5" , shell=True, stdout=subprocess.PIPE)
   NEWADDR=""
   NEWADDR=ps.stdout.read().replace("\n","").upper().replace("-",":")
   NEWADDR=NEWADDR[:17]
   if str(NEWADDR)==str(ASSIGNED_MAC):
    printc (" ",fcolor.BBlue + "MAC Address successfully changed to [ " + fcolor.BYellow + str(ASSIGNED_MAC) + fcolor.BBlue + " ]","")
    Result=str(ASSIGNED_MAC)
   else:
    printc (" ",fcolor.BRed + "Failed to change MAC Address !!","")
    Ask=AskQuestion("Retry with a new MAC Address ?","Y/n","U","Y","0")
    if Ask=="Y":
     # Retry with a freshly generated random MAC.
     Result=SpoofMAC(SELECTED_IFACE,"")
     return Result;
    else:
     printc (" ",fcolor.BRed + "You choose to abort spoofing of MAC address.","")
     printc (" ",fcolor.BBlue + "Using MAC Address [ " + fcolor.BYellow + str(NEWADDR) + fcolor.BBlue + " ]","")
     return Result
  else:
   printc (" ",fcolor.BRed + "You choose to abort spoofing of MAC address.","")
   printc (" ",fcolor.BBlue + "Using MAC Address [ " + fcolor.BYellow + str(MACADDR) + fcolor.BBlue + " ]","")
   return Result
class Command(object):
 """Run a shell command in a background thread and kill it after a timeout.

 NOTE(review): this class is an exact duplicate of the Command class
 defined earlier in this file; being later, this definition is the one
 bound at runtime.  Consider removing one of the two.
 """
 def __init__(self, cmd):
  # cmd: shell command line executed by run().
  self.cmd = cmd
  self.process = None
 def run(self, timeout):
  # Launch the command in a worker thread, wait up to *timeout* seconds,
  # then terminate the process if it is still running.
  def target():
   printd ("Thread started")
   self.process = subprocess.Popen(self.cmd, shell=True)
   self.process.communicate()
   printd ("Thread Finish")
  thread = threading.Thread(target=target)
  thread.start()
  thread.join(timeout)
  if thread.is_alive():
   printd ("Terminating process..")
   self.process.terminate()
   thread.join()
   printd ("Process Terminated")
def CaptureTraffic():
 """Capture wireless traffic for TIMEOUT seconds with airodump-ng + tshark.

 Spawns both tools in their own process groups, waits, then SIGTERMs the
 groups.  Retries recursively when no capture file was produced.
 """
 global pid1
 pid1=""
 captured_pcap=tmpdir + "captured"
 tcpdump_log=tmpdir + "tcpdump.log"
 tcpdump_cap=tmpdir + "tcpdump.cap"
 # Remove leftovers of the previous capture round.
 Result=DelFile(captured_pcap + "*","0")
 Result=DelFile(tcpdump_cap + "*","0")
 TimeOut=TIMEOUT
 TimeOut=float(TIMEOUT)
 mcmd1="airodump-ng " + SELECTED_MON + " -w " + str(captured_pcap) + " > /dev/null 2>&1"
 mcmd2="tshark -i " + str(SELECTED_MON) + " -w " + str(tcpdump_cap) + " -n -t ad -a duration:" + str(TIMEOUT) + " > /dev/null 2>&1"
 # preexec_fn=os.setsid puts each child in its own session so the whole
 # process group can be killed below.
 ps2=subprocess.Popen(mcmd2 , shell=True, stdout=subprocess.PIPE, preexec_fn=os.setsid)
 ps1=subprocess.Popen(mcmd1 , shell=True, stdout=subprocess.PIPE, preexec_fn=os.setsid)
 # printc with a numeric third argument doubles as the capture delay.
 printc ("@",fcolor.SGreen + "Refreshing after " + fcolor.BYellow + str(TimeOut) + fcolor.SGreen + " seconds... please wait..",TimeOut)
 pid1=ps1.pid
 pid2=ps2.pid
 os.killpg(pid1, signal.SIGTERM)
 os.killpg(pid2, signal.SIGTERM)
 time.sleep(0.1)
 ts = time.time()
 DateTimeStamp=datetime.datetime.fromtimestamp(ts).strftime('%d/%m/%Y %H:%M:%S')
 if IsFileDirExist(tcpdump_cap)=="F":
  statinfo = os.stat(tcpdump_cap)
  filesize=statinfo.st_size
  # A capture under 300 bytes is essentially headers only.
  if filesize<300:
   printc ("i","" + "" + fcolor.BYellow + DateTimeStamp + " - " + fcolor.SRed + "Captured packets size is too small... please make sure the monitoring interfaceing is working ...","")
 else:
  # No capture file at all: warn and retry (unbounded recursion by design).
  printc ("!!!", "Couldn't find captured file.. retrying again..","")
  CaptureTraffic()
def GetMACOUI(MACAddr,Display):
 """Look up the vendor (OUI) of *MACAddr* in the mac-oui database file.

 MACAddr -- colon-separated MAC (must be 17 chars to be looked up).
 Display -- "" or "1": print the result via printc and return "";
            anything else: return the formatted text instead.
 NOTE(review): in text mode the function returns on the FIRST match,
 while in display mode it prints every matching line — asymmetric;
 confirm this is intended.
 """
 if Display=="":
  Display="1"
 Result=""
 OUI=""
 if len(MACAddr)==17:
  MACAddrO=MACAddr
  MACAddr=MACAddr.replace(":","")
  if IsFileDirExist(macoui)=="F":
   with open(macoui,'r') as rf:
    elines = rf.readlines()
    for eline in elines:
     eline=eline.replace("\n","")
     # Database format: "<OUI-prefix> <vendor name>" per line.
     OUI_MAC =eline.split(' ')[0]
     lOUI_MAC=len(OUI_MAC)
     if len(OUI_MAC)>0:
      if MACAddr[:lOUI_MAC] in eline:
       lOUI_MAC=lOUI_MAC+1
       OUI=eline[lOUI_MAC:]
       if Display=="1":
        printc (" ",fcolor.SWhite + "[ " + fcolor.SGreen + str(MACAddrO) + fcolor.SWhite + " ]'s MAC OUI belongs to [ " + fcolor.SYellow + str(OUI) + fcolor.BWhite + " ].","")
       else:
        Result=" " + fcolor.SWhite + "[ " + fcolor.SGreen + str(MACAddrO) + fcolor.SWhite + " ]'s MAC OUI belongs to [ " + fcolor.SYellow + str(OUI) + fcolor.BWhite + " ]."
        return Result
 # No match found (or malformed MAC).
 if Display=="1":
  printc (" ",fcolor.BIGray + "[ " + fcolor.BBlue + str(MACAddrO) + fcolor.BIGray + " ]'s MAC OUI is not found in MAC OUI Database.","")
 else:
  # NOTE(review): this branch still says "belongs to" with an empty OUI —
  # looks like a copy-paste of the found-message; verify intent.
  Result=" " + fcolor.SWhite + "[ " + fcolor.SGreen + str(MACAddrO) + fcolor.SWhite + " ]'s MAC OUI belongs to [ " + fcolor.SYellow + str(OUI) + fcolor.BWhite + " ]."
 return Result
def GetEncryptType(AFMAC):
 """Return the Privacy (encryption) field for BSSID *AFMAC* from the
 airodump-ng CSV dump (CapturedListing.csv); "" when not found."""
 Privacy=""
 captured_csv=tmpdir + "CapturedListing.csv"
 if IsFileDirExist(captured_csv)=="F" and AFMAC!="":
  ModiESSID=""
  CLIENTS=""
  with open(captured_csv,"r") as f:
   for line in f:
    line=line.replace("\n","")
    line=line.replace("\00","")
    # Only the first matching row is parsed; CLIENTS flips to 1 after it.
    if len(line)>10 and line.find(str(AFMAC))!=-1 and CLIENTS!=1:
     # Pad the row so the fixed CList indexes below never go out of range.
     line=line + " ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., "
     line=line.replace("\r","")
     CList=line.split(",")
     FMAC=line.split()[0].replace(',','')
     FS1=line.split()[0].replace(',','')
     FS2=line.split()[1].replace(',','')
     FS=str(FS1) + " " + str(FS2)
     Privacy=CList[5].lstrip().rstrip()
     Cipher=CList[6].lstrip().rstrip()
     Authentication=CList[7].lstrip().rstrip()
     Power=CList[8].lstrip().rstrip()
     ESSID=CList[13].lstrip().rstrip().replace("\n","")
     SMAC=CList[5].lstrip().rstrip()
     # Normalise airodump's space-joined privacy variants.
     Privacy=Privacy.replace('WPA2WPA OPN','WPA2WPA (OPN)')
     Privacy=Privacy.replace('WPA2 OPN','WPA2 (OPN)')
     Privacy=Privacy.replace('WPA OPN','WPA (OPN)')
     Privacy=Privacy.replace('WPA2WPA','WPA2/WPA')
     Privacy=Privacy.replace('WEP OPN','WEP (OPN)')
     Cipher=Cipher.replace('CCMP TKIP','CCMP/TKIP')
     CLIENTS=1
 return Privacy
def GetMACDetail(FrMAC,ToMAC,AType,PDisplay):
 """Describe the sender/receiver MACs of a frame from the airodump CSV.

 FrMAC/ToMAC -- source and destination MAC addresses of the frame.
 AType       -- analysis type; "RAP" additionally resolves the most
                frequently seen ESSID for FrMAC from essidcount.log.
 PDisplay    -- "": print via printc; otherwise accumulate text in the
                global MACDetail.
 Returns "general, privacy, cipher, authentication" as one string.
 Side effects: rewrites ESSID.log, sets globals CList/Privacy/Cipher/
 MACDetail.
 """
 global CList
 CList=[]
 global Privacy
 global Cipher
 Privacy=""
 PrivacyBK=""
 Cipher=""
 CipherBK=""
 AuthenticationBK=""
 global MACDetail
 MACResult=""
 MACDetail=""
 CLIENTS=0
 captured_csv=tmpdir + "CapturedListing.csv"
 ESSID_log=tmpdir + "ESSID.log"
 essidfile=tmpdir + "essidcount.log"
 ESSIDCt=[]
 if IsFileDirExist(captured_csv)=="F":
  ModiESSID=""
  with open(captured_csv,"r") as f:
   for line in f:
    line=line.replace("\n","")
    line=line.replace("\00","")
    if len(line)>10:
     # Pad the row so the fixed CList indexes below never go out of range.
     line=line + " ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., "
     line=line.replace("\r","")
     CList=line.split(",")
     FMAC=line.split()[0].replace(',','')
     FS1=line.split()[0].replace(',','')
     FS2=line.split()[1].replace(',','')
     FS=str(FS1) + " " + str(FS2)
     Privacy=CList[5].lstrip().rstrip()
     Cipher=CList[6].lstrip().rstrip()
     Authentication=CList[7].lstrip().rstrip()
     Power=CList[8].lstrip().rstrip()
     ESSID=CList[13].lstrip().rstrip().replace("\n","")
     SMAC=CList[5].lstrip().rstrip()
     # Normalise airodump's space-joined privacy variants.
     Privacy=Privacy.replace('WPA2WPA OPN','WPA2WPA (OPN)')
     Privacy=Privacy.replace('WPA2 OPN','WPA2 (OPN)')
     Privacy=Privacy.replace('WPA OPN','WPA (OPN)')
     Privacy=Privacy.replace('WPA2WPA','WPA2/WPA')
     Privacy=Privacy.replace('WEP OPN','WEP (OPN)')
     Cipher=Cipher.replace('CCMP TKIP','CCMP/TKIP')
     # The CSV has two sections: APs first, then a "Station MAC" header
     # introducing the client rows.
     if FS=="Station MAC":
      CLIENTS=1
     if len(FMAC)==17:
      if AType=="RAP":
       # Rogue-AP mode: pick the most frequently counted ESSID for FrMAC.
       MAXCt=0
       MAXName=""
       with open(essidfile,'r+') as rf:
        elines = rf.readlines()
        rf.seek(0)
        rf.truncate()
        for eline in elines:
         eline=eline.replace("\n","")
         if FrMAC in eline:
          ED_MAC =eline.split(', ')[0]
          ED_NAME=eline.split(', ')[1]
          ED_CT = eline.split(', ')[2]
          if int(ED_CT)>MAXCt:
           MAXCt=ED_CT
           MAXName=ED_NAME
         rf.write(eline + "\n")
       if MAXName!="":
        ESSID=MAXName
      if FrMAC.find(str(FMAC))!=-1:
       # The CSV row belongs to the frame's SOURCE address.
       CMAC=""
       if ToMAC=="FF:FF:FF:FF:FF:FF":
        ToMAC="Broadcast"
       if CLIENTS!=1 and Privacy!="":
        if ESSID=="":
         ESSID=fcolor.IGray + "<<No ESSID>>"
        CMAC="1"
        if FMAC!="FF:FF:FF:FF:FF:FF" and FMAC!="Broadcast" and FMAC!="(not associated)" and ESSID!=fcolor.IGray + "<<No ESSID>>":
         if PDisplay=="":
          printc (" ",fcolor.BWhite + "[ " + fcolor.BBlue + str(FMAC) + fcolor.BWhite + " ]'s SSID Name is [ " + fcolor.BBlue + str(ESSID) + fcolor.BWhite + " ] and Privicy=" + fcolor.BRed + str(Privacy) + fcolor.BWhite + " Cipher=" + fcolor.BRed + str(Cipher) + fcolor.BWhite + " Authentication=" + fcolor.BRed + str(Authentication) + fcolor.BWhite + " Power=" + fcolor.BRed + str(Power) + fcolor.BWhite + "","")
         else:
          MACDetail=MACDetail + printc (" ","<$rs$>" + fcolor.BWhite + "[ " + fcolor.BBlue + str(FMAC) + fcolor.BWhite + " ]'s SSID Name is [ " + fcolor.BBlue + str(ESSID) + fcolor.BWhite + " ] and Privicy=" + fcolor.BRed + str(Privacy) + fcolor.BWhite + " Cipher=" + fcolor.BRed + str(Cipher) + fcolor.BWhite + " Authentication=" + fcolor.BRed + str(Authentication) + fcolor.BWhite + " Power=" + fcolor.BRed + str(Power) + fcolor.BWhite + "","") + "\n"
         PrivacyBK=Privacy
         CipherBK=Cipher
         AuthenticationBK=Authentication
       if SMAC!="" and CLIENTS==1:
        # Client row: SMAC is the AP the station is associated with.
        if SMAC=="(not associated)":
         if PDisplay=="":
          printc (" ",fcolor.SGreen + "[ " + fcolor.SWhite + str(FMAC) + fcolor.SGreen + " ] is not associated access point.","")
         else:
          MACDetail=MACDetail + printc (" ","<$rs$>" + fcolor.SGreen + "[ " + fcolor.SWhite + str(FMAC) + fcolor.SGreen + " ] is not associated access point.","") + "\n"
         CMAC="1"
        else:
         if PDisplay=="":
          printc (" ",fcolor.BWhite + "[ " + fcolor.BCyan + str(FMAC) + fcolor.BWhite + " ] is associated with access point [ " + fcolor.BCyan + str(SMAC) + fcolor.BWhite + " ]","")
          GetMACOUI(SMAC,"")
         else:
          MACDetail=MACDetail + printc (" ","<$rs$>" + fcolor.BWhite + "[ " + fcolor.BCyan + str(FMAC) + fcolor.BWhite + " ] is associated with access point [ " + fcolor.BCyan + str(SMAC) + fcolor.BWhite + " ]","") + "\n"
          MACDetail=MACDetail + GetMACOUI(SMAC,"0") + "\n"
         if PDisplay=="":
          RESSID=GetESSID(SMAC)
         else:
          RESSID=GetESSIDOnlyText(SMAC)
         if RESSID!="":
          MACDetail=MACDetail + str(RESSID) + "\n"
         CMAC="1"
       if CMAC=="1":
        if PDisplay=="":
         GetMACOUI(FMAC,"")
        else:
         MACDetail=MACDetail + GetMACOUI(FMAC,"0") + "\n"
      if FrMAC.find(str(SMAC))!=-1:
       # The frame's source matches this client row's AP column.
       if FMAC!="" and CLIENTS==1 and SMAC!="(not associated)":
        if PDisplay=="":
         printc (" ",fcolor.BWhite + "[ " + fcolor.BCyan + str(FMAC) + fcolor.BWhite + " ] is associated with client [ " + fcolor.BCyan + str(SMAC) + fcolor.BWhite + " ]","")
         GetMACOUI(FMAC,"")
        else:
         MACDetail=MACDetail + printc (" ","<$rs$>" + fcolor.BWhite + "[ " + fcolor.BCyan + str(FMAC) + fcolor.BWhite + " ] is associated with client [ " + fcolor.BCyan + str(SMAC) + fcolor.BWhite + " ]","") + "\n"
         MACDetail=MACDetail + GetMACOUI(FMAC,"0") + "\n"
      if ToMAC.find(str(FMAC))!=-1:
       # The CSV row belongs to the frame's DESTINATION address.
       if CLIENTS!=1:
        if ESSID=="":
         ESSID=fcolor.IGray + "<<No ESSID>>"
        if FMAC!="FF:FF:FF:FF:FF:FF" and FMAC!="Broadcast" and FMAC!="(not associated)" and ESSID!=fcolor.IGray + "<<No ESSID>>":
         if PDisplay=="":
          printc (" ",fcolor.BWhite + "[ " + fcolor.BBlue + str(FMAC) + fcolor.BWhite + " ]'s SSID Name is [ " + fcolor.BBlue + str(ESSID) + fcolor.BWhite + " ] and Privicy=" + fcolor.BRed + str(Privacy) + fcolor.BWhite + " Cipher=" + fcolor.BRed + str(Cipher) + fcolor.BWhite + " Authentication=" + fcolor.BRed + str(Authentication) + fcolor.BWhite + " Power=" + fcolor.BRed + str(Power) + fcolor.BWhite + "","")
          GetMACOUI(FMAC,"")
         else:
          MACDetail=MACDetail + printc (" ","<$rs$>" + fcolor.BWhite + "[ " + fcolor.BBlue + str(FMAC) + fcolor.BWhite + " ]'s SSID Name is [ " + fcolor.BBlue + str(ESSID) + fcolor.BWhite + " ] and Privicy=" + fcolor.BRed + str(Privacy) + fcolor.BWhite + " Cipher=" + fcolor.BRed + str(Cipher) + fcolor.BWhite + " Authentication=" + fcolor.BRed + str(Authentication) + fcolor.BWhite + " Power=" + fcolor.BRed + str(Power) + fcolor.BWhite + "","") + "\n"
          MACDetail=MACDetail + GetMACOUI(FMAC,"0") + "\n"
         PrivacyBK=Privacy
         CipherBK=Cipher
         AuthenticationBK=Authentication
       else:
        if SMAC!="":
         if SMAC=="(not associated)":
          if PDisplay=="":
           printc (" ",fcolor.SGreen + "[ " + fcolor.SWhite + str(FMAC) + fcolor.SGreen + " ] is not associated access point.","")
          else:
           MACDetail=MACDetail + printc (" ","<$rs$>" + fcolor.SGreen + "[ " + fcolor.SWhite + str(FMAC) + fcolor.SGreen + " ] is not associated access point.","") + "\n"
          if FMAC!="FF:FF:FF:FF:FF:FF" and FMAC!="Broadcast" and FMAC!="(not associated)":
           if PDisplay=="":
            GetMACOUI(FMAC,"")
            RESSID=GetESSID(FMAC)
           else:
            MACDetail=MACDetail + GetMACOUI(FMAC,"0") + "\n"
            RESSID=GetESSIDOnlyText(FMAC)
           if RESSID!="":
            MACDetail=MACDetail + str(RESSID) + "\n"
         else:
          if PDisplay=="":
           printc (" ",fcolor.BWhite + "[ " + fcolor.BCyan + str(FMAC) + fcolor.BWhite + " ] is associated with access point [ " + fcolor.BCyan + str(SMAC) + fcolor.BWhite + " ]","")
           GetMACOUI(SMAC,"")
          else:
           MACDetail=MACDetail + printc (" ","<$rs$>" + fcolor.BWhite + "[ " + fcolor.BCyan + str(FMAC) + fcolor.BWhite + " ] is associated with access point [ " + fcolor.BCyan + str(SMAC) + fcolor.BWhite + " ]","") + "\n"
           MACDetail=MACDetail + GetMACOUI(SMAC,"0") + "\n"
          if SMAC!="FF:FF:FF:FF:FF:FF" and SMAC!="Broadcast" and SMAC!="(not associated)":
           if PDisplay=="":
            RESSID=GetESSID(SMAC)
           else:
            # NOTE(review): uses FMAC here while the display branch uses
            # SMAC — possible copy-paste slip; confirm.
            RESSID=GetESSIDOnlyText(FMAC)
           if RESSID!="":
            MACDetail=MACDetail + str(RESSID) + "\n"
     ESSID=ESSID.lstrip().rstrip().replace("\r","").replace("\n","")
     if CLIENTS!=1:
      # Track AP -> ESSID in ESSID.log and flag renames (possible evil twin).
      SkipESSID=0
      if IsFileDirExist(ESSID_log)!="F":
       open(ESSID_log,"wb").write("" )
      else:
       with open(ESSID_log,"r") as f:
        for line in f:
         line=line.replace("\n","")
         if len(line)>=18:
          if line.find(FMAC)!=-1:
           SkipESSID=1
           FileESSID=line.replace(FMAC,"").replace("\t","")
           FileESSID=FileESSID.lstrip().rstrip().replace("\r","").replace("\n","")
      if SkipESSID==0 and ESSID!="":
       if FMAC!="BSSID":
        open(ESSID_log,"a+b").write("" + str(FMAC) + "\t" + str(ESSID) + "\n")
      if SkipESSID==1 and ESSID!="":
       if FileESSID!=ESSID:
        ModiESSID=ModiESSID + fcolor.BGreen + "ESSID of [ " + fcolor.BBlue + str(FMAC) + fcolor.BGreen + " ] changed from [ " + fcolor.BRed + str(FileESSID) + fcolor.BGreen + " ] to [ " + fcolor.BRed + str(ESSID) + fcolor.BGreen + " ].\n"
     if len(Privacy)==17:
      # Privacy column actually holds a MAC (client row): resolve its AP's
      # encryption type instead.
      Privacy=GetEncryptType(Privacy)
      PrivacyBK=Privacy
     else:
      if Privacy=="" or Privacy=="(not associated)":
       Privacy=GetEncryptType(FMAC)
       PrivacyBK=Privacy
 PrivacyBK=PrivacyBK.lstrip().rstrip()
 CipherBK=CipherBK.lstrip().rstrip()
 AuthenticationBK=AuthenticationBK.lstrip().rstrip()
 # Collapse the detailed privacy string into a coarse class (WPA/WEP/other).
 if PrivacyBK!="" and PrivacyBK.find("WPA")!=-1:
  if PrivacyBK.find("WEP")!=-1:
   if CipherBK.find("WEP")!=-1:
    PrivacyGeneral="WEP"
   else:
    PrivacyGeneral="WPA"
  else:
   PrivacyGeneral="WPA"
 else:
  PrivacyGeneral=PrivacyBK
 PrivacyGeneral=PrivacyGeneral.lstrip().rstrip()
 return PrivacyGeneral + ", " + str(PrivacyBK) + ", " + str(CipherBK) + ", " + str(AuthenticationBK)
def RemoveUnwantMAC(MACAddr):
    """Filter a " / "-separated MAC list, dropping broadcast/multicast
    addresses, reserved prefixes and our own address (global MyMAC).
    Returns the survivors re-joined with " / " ("" when none remain)."""
    # Prefixes that identify broadcast, IPv6/IPv4 multicast and 802.1D
    # reserved group addresses.  "FF:" also covers "FF:FF:FF:FF:".
    DropPrefixes = ("FF:FF:FF:FF:", "33:33:", "01:80:C2:", "01:00:5E:", "FF:")
    Kept = []
    for Entry in MACAddr.split("/"):
        Entry = Entry.lstrip().rstrip()
        if Entry == "" or Entry == MyMAC:
            continue
        if Entry.startswith(DropPrefixes):
            continue
        Kept.append(Entry)
    return " / ".join(Kept)
def GetAPDetail(MAC_Addr,ESSIDName):
 """Build a two-line coloured summary (privacy/cipher/auth + channel/speed/
 power + client count) for AP *MAC_Addr* from CapturedListing.csv.

 NOTE(review): ReturnTxt1/ReturnTxt2 are only assigned when an AP row
 matches; if only a client row matches, the concatenation below would
 raise UnboundLocalError — confirm callers guarantee an AP row exists.
 """
 ReturnTxt=""
 newcaptured=tmpdir + "CapturedListing.csv"
 CLIENTS=""
 ClientCt=0
 with open(newcaptured,"r") as f:
  for line in f:
   line=line.replace("\n","")
   line=line.replace("\00","")
   if len(line)>5:
    # Pad the row so the fixed CList indexes below never go out of range.
    line=line + " ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., "
    line=line.replace("\r","")
    CList=line.split(",")
    FMAC=line.split()[0].replace(',','')
    FS1=line.split()[0].replace(',','')
    FS2=line.split()[1].replace(',','')
    FS=str(FS1) + " " + str(FS2)
    Channel=CList[3].lstrip().rstrip()
    Speed=CList[4].lstrip().rstrip()
    Power=CList[8].lstrip().rstrip()
    Privacy=CList[5].lstrip().rstrip()
    Cipher=CList[6].lstrip().rstrip()
    Authentication=CList[7].lstrip().rstrip()
    Power=CList[8].lstrip().rstrip()
    ESSID=CList[13].lstrip().rstrip().replace("\n","")
    SMAC=CList[5].lstrip().rstrip()
    # Normalise airodump's space-joined privacy variants.
    Privacy=Privacy.replace('WPA2WPA OPN','WPA2/WPA (OPN)')
    Privacy=Privacy.replace('WPA2 OPN','WPA2 (OPN)')
    Privacy=Privacy.replace('WPA OPN','WPA (OPN)')
    Privacy=Privacy.replace('WPA2WPA','WPA2/WPA')
    Privacy=Privacy.replace('WEP OPN','WEP (OPN)')
    Cipher=Cipher.replace('CCMP TKIP','CCMP/TKIP')
    ESSID=CheckSSIDChr(ESSID)
    # "Station MAC" header separates AP rows from client rows.
    if FS=="Station MAC":
     CLIENTS="1"
    if MAC_Addr==FMAC and CLIENTS=="":
     # AP row for the requested BSSID: format the detail lines.
     lblcolor=fcolor.BGreen
     txtcolor=fcolor.BWhite
     lblcolor2=fcolor.BIBlue
     txtcolor2=fcolor.BIYellow
     ltxtcolor=fcolor.SWhite
     ltxtcolor2=fcolor.SYellow
     if Power!="" and Power!="-1":
      # Convert dBm (negative) into a 0-100 style figure.
      Power=Power.replace("-","").lstrip().rstrip()
      Power=100-int(Power)
     ReturnTxt1=lblcolor + "   Privacy : " + txtcolor + str(Privacy).ljust(15) + lblcolor + "Cipher  : " + txtcolor + str(Cipher).ljust(12) + lblcolor + "Auth   : " + txtcolor + str(Authentication).ljust(8) + lblcolor + "ESSID : " + txtcolor + str(ESSIDName) + "\n"
     ReturnTxt2=lblcolor2 + "Channel : " + txtcolor2 + str(Channel).ljust(15) + lblcolor2 + "Speed   : " + txtcolor2 + str(Speed) + ltxtcolor2 + " MB".ljust(10) + lblcolor2 + "Power  : " + txtcolor2 + str(Power).ljust(8) + "\n"
    if CLIENTS=="1":
     # Count stations associated with the requested AP.
     if SMAC==MAC_Addr:
      ClientCt=ClientCt+1
 ReturnTxt=ReturnTxt1 + lblcolor2 + "   Client  : " + txtcolor2 + str(ClientCt) + ltxtcolor2 + " client".ljust(20) + ReturnTxt2
 return ReturnTxt;
def CheckSimilarESSID():
 """Report access points that advertise the same ESSID from different
 BSSIDs (potential evil-twin APs), printing and optionally logging them.

 Reads the global BSSIDList/ESSIDList pairs; fills BSSIDListA/ESSIDListA
 (reset here) and appends to LogFile when PrintToFile=="1".
 """
 x=0
 global BSSIDListA
 global ESSIDListA
 SimilarName=""
 BSSIDListA=[]
 ESSIDListA=[]
 tl=len(ESSIDList)
 # O(n^2) pairwise comparison of every AP against every later AP.
 while x<tl:
  FoundName="1"
  sl=len(ESSIDList)
  y=x+1
  FoundName=""
  while y<sl:
   # Same non-empty ESSID, different BSSID, not already reported.
   if ESSIDList[y]==ESSIDList[x] and ESSIDList[x]!="" and ESSIDList[x]!="." and BSSIDList[x]!=BSSIDList[y] and SimilarName.find(BSSIDList[y])==-1:
    lblcolor=fcolor.BGreen
    txtcolor=fcolor.BWhite
    lblcolor2=fcolor.BIBlue
    txtcolor2=fcolor.BIYellow
    if FoundName=="":
     # First duplicate for this ESSID: emit the reference AP once.
     APResult=GetAPDetail(BSSIDList[x],ESSIDList[x])
     Text1=lblcolor + "BSSID   : " + txtcolor + str(BSSIDList[x]) + str(APResult) # + lblcolor + "ESSID : " + txtcolor + str(ESSIDList[x]) + "\n"
     FoundName="1"
     SimilarName = SimilarName + "     " + Text1
    APResult=GetAPDetail(BSSIDList[y],ESSIDList[y])
    Text2=lblcolor + "BSSID   : " + txtcolor + str(BSSIDList[y]) + str(APResult) + "" + fcolor.Black + ""
    SimilarName = SimilarName + "     " + Text2
    FoundName="1"
   y=y+1
  if FoundName=="1":
   # Separator between groups of same-named APs.
   SimilarName = SimilarName + "     " + fcolor.Black + "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"
   FoundName=""
  x=x+1
 if SimilarName!="":
  printc (" ","","")
  printc ("i",fcolor.BUCyan + "Access Point Using The Same Name" + fcolor.CReset,"")
  print SimilarName
  if PrintToFile=="1":
   open(LogFile,"a+b").write(RemoveColor(str(SimilarName)) + "\n")
def AnalyseCaptured():
global List_FrMAC
global List_ToMAC
global List_Data
global List_Auth
global List_Deauth
global List_Assoc
global List_Reassoc
global List_Disassoc
global List_RTS
global List_CTS
global List_ACK
global List_EAPOL
global List_WPS
global List_Beacon
global List_SSID
global List_SSIDCT
global List_IsAP
global List_PResp
global List_PReq
global List_ProbeName
global List_NULL
global List_QOS
global List_Data86
global List_Data98
global List_Data94
global MACDetail
MACDetail=""
List_FrMAC=[]
List_ToMAC=[]
List_Data=[]
List_Data86=[]
List_Data98=[]
List_Data94=[]
List_Auth=[]
List_Deauth=[]
List_Assoc=[]
List_Reassoc=[]
List_Disassoc=[]
List_RTS=[]
List_CTS=[]
List_ACK=[]
List_EAPOL=[]
List_WPS=[]
List_Beacon=[]
List_SSID=[]
List_SSIDCT=[]
List_IsAP=[]
List_PResp=[]
List_PReq=[]
List_ProbeName=[]
List_NULL=[]
List_QOS=[]
BAK_FR_MAC=""
essidfile=tmpdir + "essidcount.log"
macfile=tmpdir + "macadrcount.log"
tcpdump_log=tmpdir + "tcpdump.log"
resultlog=tmpdir + "Result.log"
resultlist=tmpdir + "ResultList.log"
open(essidfile,"wb").write("")
open(macfile,"wb").write("")
linecount=0
if IsFileDirExist(tcpdump_log)!="F":
printc ("!!!","Converted file not found ..","")
retrun
open(resultlog,"wb").write(tcpdump_log + "\n")
TotalLine=GetFileLine(tcpdump_log,"0")
BRes=0
DisplayCt=0
with open(tcpdump_log,"r") as f:
for line in f:
linecount=linecount+1
DisplayCt=DisplayCt+1
if DisplayCt>50:
completed=Percent(linecount / float(TotalLine),2)
BRes=printl(fcolor.SGreen + "Analysing Packets : " + str(completed) + ".." ,"2",BRes)
DisplayCt=0
line=line.replace("\n","")
line=line.replace("(TA)","")
line=line.replace("(RA)","")
line=line.replace("(BSSID)","")
sl=len(line)
if sl>=15:
line=line.replace("[Malformed Packet]", "")
line=line + ", ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., .,"
line=line.replace("\r","")
FoundType=""
STYPE=""
DTYPE=""
DTYPE2=""
DTYPE3=""
SSID=""
PSSID=""
AESSID=""
FR_MAC=line.split()[3].replace(',','').upper()
TO_MAC=line.split()[5].replace(',','').upper()
TO_MAC2=line.split()[4].replace(',','').upper()
DTYPE=line.split()[8].replace(',','').replace(')','').upper()
DTYPE2=line.split()[7].replace(',','').replace('(','').upper()
DTYPE3=line.split()[9].replace(',','').replace('(','').upper()
WPS1=line.split()[6].replace(',','').replace('(','').upper()
WPS2=line.split()[11].replace(',','').replace('(','').upper()
WPS3=line.split()[12].replace(',','').replace('(','').upper()
SSID=line.split(', ')[5].replace(',','').replace('(','')
PSSID=line.split(', ')[4].replace(',','').replace('(','')
ATO_MAC=""
if len(TO_MAC)==17:
ATO_MAC=TO_MAC
if len(TO_MAC2)==17:
ATO_MAC=TO_MAC2
WPS2=WPS2.replace("FLAGS=","")
WPS3=WPS3.replace("FLAGS=","")
if SSID==".":
SSID=""
if PSSID!="" and PSSID[:5]=="SSID=":
if PSSID[-18:]=="[Malformed Packet]":
PSSID=PSSID[:-18]
PSSID=PSSID[5:]
else:
PSSID=""
if SSID!="" and SSID[:5]=="SSID=":
if SSID[-18:]=="[Malformed Packet]":
print "Found [Malformed Packet]"
SSID=SSID[:-18]
SSID=SSID[5:]
AESSID=SSID
SSID=CheckSSIDChr(SSID)
if line.find(str('EAPOL'))!=-1:
DTYPE=line.split()[6].replace(',','').replace(')','').upper()
if len(FR_MAC)==17 and len(TO_MAC)==17:
FoundType=1
STYPE=DTYPE
if len(TO_MAC2)==17:
FoundType=2
STYPE=DTYPE2
if len(FR_MAC)!=17 and len(TO_MAC)!=17 and len(TO_MAC2)!=17:
FoundType=3
STYPE=DTYPE2
DTYPEA=str(DTYPE2) + " " + str(DTYPE)
if DTYPEA=="RESERVED FRAME":
STYPE=DTYPEA
if DTYPE=="NULL" and DTYPE3=="FUNCTION":
DTYPEA=str(DTYPE) + " " + str(DTYPE3)
STYPE=DTYPEA + ""
if DTYPE=="BEACON" and DTYPE3=="FRAME":
DTYPEA=str(DTYPE) + " " + str(DTYPE3)
STYPE=DTYPEA
FOUND_REC=""
if SSID!="" and len(FR_MAC)==17:
with open(essidfile,'r+') as essidf:
elines = essidf.readlines()
essidf.seek(0)
essidf.truncate()
for eline in elines:
eline=eline.replace("\n","")
if FR_MAC in eline:
ED_MAC =eline.split(', ')[0] #.replace(',','')
ED_NAME=eline.split(', ')[1] #.replace(',','')
ED_CT = eline.split(', ')[2] #.replace(',','')
if ED_NAME==SSID:
ED_CT=int(ED_CT)+1
eline=str(FR_MAC) + ", " + str(SSID) + ", " + str(ED_CT)
FOUND_REC=1
essidf.write(eline + "\n")
if FOUND_REC=="":
essidf.write(FR_MAC + ", " + SSID + ", 1")
FOUND_REC=""
if len(FR_MAC)==17 and len(ATO_MAC)==17:
with open(macfile,'r+') as rf:
elines = rf.readlines()
rf.seek(0)
rf.truncate()
for eline in elines:
eline=eline.replace("\n","")
if FR_MAC in eline:
ED_FRMAC =eline.split(', ')[0].replace(',','')
ED_TOMAC=eline.split(', ')[1].replace(',','')
ED_CT = eline.split(', ')[2].replace(',','')
if ED_TOMAC==ATO_MAC:
ED_CT=int(ED_CT)+1
eline=str(FR_MAC) + ", " + str(ATO_MAC) + ", " + str(ED_CT)
FOUND_REC=1
rf.write(eline + "\n")
if FOUND_REC=="":
rf.write(FR_MAC + ", " + ATO_MAC + ", 1")
DTYPEA=str(DTYPE) + " " + str(DTYPE3)
if DTYPEA=="PROBE RESPONSE":
STYPE=DTYPEA
if DTYPEA=="PROBE REQUEST":
STYPE=DTYPEA
if WPS1=="EAP" and WPS2=="WPS":
STYPE="WPS"
if str(TO_MAC)=="FF:FF:FF:FF:FF:FF":
BCast=1
else:
BCast=0
if len(FR_MAC)!=17:
FR_MAC=""
if len(TO_MAC)!=17 and len(TO_MAC2)==17:
TO_MAC=TO_MAC2
if len(TO_MAC2)!=17:
TO_MAC2=""
if len(TO_MAC)!=17:
TO_MAC=""
if FR_MAC!="":
BAK_FR_MAC=FR_MAC
open(resultlog,"a+b").write("Line : " + str(line) + "\n")
open(resultlog,"a+b").write("FoundType : " + str(FoundType) + "\n")
open(resultlog,"a+b").write("STYPE : " + str(STYPE) + "\n")
open(resultlog,"a+b").write("BCast : " + str(BCast) + "\n")
open(resultlog,"a+b").write("FR_MAC : " + str(FR_MAC) + " = " + str(len(FR_MAC))+ "\n")
open(resultlog,"a+b").write("TO_MAC : " + str(TO_MAC) + " = " + str(len(TO_MAC)) + "\n")
open(resultlog,"a+b").write("TO_MAC2 : " + str(TO_MAC2) + str(len(TO_MAC2)) +"\n")
open(resultlog,"a+b").write("DTYPE : " + str(DTYPE) + "\n")
open(resultlog,"a+b").write("DTYPE2 : " + str(DTYPE2) + "\n")
open(resultlog,"a+b").write("DTYPE3 : " + str(DTYPE3) + "\n")
open(resultlog,"a+b").write("WPS1 : " + str(WPS1) + "\n")
open(resultlog,"a+b").write("WPS2 : " + str(WPS2) + "\n")
open(resultlog,"a+b").write("WPS3 : " + str(WPS3) + "\n")
open(resultlog,"a+b").write("SSID : " + str(SSID) + "\n")
open(resultlog,"a+b").write("PSSID : " + str(PSSID) + "\n")
open(resultlog,"a+b").write("AESSID: " + str(AESSID) + "\n")
open(resultlog,"a+b").write("-----------------------------------------------------" + "\n")
GET_DATA="0"
GET_AUTH="0"
GET_DEAUTH="0"
GET_DISASSOC="0"
GET_REASSOC="0"
GET_ASSOC="0"
GET_RTS="0"
GET_CTS="0"
GET_ACK="0"
GET_EAPOL="0"
GET_WPS="0"
GET_BEACON="0"
GET_PRESP="0"
GET_PRQX="0"
GET_NULL="0"
GET_QOS="0"
GET_DATA86="0"
GET_DATA98="0"
GET_DATA94="0"
if STYPE=="DATA" or STYPE=="QOS":
if TO_MAC=="FF:FF:FF:FF:FF:FF":
GET_DATA="1"
if STYPE=="DATA":
if DTYPE2=="71" or DTYPE2=="73":
if TO_MAC[:9]=="01:00:5E:":
GET_DATA="1"
if STYPE=="DATA":
if DTYPE2=="98" and WPS2==".P....F.C":
GET_DATA98="1"
if STYPE=="DATA":
if DTYPE2=="94" and WPS2==".P...M.TC":
GET_DATA94="1"
if STYPE=="DATA" and WPS2==".P.....TC":
if FR_MAC[9:]==":00:00:00":
GET_DATA86="1"
if STYPE=="DATA":
if TO_MAC[:9]=="FF:F3:18:":
# print "DTYPE2 : " + str(DTYPE2)
GET_DATA="1"
if STYPE=="QOS": # DTYPE2=="103":
if WPS3==".P....F.C" or WPS2==".P....F.C":
GET_QOS="1"
if STYPE=="AUTHENTICATION":
GET_AUTH="1"
if STYPE=="DEAUTHENTICATION":
GET_DEAUTH="1"
if STYPE=="DISASSOCIATE":
GET_DISASSOC="1"
if STYPE=="ASSOCIATION":
GET_ASSOC="1"
if STYPE=="REASSOCIATION":
GET_REASSOC="1"
if STYPE=="REQUEST-TO-SEND":
GET_RTS="1"
if STYPE=="CLEAR-TO-SEND":
GET_CTS="1"
if STYPE=="ACKNOWLEDGEMENT":
GET_ACK="1"
if STYPE=="BEACON FRAME":
GET_BEACON="1"
open(essidfile,"a+b").write("")
if STYPE=="EAPOL":
GET_EAPOL="1"
if STYPE=="WPS":
GET_WPS="1"
if STYPE=="PROBE RESPONSE":
GET_PRESP="1"
if STYPE=="PROBE REQUEST":
GET_PRQX="1"
if STYPE=="NULL FUNCTION":
GET_NULL="1"
if STYPE=="DATA" or STYPE=="QOS" or STYPE=="AUTHENTICATION" or STYPE=="DEAUTHENTICATION" or STYPE=="ASSOCIATION" or STYPE=="DISASSOCIATE" or STYPE=="REASSOCIATION" or STYPE=="REQUEST-TO-SEND" or STYPE=="CLEAR-TO-SEND" or STYPE=="ACKNOWLEDGEMENT" or STYPE=="EAPOL" or STYPE=="WPS" or STYPE=="BEACON FRAME" or STYPE=="PROBE RESPONSE" or STYPE=="PROBE REQUEST" or STYPE=="NULL FUNCTION":
ListSR=0
ExistList=-1
ListLen=len(List_FrMAC)
if ListLen!=0:
while ListSR<ListLen:
if len(FR_MAC)==17 and len(TO_MAC)==17:
if List_FrMAC[ListSR]==FR_MAC and List_ToMAC[ListSR].find(TO_MAC)!=-1:
ExistList=ListSR
if List_FrMAC[ListSR]==FR_MAC and List_ToMAC[ListSR].find(TO_MAC)==-1 and ExistList==-1:
List_ToMAC[ListSR]=List_ToMAC[ListSR] + " / " + str(TO_MAC)
ExistList=ListSR
if len(FR_MAC)==0 and len(TO_MAC)==17 and ExistList==-1:
if List_FrMAC[ListSR]==TO_MAC:
ExistList=ListSR
if ExistList!=-1:
ListSR=ListLen
ListSR=ListSR+1
if ExistList==-1 and len(FR_MAC)==17: # and len(TO_MAC)==17: # NOT FOUND ON LIST
List_FrMAC.append(str(FR_MAC))
List_ToMAC.append(str(TO_MAC))
List_Data.append(str(GET_DATA))
List_Data86.append(str(GET_DATA86))
List_Data98.append(str(GET_DATA98))
List_Data94.append(str(GET_DATA94))
List_Auth.append(str(GET_AUTH))
List_Deauth.append(str(GET_DEAUTH))
List_Assoc.append(str(GET_ASSOC))
List_Reassoc.append(str(GET_REASSOC))
List_Disassoc.append(str(GET_DISASSOC))
List_RTS.append(str(GET_RTS))
List_CTS.append(str(GET_CTS))
List_ACK.append(str(GET_ACK))
List_EAPOL.append(str(GET_EAPOL))
List_WPS.append(str(GET_WPS))
List_NULL.append(str(GET_NULL))
List_QOS.append(str(GET_QOS))
List_Beacon.append(str(GET_BEACON))
List_PResp.append(str(GET_PRESP))
List_PReq.append(str(GET_PRQX))
List_SSID.append(str(SSID) + ", ")
List_ProbeName.append(str(PSSID) + ", ")
if AESSID!="":
List_IsAP.append("Yes")
else:
List_IsAP.append("No")
if ExistList!=-1: # FOUND ON LIST
GET_DATA=List_Data[ExistList]
GET_DATA86=List_Data86[ExistList]
GET_DATA98=List_Data98[ExistList]
GET_DATA94=List_Data94[ExistList]
GET_AUTH=List_Auth[ExistList]
GET_DEAUTH=List_Deauth[ExistList]
GET_ASSOC=List_Assoc[ExistList]
GET_REASSOC=List_Reassoc[ExistList]
GET_DISASSOC=List_Disassoc[ExistList]
GET_RTS=List_RTS[ExistList]
GET_CTS=List_CTS[ExistList]
GET_ACK=List_ACK[ExistList]
GET_EAPOL=List_EAPOL[ExistList]
GET_WPS=List_WPS[ExistList]
GET_BEACON=List_Beacon[ExistList]
GET_PRESP=List_PResp[ExistList]
GET_PRQX=List_PReq[ExistList]
GET_NULL=List_NULL[ExistList]
GET_QOS=List_QOS[ExistList]
SSID_List=[]
if List_SSID[ExistList]!="":
List_SSIDS=str(List_SSID[ExistList])
SSID_List=List_SSIDS.split(", ")
ProbeName_List=[]
if List_ProbeName[ExistList]!="":
List_ProbeNameS=str(List_ProbeName[ExistList])
ProbeName_List=List_ProbeNameS.split(", ")
if SSID!="":
List_IsAP[ExistList]="Yes"
lSSID=len(SSID_List)
lsid=0
FoundSSID="0"
if lSSID!=0 and SSID!="":
while lsid<lSSID:
if SSID_List[lsid]!="" and SSID_List[lsid]==str(SSID):
FoundSSID="1"
lsid=lSSID
lsid=lsid+1
if FoundSSID=="0":
if List_SSID[ExistList]==", ":
List_SSID[ExistList]=""
if SSID!="Broadcast":
List_SSID[ExistList]=List_SSID[ExistList] + str(SSID) + ", "
lSSID=len(ProbeName_List)
lsid=0
FoundProbeName="0"
if lSSID!=0 and PSSID!="":
while lsid<lSSID:
if ProbeName_List[lsid]!="" and ProbeName_List[lsid]==str(PSSID):
FoundProbeName="1"
lsid=lSSID
lsid=lsid+1
if FoundProbeName=="0":
if List_ProbeName[ExistList]==", ":
List_ProbeName[ExistList]=""
List_ProbeName[ExistList]=List_ProbeName[ExistList] + str(PSSID) + ", "
if STYPE=="DATA" and DTYPE2=="98" and WPS2==".P....F.C": # chopchop ??
GET_DATA98=str(int(GET_DATA98) + 1)
if STYPE=="DATA" and DTYPE2=="98" and WPS2==".P.....TC": # Interactive Replay ??
GET_DATA98=str(int(GET_DATA98) + 1)
if STYPE=="DATA" and DTYPE2=="94" and WPS2==".P...M.TC": # fragment PRGA
GET_DATA94=str(int(GET_DATA94) + 1)
if STYPE=="DATA" or STYPE=="QOS":
if TO_MAC=="FF:FF:FF:FF:FF:FF":
GET_DATA=str(int(GET_DATA) + 1)
if STYPE=="DATA":
if DTYPE2=="71" or DTYPE2=="73":
if TO_MAC[:9]=="01:00:5E:":
GET_DATA=str(int(GET_DATA) + 1)
if STYPE=="DATA":
if TO_MAC[:9]!="FF:FF:FF:" and TO_MAC[:3]=="FF:":
GET_DATA=str(int(GET_DATA) + 1)
if STYPE=="DATA" and WPS2==".P.....TC": # MDK mICHAEL SHUTDOWN EXPLOIT (TKIP)
if FR_MAC[9:]=="00:00:00":
GET_DATA86=str(int(GET_DATA86) + 1)
if STYPE=="AUTHENTICATION":
GET_AUTH=str(int( GET_AUTH) + 1)
if STYPE=="DEAUTHENTICATION":
GET_DEAUTH=str(int(GET_DEAUTH) + 1)
if STYPE=="DISASSOCIATE":
GET_DISASSOC=str(int(GET_DISASSOC) + 1)
if STYPE=="ASSOCIATION":
GET_ASSOC=str(int(GET_ASSOC) + 1)
if STYPE=="REASSOCIATION":
GET_REASSOC=str(int(GET_REASSOC) + 1)
if STYPE=="REQUEST-TO-SEND":
GET_RTS=str(int(GET_RTS) + 1)
if STYPE=="CLEAR-TO-SEND":
GET_CTS=str(int(GET_CTS) + 1)
if STYPE=="ACKNOWLEDGEMENT":
GET_ACK=str(int(GET_ACK) + 1)
if STYPE=="EAPOL":
GET_EAPOL=str(int(GET_EAPOL) + 1)
if STYPE=="WPS":
GET_WPS=str(int(GET_WPS) + 1)
if STYPE=="BEACON FRAME":
GET_BEACON=str(int(GET_BEACON) + 1)
if STYPE=="PROBE RESPONSE":
GET_PRESP=str(int(GET_PRESP) + 1)
if STYPE=="PROBE REQUEST":
GET_PRQX=str(int(GET_PRQX) + 1)
if STYPE=="NULL FUNCTION":
GET_NULL=str(int(GET_NULL) + 1)
if STYPE=="QOS" and TO_MAC[:9]!="FF:FF:FF:": # DTYPE2=="103":
if WPS3==".P....F.C" or WPS2==".P....F.C":
GET_QOS=str(int(GET_QOS) + 1)
List_Data[ExistList]=GET_DATA
List_Data86[ExistList]=GET_DATA86
List_Data98[ExistList]=GET_DATA98
List_Data94[ExistList]=GET_DATA94
List_Auth[ExistList]=GET_AUTH
List_Deauth[ExistList]=GET_DEAUTH
List_Assoc[ExistList]=GET_ASSOC
List_Reassoc[ExistList]=GET_REASSOC
List_Disassoc[ExistList]=GET_DISASSOC
List_RTS[ExistList]=GET_RTS
List_CTS[ExistList]=GET_CTS
List_ACK[ExistList]=GET_ACK
List_EAPOL[ExistList]=GET_EAPOL
List_WPS[ExistList]=GET_WPS
List_Beacon[ExistList]=GET_BEACON
List_PResp[ExistList]=GET_PRESP
List_PReq[ExistList]=GET_PRQX
List_NULL[ExistList]=GET_NULL
List_QOS[ExistList]=GET_QOS
if SSID!="" and List_SSID[ExistList]=="":
List_SSID[ExistList]=SSID + ", "
List_IsAP[ExistList]="Yes"
if PSSID!="" and List_ProbeName[ExistList]=="":
List_ProbeName[ExistList]=PSSID + ", "
if AESSID!="":
List_IsAP[ExistList]="Yes"
ExistList=-1
x=0
while x<len(List_FrMAC):
SSID_CT="0"
if List_SSID[x]!="":
if List_SSID[x][-2:]==", ":
List_SSID[x]=List_SSID[x][:-2]
List_SSID[x]=List_SSID[x].replace("Broadcast, ","").replace("Broadcast","")
SSID_List=List_SSID[x].split(", ")
SSID_CT=str(len(SSID_List))
if List_ProbeName[x]!="":
if List_ProbeName[x][-2:]==", ":
List_ProbeName[x]=List_ProbeName[x][:-2]
if List_ProbeName[x]!="" and List_SSID[x]!="":
if List_Beacon==0:
List_SSID[x]=""
List_IsAP[x]="No"
if List_SSID[x]=="":
SSID_CT="0"
List_SSIDCT.append(str(SSID_CT))
x=x+1
printl(fcolor.BRed + " ","","")
printl(fcolor.BRed + " Analysing Completed..\r","","")
if IsFileDirExist(resultlist)!="F":
open(resultlist,"wb").write("" + "\n")
ts = time.time()
DateTimeStamp=datetime.datetime.fromtimestamp(ts).strftime('%d/%m/%Y %H:%M:%S')
open(resultlist,"wb").write(tcpdump_log + "\n")
open(resultlist,"a+b").write("Date/Time\t:" + str(DateTimeStamp) + "\n")
x=0
l=len(List_FrMAC)
while x<l:
open(resultlist,"a+b").write("SN\tFR MAC \t\t\tBF \tIsAP? \tECT \tData \tData86 \tDat94 \tDat98 \tQOS\tAuth \tDeauth \tAssoc \tR.Asc \tD.Asc \tRTS \tCTS \tACK \tEAPOL \tWPS \tRQX \tResp \tNULL" + "\n")
open(resultlist,"a+b").write(str(x) + "\t" + List_FrMAC[x] + "\t" + List_Beacon[x] + "\t" + List_IsAP[x] + "\t" + List_SSIDCT[x] + "\t" + List_Data[x] + "\t" + List_Data86[x] + "\t" + List_Data94[x] + "\t" + List_Data98[x] + "\t" + List_QOS[x] + "\t" + List_Auth[x] + "\t" + List_Deauth[x] + "\t" + List_Assoc[x] + "\t" + List_Reassoc[x] + "\t" + List_Disassoc[x] + "\t" + List_RTS[x] + "\t" + List_CTS[x] + "\t" + List_ACK[x] + "\t" + List_EAPOL[x] + "\t" + List_WPS[x] + "\t" + List_PReq[x] + "\t" + List_PResp[x] + "\t" + List_NULL[x] + "\n")
open(resultlist,"a+b").write("ESSID\t" + List_SSID[x] + "\n")
open(resultlist,"a+b").write("Probe\t" + List_ProbeName[x] + "\n")
open(resultlist,"a+b").write("DEST\t" + List_ToMAC[x] + "\n\n")
x=x+1
open(resultlist,"a+b").write("" + "\n\n")
listlen=len(List_FrMAC)
listsr=0
Concern=0
AWPA=0
AWEP=0
AWPS=0
ATUN=0
AWNG=0
ACCP=0
ATFL=0
ABCF=0
MDKM=0
ASFL=0
PRGA=0
IARP=0
WPAD=0
WPSDetected=0
AType=""
if listlen!=0:
printl(fcolor.BRed + "\r","","")
while listsr<listlen:
ToMAC=List_ToMAC[listsr]
ToMACList=ToMAC.split(" / ")
tml=0
Multicast=0
Chopchop=0
while tml<len(ToMACList):
ChkMAC=ToMACList[tml]
if ChkMAC[:9]=="01:00:5E:":
Multicast=Multicast+1
if ChkMAC[:9]!="FF:FF:FF:" and ChkMAC[:3]=="FF:":
Chopchop=Chopchop+1
tml=tml+1
if int(List_Deauth[listsr])>=10:
FrMAC=str(List_FrMAC[listsr])
ToMAC=RemoveUnwantMAC(str(List_ToMAC[listsr]))
if ToMAC=="":
ToMAC = fcolor.BRed + "Broadcast"
if int(List_Disassoc[listsr])>=10:
Concern=Concern+1
printc (" ","","")
printc (".",fcolor.BGreen + "Deauth Flood detected calling from [ " + fcolor.BBlue + str(FrMAC) + fcolor.BGreen + " ] to [ " + fcolor.BBlue + str(ToMAC) + fcolor.BGreen + " ] with " + fcolor.BYellow + str(List_Deauth[listsr]) + fcolor.BGreen + " deauth packets","")
printc (".",fcolor.BGreen + "Dissassociation Flood detected calling from [ " + fcolor.BBlue + str(FrMAC) + fcolor.BGreen + " ] to [ " + fcolor.BBlue + str(ToMAC) + fcolor.BGreen + " ] with " + fcolor.BYellow + str(List_Disassoc[listsr]) + fcolor.BGreen + " disassociation packets","")
AType="DISASSOC"
WPAD="1"
ReturnResult=GetMACDetail(FrMAC,ToMAC,AType,"")
GenPrivacy=ReturnResult.split(",")[0].lstrip().rstrip()
printc (" ",fcolor.SWhite + "Possible MDK3 WPA Downgrade..","")
else:
Concern=Concern+1
printc (" ","","")
printc (".",fcolor.BGreen + "Deauth Flood detected calling from [ " + fcolor.BBlue + str(FrMAC) + fcolor.BGreen + " ] to [ " + fcolor.BBlue + str(ToMAC) + fcolor.BGreen + " ] with " + fcolor.BYellow + str(List_Deauth[listsr]) + fcolor.BGreen + " deauth packets","")
AType="DEAUTH"
ReturnResult=GetMACDetail(FrMAC,ToMAC,AType,"")
GenPrivacy=ReturnResult.split(",")[0].lstrip().rstrip()
if FrMAC=="00:00:00:00:00:00" or ToMAC=="00:00:00:00:00:00":
ATUN="1"
printc (" ",fcolor.SWhite + "Possible TKIPTUN-NG Signature..","")
else:
if str(GenPrivacy)=="WPA" or int(List_EAPOL[listsr])>0:
AWPA="1"
printc (" ",fcolor.BGreen + "Handshake Found [ " + fcolor.BBlue + str(List_EAPOL[listsr]) + fcolor.BGreen + " ] ","")
else:
if int(List_Deauth[listsr])>0:
FrMAC=str(List_FrMAC[listsr])
ToMAC=RemoveUnwantMAC(str(List_ToMAC[listsr]))
if List_FrMAC[listsr].find("00:00:00:00:00:00")!=-1 or List_ToMAC[listsr].find("00:00:00:00:00:00")!=-1:
Concern=Concern+1
printc (" ","","")
printc (".",fcolor.BGreen + "Deauth Flood detected calling from [ " + fcolor.BBlue + str(FrMAC) + fcolor.BGreen + " ] to [ " + fcolor.BBlue + str(ToMAC) + fcolor.BGreen + " ] with " + fcolor.BYellow + str(List_Deauth[listsr]) + fcolor.BGreen + " deauth packets","")
AType="DEAUTH"
ReturnResult=GetMACDetail(FrMAC,ToMAC,AType,"")
GenPrivacy=ReturnResult.split(",")[0]
ATUN="1"
printc (" ",fcolor.SWhite + "Possible TKIPTUN-NG Signature..","")
printc (" ",fcolor.BGreen + "Handshake Found [ " + fcolor.BBlue + str(List_EAPOL[listsr]) + fcolor.BGreen + " ] ","")
if int(List_Data[listsr])>=25:
FrMAC=RemoveUnwantMAC(str(List_FrMAC[listsr]))
ToMAC=RemoveUnwantMAC(str(List_ToMAC[listsr]))
if ToMAC=="":
ToMAC="Broadcast"
if int(List_Data[listsr])>30 and Multicast<=1 and Chopchop<=1:
Concern=Concern+1
printc (" ","","")
printc (".",fcolor.BGreen + "Unusual Data sending from [ " + fcolor.BBlue + str(FrMAC) + fcolor.BGreen + " ] to [ " + fcolor.BBlue + str(ToMAC) + fcolor.BGreen + " ] with " + fcolor.BYellow + str(List_Data[listsr]) + fcolor.BGreen + " Broadcast data packets","")
AType="BCDATA"
ReturnResult=GetMACDetail(FrMAC,ToMAC,AType,"")
GenPrivacy=ReturnResult.split(",")[0].lstrip().rstrip()
if str(GenPrivacy)=="WEP":
AWEP="1"
if Multicast>5:
Concern=Concern+1
printc (" ","","")
printc (".",fcolor.BGreen + "Possible attack using Wesside-NG from [ " + fcolor.BBlue + str(FrMAC) + fcolor.BGreen + " ] to [ " + fcolor.BBlue + str(ToMAC) + fcolor.BGreen + " ] with " + fcolor.BYellow + str(Multicast) + fcolor.BGreen + " Multicast data packets","")
AType="BCDATA"
ReturnResult=GetMACDetail(FrMAC,ToMAC,AType,"")
GenPrivacy=ReturnResult.split(",")[0].lstrip().rstrip()
AWNG="1"
if str(GenPrivacy)=="WEP":
AWEP="1"
if Chopchop>5:
Concern=Concern+1
printc (" ","","")
printc (".",fcolor.BGreen + "Possible attack using with Korek Chopchop method from [ " + fcolor.BBlue + str(FrMAC) + fcolor.BGreen + " ] to [ " + fcolor.BBlue + str(ToMAC) + fcolor.BGreen + " ] with " + fcolor.BYellow + str(Chopchop) + fcolor.BGreen + " data packets","")
AType="BCDATA"
ReturnResult=GetMACDetail(FrMAC,ToMAC,AType,"")
GenPrivacy=ReturnResult.split(",")[0].lstrip().rstrip()
ACCP="1"
if str(GenPrivacy)=="WEP":
AWEP="1"
if int(List_Data94[listsr])>=5:
Concern=Concern+1
FrMAC=RemoveUnwantMAC(str(List_FrMAC[listsr]))
ToMAC=RemoveUnwantMAC(str(List_ToMAC[listsr]))
if ToMAC=="":
ToMAC="Broadcast"
printc (" ","","")
printc (".",fcolor.BGreen + "Possible Fragmentation PRGA Attack from [ " + fcolor.BBlue + str(FrMAC) + fcolor.BGreen + " ] to [ " + fcolor.BBlue + str(ToMAC) + fcolor.BGreen + " ] with " + fcolor.BYellow + str(List_Data94[listsr]) + fcolor.BGreen + " data packets","")
AType="PRGA"
PRGA="1"
ReturnResult=GetMACDetail(FrMAC,ToMAC,AType,"")
GenPrivacy=ReturnResult.split(",")[0]
Privacy=ReturnResult.split(",")[1].lstrip().rstrip()
Cipher=ReturnResult.split(",")[2].lstrip().rstrip()
Authentication=ReturnResult.split(",")[3]
if str(GenPrivacy)=="WEP":
AWEP="1"
if int(List_Data86[listsr])>=5:
Concern=Concern+1
FrMAC=RemoveUnwantMAC(str(List_FrMAC[listsr]))
ToMAC=RemoveUnwantMAC(str(List_ToMAC[listsr]))
if ToMAC=="":
ToMAC="Broadcast"
printc (" ","","")
printc (".",fcolor.BGreen + "Possible MDK Michael shutdown exploitation (TKIP) from [ " + fcolor.BBlue + str(FrMAC) + fcolor.BGreen + " ] to [ " + fcolor.BBlue + str(ToMAC) + fcolor.BGreen + " ] with " + fcolor.BYellow + str(List_Data86[listsr]) + fcolor.BGreen + " data packets","")
AType="MDKM"
MDKM="1"
ReturnResult=GetMACDetail(FrMAC,ToMAC,AType,"")
GenPrivacy=ReturnResult.split(",")[0]
Privacy=ReturnResult.split(",")[1].lstrip().rstrip()
Cipher=ReturnResult.split(",")[2].lstrip().rstrip()
Authentication=ReturnResult.split(",")[3]
if int(List_QOS[listsr])>=1:
FrMAC=RemoveUnwantMAC(str(List_FrMAC[listsr]))
ToMAC=RemoveUnwantMAC(str(List_ToMAC[listsr]))
if ToMAC=="":
ToMAC="Broadcast"
PResult=printc (".","<$rs$>" + fcolor.BGreen + "High amount of QOS recieved from [ " + fcolor.BBlue + str(FrMAC) + fcolor.BGreen + " ] to [ " + fcolor.BBlue + str(ToMAC) + fcolor.BGreen + " ] with " + fcolor.BYellow + str(List_QOS[listsr]) + fcolor.BGreen + " QOS data packets","")
ReturnResult=GetMACDetail(FrMAC,ToMAC,AType,1)
GenPrivacy=ReturnResult.split(",")[0]
Privacy=ReturnResult.split(",")[1].lstrip().rstrip()
Cipher=ReturnResult.split(",")[2].lstrip().rstrip()
Authentication=ReturnResult.split(",")[3]
if Cipher=="TKIP":
AType="TUN"
ATUN="1"
Concern=Concern+1
printc (" ","","")
print PResult
print MACDetail + "\r"
print " " + fcolor.SWhite + "Note: Basing on signature, it could be attack by TKIPTUN-NG."
if int(List_Auth[listsr])>=5:
Concern=Concern+1
FrMAC=RemoveUnwantMAC(str(List_FrMAC[listsr]))
ToMAC=RemoveUnwantMAC(str(List_ToMAC[listsr]))
if int(List_Auth[listsr])<=80:
printc (" ","","")
printc (".",fcolor.BGreen + "Detected authentication sent from [ " + fcolor.BBlue + str(FrMAC) + fcolor.BGreen + " ] to [ " + fcolor.BBlue + str(ToMAC) + fcolor.BGreen + " ] with " + fcolor.BYellow + str(List_Auth[listsr]) + fcolor.BGreen + " authentication request detected","")
AType="AUTH"
RtnESSID=GetMACDetail(FrMAC,ToMAC,AType,"")
else:
printc (" ","","")
if len(List_ToMAC[listsr])>100:
ATFL="1"
printc (".",fcolor.BGreen + "Detected possible Authentication DOS on [ " + fcolor.BBlue + str(FrMAC) + fcolor.BGreen + " ] to [ " + fcolor.BBlue + "Many Clients" + fcolor.BGreen + " ] with " + fcolor.BRed + str(List_Auth[listsr]) + fcolor.BGreen + " authentication request detected","")
printc (" ",fcolor.SWhite + "Note: This situation usually seen on Aireplay-NG WPA Migration Mode.","")
Ask=AskQuestion ("There are a total of [ " + fcolor.BRed + str(len(List_ToMAC[listsr])) + fcolor.BGreen + " ] client's MAC captured, display them ?","y/N","","N","")
if Ask=="Y" or Ask=="y":
printc (".",fcolor.BGreen + "Client MAC [ " + fcolor.BBlue + str(ToMAC) + fcolor.BGreen + " ]","")
AType="AUTH"
RtnESSID=GetMACDetail(FrMAC,ToMAC,AType,"")
else:
ATFL="1"
printc (".",fcolor.BGreen + "Unusual high amount of authentication sent from [ " + fcolor.BBlue + str(FrMAC) + fcolor.BGreen + " ] to [ " + fcolor.BBlue + str(ToMAC) + fcolor.BGreen + " ] with " + fcolor.BRed + str(List_Auth[listsr]) + fcolor.BGreen + " authentication request detected","")
printc (" ",fcolor.SWhite + "Note: If amount is too high, likely to be Authentication DOS.","")
AType="AUTH"
RtnESSID=GetMACDetail(FrMAC,ToMAC,AType,"")
if int(List_Assoc[listsr])>=8:
Concern=Concern+1
FrMAC=RemoveUnwantMAC(str(List_FrMAC[listsr]))
ToMAC=RemoveUnwantMAC(str(List_ToMAC[listsr]))
printc (" ","","")
if len(List_ToMAC[listsr])>100:
ASFL="1"
printc (".",fcolor.BGreen + "Detected possible association flood on [ " + fcolor.BBlue + str(FrMAC) + fcolor.BGreen + " ] to [ " + fcolor.BBlue + "Many Clients" + fcolor.BGreen + " ] with " + fcolor.BRed + str(List_Assoc[listsr]) + fcolor.BGreen + " association request detected","")
Ask=AskQuestion ("There are a total of [ " + fcolor.BRed + str(len(List_ToMAC[listsr])) + fcolor.BGreen + " ] client's MAC captured, display them ?","y/N","","N","")
if Ask=="Y":
printc (".",fcolor.BGreen + "Client MAC [ " + fcolor.BBlue + str(ToMAC) + fcolor.BGreen + " ]","")
AType="ASSOC"
RtnESSID=GetMACDetail(FrMAC,ToMAC,AType,"")
else:
printc (".",fcolor.BGreen + "Unusual high amount of association sent from [ " + fcolor.BBlue + str(FrMAC) + fcolor.BGreen + " ] to [ " + fcolor.BBlue + str(ToMAC) + fcolor.BGreen + " ] with " + fcolor.BRed + str(List_Assoc[listsr]) + fcolor.BGreen + " association request detected","")
printc (" ",fcolor.SWhite + "Note: If amount is too high, likely to be Association flood.","")
AType="ASSOC"
RtnESSID=GetMACDetail(FrMAC,ToMAC,AType,"")
if Multicast>5:
printc (" ",fcolor.SWhite + "Note: Basing on signature, possible Wesside-NG attack with [ " + fcolor.SRed + str(Multicast) + fcolor.SWhite + " ] multicast detected." ,"")
if int(List_WPS[listsr])>=2:
Concern=Concern+1
WPSDetected=1
AWPS="1"
FrMAC=RemoveUnwantMAC(str(List_FrMAC[listsr]))
ToMAC=RemoveUnwantMAC(str(List_ToMAC[listsr]))
printc (" ","","")
printc (".",fcolor.BGreen + "EAP communication between AP and client sending from [ " + fcolor.BBlue + str(FrMAC) + fcolor.BGreen + " ] to [ " + fcolor.BBlue + str(ToMAC) + fcolor.BGreen + " ] with " + fcolor.BYellow + str(List_WPS[listsr]) + fcolor.BGreen + " EAP packets detected","")
AType="EAP"
RtnESSID=GetMACDetail(FrMAC,ToMAC,AType,"")
printc (" ",fcolor.SWhite + "Note: If constantly seeing EAP communication between this two devices, it is likely that a WPS bruteforce is in progress..","")
if int(List_SSIDCT[listsr])>=2:
FrMAC=str(List_FrMAC[listsr])
ToMAC=str(List_ToMAC[listsr])
if ToMAC!="FF:FF:FF:FF:FF:FF" or len(ToMAC)>17:
TMC=[]
AList=List_SSID[listsr] + ", "
TMC=AList.split(",")
FM="0"
if List_SSIDCT[listsr]=="2" or List_SSIDCT[listsr]=="3":
if List_SSIDCT[listsr]=="3":
if len(TMC[0].lstrip().rstrip())==len(TMC[1].lstrip().rstrip()) and len(TMC[1].lstrip().rstrip())==len(TMC[2].lstrip().rstrip()):
FM="1"
else:
if len(TMC[0].lstrip().rstrip())==len(TMC[1].lstrip().rstrip()):
FM="1"
if FM=="0":
AToMAC=ToMAC
if AToMAC=="FF:FF:FF:FF:FF:FF":
AToMAC=fcolor.BRed + "Broadcast"
else:
FrMAC=RemoveUnwantMAC(str(List_FrMAC[listsr]))
AToMAC=RemoveUnwantMAC(str(List_ToMAC[listsr]))
printc (" ","","")
SSIDCount=List_SSIDCT[listsr]
if List_SSID[listsr].find("Broadcast")!=-1 and AToMAC!="":
SSIDCount=int(SSIDCount)-1
FrMAC=RemoveUnwantMAC(str(List_FrMAC[listsr]))
AToMAC=RemoveUnwantMAC(str(List_ToMAC[listsr]))
Concern=Concern+1
RAPDetected=1
ARAP="1"
printc (".",fcolor.BGreen + "Suspect Rogue AP using [ " + fcolor.BBlue + str(FrMAC) + fcolor.BGreen + " ] and responsed to [ " + fcolor.BBlue + str(AToMAC) + fcolor.BGreen + " ] using " + fcolor.BYellow + str(SSIDCount) + fcolor.BGreen + " different SSID Name.","")
printc (".",fcolor.BGreen + "Broadcasted SSID Name [ " + fcolor.BBlue + str(List_SSID[listsr]) + fcolor.BGreen + " ]...","")
AType="RAP"
RtnESSID=GetMACDetail(FrMAC,ToMAC,AType,"")
printc (" ",fcolor.SWhite + "Note: If names look quite similar, it is unlikely to be Rogue AP as due to lost/malfunction packets.","")
if ToMAC=="FF:FF:FF:FF:FF:FF" and int(List_SSIDCT[listsr])>15 :
Concern=Concern+1
print ""
printc (".",fcolor.BGreen + "Detected possible 'Beacon Flood' using MAC Address [ " + fcolor.BBlue + str(FrMAC) + fcolor.BGreen + " ] with " + fcolor.BYellow + str(List_SSIDCT[listsr]) + fcolor.BGreen + " different SSID Name.","")
printc (".",fcolor.BGreen + "Broadcasted SSID Name [ " + fcolor.BBlue + str(List_SSID[listsr]) + fcolor.BGreen + " ]...","")
AType="BCF"
ABCF="1"
listsr = listsr +1
CheckSimilarESSID()
tcpdump_cap=tmpdir + "tcpdump.cap"
Result=""
if Concern==0:
if IsFileDirExist(tcpdump_cap)=="F":
statinfo = os.stat(tcpdump_cap)
filesize=statinfo.st_size
if filesize>=300:
Result=printc ("i","<$rs$>" + "" + fcolor.BYellow + DateTimeStamp + " - " + fcolor.SGreen + "Did not detect any suspicious activity ...\n","")
else:
Result=printc ("i","<$rs$>" + "" + fcolor.BBlue + DateTimeStamp + " - " + fcolor.BRed + str(Concern) + fcolor.BWhite + " concerns found...","")
WText=""
if AWEP=="1":
WText=str(WText) + "WEP , "
if AWNG=="1":
WText=str(WText) + "WESSIDE-NG , "
if ACCP=="1":
WText=str(WText) + "KoreK Chopchop , "
if AWPA=="1":
WText=str(WText) + "WPA , "
if ATUN=="1":
WText=str(WText) + "TKIPTUN-NG , "
if AWPS=="1":
WText=str(WText) + "WPS , "
if ATFL=="1":
WText=str(WText) + "Authentication DOS , "
if ASFL=="1":
WText=str(WText) + "Association DOS , "
if ABCF=="1":
WText=str(WText) + "Beacon Flood , "
if PRGA=="1":
WText=str(WText) + "Fragmentation PRGA , "
if IARP=="1":
WText=str(WText) + "ARP/Interactive Replay , "
if MDKM=="1":
WText=str(WText) + "MDK3 - Michael Shutdown Exploitation , "
if WPAD=="1":
WText=str(WText) + "MDK3 - WPA Downgrade Test , "
if WText!="":
WText=WText[:-3]
Result=Result + "\n" + fcolor.BGreen + " Possibility : " + fcolor.BRed + WText + " attacks."
if Concern!=0:
printc (" ","","")
printl(fcolor.BRed + " ","","")
printl(fcolor.BRed + "" + Result,"","")
if PrintToFile=="1" and Result!="":
open(LogFile,"a+b").write(RemoveColor(str(Result)) + "\n")
if Concern!=0:
open(LogFile,"a+b").write("\n")
if Concern!=0:
printc (" ","","")
DrawLine("_",fcolor.CReset + fcolor.Black,"")
printc (" ","","")
def GetESSID(MAC_ADDR):
    """Look up MAC_ADDR in tmpdir/ESSID.log, announce and return its ESSID.

    Scans the whole log so the *last* matching row wins.  Prints the result
    via printc() unless the recorded name is "(not associated)".  Returns the
    last ESSID found, or "" when the log is absent or nothing matched.
    """
    ESSID_log = tmpdir + "ESSID.log"
    ESSID = ""
    if IsFileDirExist(ESSID_log) == "F" and len(MAC_ADDR) == 17:
        with open(ESSID_log, "r") as rf:
            for eline in rf:
                eline = eline.replace("\n", "")
                # Skip rows too short to hold a MAC plus a name, and non-matches.
                if len(eline) < 18 or eline.find(MAC_ADDR) == -1:
                    continue
                ESSID = eline.replace(MAC_ADDR + "\t", "")
                if ESSID != "(not associated)":
                    printc (" ",fcolor.BWhite + "[ " + fcolor.BBlue + str(MAC_ADDR) + fcolor.BWhite + " ]'s SSID Name is [ " + fcolor.BBlue + str(ESSID) + fcolor.BWhite + " ].","")
    return ESSID
def GetESSIDOnly(MAC_ADDR):
    # Look up MAC_ADDR in tmpdir/ESSID.log and return its recorded ESSID.
    # Unlike GetESSID(), nothing is printed.  Returns the first match whose
    # name is not "(not associated)"; otherwise falls off the end and
    # implicitly returns None (NOTE(review): callers must handle None, not "").
    ESSID_log=tmpdir + "ESSID.log"
    ESSID=""
    if IsFileDirExist(ESSID_log)=="F":
        if len(MAC_ADDR)==17:  # only well-formed "XX:XX:XX:XX:XX:XX" addresses
            with open(ESSID_log,"r") as rf:
                for eline in rf:
                    eline=eline.replace("\n","")
                    if len(eline)>=18:  # row must hold at least MAC + separator
                        if eline.find(MAC_ADDR)!=-1:
                            # Row format is "<MAC>\t<ESSID>"; strip the MAC prefix.
                            ESSID=eline.replace(MAC_ADDR + "\t","")
                            if ESSID!="(not associated)":
                                return ESSID
def GetESSIDOnlyText(MAC_ADDR):
    """Return a colored description line for MAC_ADDR's recorded ESSID.

    Reads tmpdir/ESSID.log; every matching row whose name is not
    "(not associated)" rebuilds the text, so the last match wins.  Returns
    "" when the log is missing, the MAC is malformed, or nothing matched.
    """
    ESSID_log = tmpdir + "ESSID.log"
    TOText = ""
    if IsFileDirExist(ESSID_log) == "F" and len(MAC_ADDR) == 17:
        with open(ESSID_log, "r") as rf:
            for eline in rf:
                eline = eline.replace("\n", "")
                # Ignore short rows and rows for other MACs.
                if len(eline) < 18 or eline.find(MAC_ADDR) == -1:
                    continue
                ESSID = eline.replace(MAC_ADDR + "\t", "")
                if ESSID != "(not associated)":
                    TOText=" " + fcolor.BWhite + "[ " + fcolor.BBlue + str(MAC_ADDR) + fcolor.BWhite + " ]'s SSID Name is [ " + fcolor.BBlue + str(ESSID) + fcolor.BWhite + " ]."
    return TOText
def UpdateClients():
global ClientList
global ESSIDList
global BSSIDList
global CL_ESSIDList
global CL_BSSIDList
global CL_MACList
global CL_CountList
ClientList=[]
ESSIDList=[]
BSSIDList=[]
ClientList=[]
CL_ESSIDList=[]
CL_BSSIDList=[]
CL_MACList=[]
CL_CountList=[]
NonAssociatedClient=""
ESSIDChangedName=""
ChangedAssociation=""
ESSID_log=tmpdir + "ESSID.log"
clientfile=tmpdir + "clients.log"
newcaptured=tmpdir + "CapturedListing.csv"
if IsFileDirExist(clientfile)!="F":
open(clientfile,"wb").write("" )
if IsFileDirExist(newcaptured)=="F":
ModiESSID=""
CLIENTS=""
linecount=0
TotalLine=GetFileLine(newcaptured,"0")
BRes=0
DisplayCt=0
printl(fcolor.SGreen + " Updating clients database....","","")
with open(newcaptured,"r") as f:
for line in f:
line=line.replace("\n","")
line=line.replace("\00","")
linecount=linecount+1
DisplayCt=DisplayCt+1
if DisplayCt>10:
completed=Percent(linecount / float(TotalLine),2)
BRes=printl(fcolor.SGreen + "Updating clients database... : " + str(completed) + ".." ,"2",BRes)
DisplayCt=0
if len(line)>5:
line=line + " ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., ., .,"
line=line.replace("\r","")
CList=line.split(",")
FMAC=line.split()[0].replace(',','')
FS1=line.split()[0].replace(',','')
FS2=line.split()[1].replace(',','')
FS=str(FS1) + " " + str(FS2)
Privacy=CList[5].lstrip().rstrip()
Cipher=CList[6].lstrip().rstrip()
Authentication=CList[7].lstrip().rstrip()
Power=CList[8].lstrip().rstrip()
ESSID=CList[13].lstrip().rstrip().replace("\n","")
SMAC=CList[5].lstrip().rstrip()
ProbeNetwork=CList[6].rstrip() + " / " + CList[7] + " / " + CList[8] + " / " + CList[9] + " / " + CList[10] + " / " + CList[11] + " / " + CList[12] + " / " + CList[13] + " / " + CList[14]
ProbeNetwork=ProbeNetwork.replace("/ .","").lstrip().rstrip()
ProbeNetwork=ProbeNetwork.rstrip().lstrip()
if len(ProbeNetwork)>3:
if ProbeNetwork[-2:]==" .":
ProbeNetwork=ProbeNetwork[:-2].rstrip()
if ProbeNetwork==".":
ProbeNetwork=""
Privacy=Privacy.replace('WPA2WPA OPN','WPA2WPA (OPN)')
Privacy=Privacy.replace('WPA2 OPN','WPA2 (OPN)')
Privacy=Privacy.replace('WPA OPN','WPA (OPN)')
Privacy=Privacy.replace('WPA2WPA','WPA2/WPA')
Privacy=Privacy.replace('WEP OPN','WEP (OPN)')
Cipher=Cipher.replace('CCMP TKIP','CCMP/TKIP')
ESSID=CheckSSIDChr(ESSID)
if FS=="Station MAC":
CLIENTS=1
else:
if FMAC!="":
ESSIDList.append(str(ESSID))
BSSIDList.append(str(FMAC))
if CLIENTS==1 and len(FMAC)==17:
Result=""
if SMAC=="(not associated)":
if ProbeNetwork!="":
NonAssociatedClient=NonAssociatedClient + fcolor.BWhite + "\n Wireless Device [ " + fcolor.BBlue + str(FMAC) + fcolor.BWhite + " ] is not associated to any network and is probing for [ " + fcolor.BYellow + str(ProbeNetwork) + fcolor.BWhite + " ] .."
Result=GetMACOUI(FMAC,"0")
NonAssociatedClient=NonAssociatedClient + "\n" + Result
else:
NonAssociatedClient=NonAssociatedClient + fcolor.BWhite + "\n Wireless Device [ " + fcolor.BBlue + str(FMAC) + fcolor.BWhite + " ] is not associated to any network and did not probe for any SSID .."
Result=GetMACOUI(FMAC,"0")
NonAssociatedClient=NonAssociatedClient + "\n" + Result
clientfile=tmpdir + "clients.log"
tmpfile=tmpdir + "clients.tmp"
if IsFileDirExist(clientfile)=="F":
FOUNDBSSID=""
open(tmpfile,"a+b").write("")
with open(clientfile,"r") as f:
for line in f:
ESTN=""
EXAP=""
ESID=""
line=line.replace("\n","")
line=line.replace("\00","")
if len(line)>24:
line=line + ","
ClientList=line.split(",")
ESTN=ClientList[0]
EXAP=ClientList[1]
ESID=ClientList[2]
ESTN=ESTN.replace(",","").lstrip().rstrip()
EXAP=EXAP.replace(",","").lstrip().rstrip()
ESID=ESID.lstrip().rstrip()
xlbs=0
lBSSID=len(BSSIDList)
ESSID1=""
ESSID2=""
while xlbs<lBSSID:
if BSSIDList[xlbs]==str(EXAP) and len(EXAP)==17:
ESSID1=ESSIDList[xlbs]
if BSSIDList[xlbs]==str(SMAC) and len(SMAC)==17:
ESSID2=ESSIDList[xlbs]
ESSID=ESSIDList[xlbs]
xlbs=xlbs+1
xlbs=0
CLIENTMAC_FOUND=""
lBSSID=len(CL_BSSIDList)
while xlbs<lBSSID:
if CL_BSSIDList[xlbs]==str(EXAP):
CLIENTMAC_FOUND="1"
if len(EXAP)==17 and ESTN!="" and CL_MACList[xlbs].find(ESTN)==-1:
CL_MACList[xlbs]=CL_MACList[xlbs] + str(ESTN) + ", "
CLIENTMAC_COUNT=CL_CountList[xlbs]
if CLIENTMAC_COUNT=="":
CLIENTMAC_COUNT="0"
CLIENTMAC_COUNT=int(CLIENTMAC_COUNT)+1
CL_CountList[xlbs]=CLIENTMAC_COUNT
xlbs=xlbs+1
if CLIENTMAC_FOUND!="1" and len(ESTN)==17:
CL_ESSIDList.append(str(GetESSIDOnly(EXAP)))
CL_BSSIDList.append(str(EXAP))
CL_MACList.append(str(ESTN) + ", ")
CL_CountList.append("1")
CLIENTMAC_FOUND=""
if len(FMAC)==17 and ESTN==FMAC and ESSID!="" and ESSID!=".":
FOUNDBSSID="1"
if ESID!=ESSID and ESSID!="" and ESID!="" and len(SMAC)==17 and EXAP==SMAC:
TOText=fcolor.BWhite + "\n ESSID for [ " + fcolor.BBlue + str(EXAP) + fcolor.BWhite + " ] changed from [ " + fcolor.BYellow + str(ESID) + fcolor.BWhite + " ] to [ " + fcolor.BYellow + str(ESSID) + fcolor.BWhite + " ].."
ESSIDChangedName=ESSIDChangedName + str(TOText)
printl (TOText,"","")
ESID=ESSID
if len(SMAC)==17 and EXAP!=SMAC and ChangedAssociation.find(str(ESTN))==-1:
TOText=fcolor.BRed + "\nAlert : " + fcolor.SGreen + "Client [ " + fcolor.BBlue + str(ESTN) + fcolor.SGreen + " ] initally associated to [ " + fcolor.BCyan + str(EXAP) + fcolor.SGreen + " ] is now associated to [ " + fcolor.BRed + str(SMAC) + fcolor.SGreen + " ].."
ChangedAssociation=ChangedAssociation + str(TOText)
if ESSID1=="":
ESSID1=GetESSIDOnly(EXAP)
ChangedAssociation=ChangedAssociation + str(ESSID1)
else:
TOText=fcolor.BRed + "\n " + fcolor.SGreen + "BSSID [ " + fcolor.BCyan + str(EXAP) + fcolor.SGreen + " ]'s Name is [ " + fcolor.BWhite + str(ESSID1) + fcolor.SGreen + " ]."
ChangedAssociation=ChangedAssociation + str(TOText)
if ESSID2=="":
ESSID2=GetESSIDOnly(SMAC)
ChangedAssociation=ChangedAssociation + str(ESSID2)
else:
TOText=fcolor.BRed + "\n " + fcolor.SGreen + "BSSID [ " + fcolor.BRed + str(SMAC) + fcolor.SGreen + " ]'s Name is [ " + fcolor.BWhite + str(ESSID2) + fcolor.SGreen + " ]."
ChangedAssociation=ChangedAssociation + str(TOText)
EXAP=SMAC
ESID=ESSID
ChangedAssociation=ChangedAssociation + "\n"
ESTN=ESTN.replace(",","").lstrip().rstrip()
EXAP=EXAP.replace(",","").lstrip().rstrip()
ESID=ESID.lstrip().rstrip()
if ESTN!="" and EXAP!="" and ESID!="":
open(tmpfile,"a+b").write(str(ESTN) + ", " + str(EXAP) + ", " + str(ESID) + "\n")
if FOUNDBSSID=="":
lBSSID=len(BSSIDList)
xlbs=0
EXAP=""
ESID=""
while xlbs<lBSSID:
if BSSIDList[xlbs]==str(SMAC) and len(SMAC)==17:
ESTN=str(FMAC)
EXAP=BSSIDList[xlbs]
ESID=ESSIDList[xlbs]
xlbs=xlbs+1
if EXAP!="" and ESID!="":
open(clientfile,"a+b").write(str(ESTN) + ", " + str(EXAP) + ", " + str(ESID) + "\n")
open(tmpfile,"a+b").write(str(ESTN) + ", " + str(EXAP) + ", " + str(ESID) + "\n")
os.remove(clientfile)
os.rename(tmpfile,clientfile)
BRes=printl(fcolor.SGreen + " Clients database updated...." ,"","")
lBSSID=len(CL_BSSIDList)
xlbs=0
ClientFound=""
while xlbs<lBSSID:
if int(CL_CountList[xlbs])>100:
printc (" ","","")
ClientFound=CL_CountList[xlbs]
CL_MACList[xlbs]=CL_MACList[xlbs].replace(", "," / ")
CL_MACList[xlbs]=CL_MACList[xlbs][:-3]
printc ("!!!",fcolor.BRed + "Alert: " + fcolor.BGreen + "Too much association was found associated to [ " + fcolor.BBlue + str(CL_BSSIDList[xlbs]) + fcolor.BGreen + " ] basing on association listing..","")
ESSID=GetESSID(str(CL_BSSIDList[xlbs]))
if PrintToFile=="":
Ask=AskQuestion ("There are a total of [ " + fcolor.BRed + str(CL_CountList[xlbs]) + fcolor.BGreen + " ] client's MAC captured, display them ?","y/N","","N","")
else:
printc(" ",fcolor.BGreen + "There are a total of [ " + fcolor.BRed + str(CL_CountList[xlbs]) + fcolor.BGreen + " ] client's MAC captured.","")
Ask="Y"
if Ask=="Y" or Ask=="y":
printc (".",fcolor.BGreen + "Client MAC [ " + fcolor.BBlue + str(CL_MACList[xlbs]) + fcolor.BGreen + " ]","")
print ""
xlbs=xlbs+1
if ClientFound!="":
if IsFileDirExist(clientfile)=="F":
os.remove(clientfile)
if HIDEPROBE=="0":
if NonAssociatedClient!="":
BRes=printl(fcolor.SGreen + str(NonAssociatedClient) + "\n" ,"1","")
if PrintToFile=="1":
open(LogFile,"a+b").write(RemoveColor(str(NonAssociatedClient)) + "\n")
if ChangedAssociation!="":
BRes=printl(fcolor.SGreen + str(ChangedAssociation) + "\n" ,"1","")
if PrintToFile=="1":
open(LogFile,"a+b").write(RemoveColor(str(ChangedAssociation)) + "\n")
if ESSIDChangedName!="":
BRes=printl(fcolor.SGreen + str(ESSIDChangedName) + "\n" ,"1","")
if PrintToFile=="1":
open(LogFile,"a+b").write(RemoveColor(str(ESSIDChangedName)) + "\n")
def ConvertPackets():
captured_pcap=tmpdir + "tcpdump.cap"
tcpdump_log=tmpdir + "tcpdump.log"
Result=DelFile(tcpdump_log,"0")
RewriteCSV()
UpdateClients()
printl(fcolor.SGreen + " Converting captured packets... Please wait...","","")
ps=subprocess.Popen("tshark -r " + str(captured_pcap) + " -n -t ad > " + str(tcpdump_log), shell=True, stdout=subprocess.PIPE,stderr=open(os.devnull, 'w'))
ps.wait()
if PrintToFile=="1" and IsFileDirExist(captured_pcap)=="F":
statinfo = os.stat(captured_pcap)
open(LogFile,"a+b").write(">>>> Pkt Size : " + str(statinfo.st_size) + "\n")
if ps.returncode==0:
printl(fcolor.SGreen + " Conversion completed......","","")
return;
def RewriteCSV():
captured_csv=tmpdir + "captured-01.csv"
newcaptured_csv=tmpdir + "CapturedListing.csv"
open(newcaptured_csv,"wb").write("" )
if IsFileDirExist(captured_csv)=="F":
with open(captured_csv,"r") as f:
for line in f:
line=line.replace("\n","")
line=line.replace("\00","")
open(newcaptured_csv,"a+b").write(line + "\n")
def IsAscii(inputStr):
return all(ord(c) < 127 and ord(c) > 31 for c in inputStr)
def CheckSSIDChr(ESSID_Name):
if IsAscii(ESSID_Name)==False:
ESSID_Name=""
return ESSID_Name
from random import randrange
from math import floor
global NullOut
global MyMAC
DN = open(os.devnull, 'w')
DebugMode="0"
printd("Main Start Here -->")
cmdline=len(sys.argv)
TWidth=103
ProxyType="0"
tmpfile='/tmp/ipinfo'
global InfoIP
InfoIP=""
global HIDEPROBE
global TEMP_HIDEPROBE
TEMP_HIDEPROBE=""
HIDEPROBE="0"
InfoIPVia=""
InfoIPFwd=""
TimeStart=""
MyMAC=""
appdir="/SYWorks/WIDS/"
macoui="/SYWorks/WIDS/mac-oui.db"
PathList = ['tmp/']
tmpdir=appdir + "tmp/"
#global PrevIconCount
PrevIconCount=0
NullOut=" > /dev/null 2>&1"
global LogFile
global PrintToFile
PrintToFile="0"
LogFile=appdir + "log.txt"
try:
global MONList
captured_pcap=tmpdir + "tcpdump.cap"
captured_csv=tmpdir + "captured-01.csv"
MONList = []
global MONListC
MONListC = []
MonCt = GetInterfaceList("MON")
MONList=IFaceList
GetAppName()
CheckLinux()
CheckPyVersion("2.6")
os.system('clear')
DisplayAppDetail()
DisplayDescription()
CheckAdmin()
CheckAppLocation()
CheckRequiredFiles()
GetParameter("1")
RETRY=0
HIDEPROBE=TEMP_HIDEPROBE
PrintToFile=PRINTTOFILE
if ReadPacketOnly=="1":
if IsFileDirExist(captured_pcap)=="F" and IsFileDirExist(captured_csv)=="F":
print " Reading captured packet only..."
ConvertPackets()
AnalyseCaptured()
else:
printc ("!!!","[-ro] Function is use to read existing captured packet only...","")
printc (" ","Make sure all neccessary captured files is present in order to use this function...","")
exit()
ps=subprocess.Popen("ps -A | grep 'airodump-ng'" , shell=True, stdout=subprocess.PIPE)
Process=ps.stdout.read()
if Process!="":
ps=subprocess.Popen("killall 'airodump-ng'" , shell=True, stdout=subprocess.PIPE)
Process=ps.stdout.read()
ps=subprocess.Popen("ps -A | grep 'aireplay-ng'" , shell=True, stdout=subprocess.PIPE)
Process=ps.stdout.read()
if Process!="":
ps=subprocess.Popen("killall 'aireplay-ng'" , shell=True, stdout=subprocess.PIPE)
Process=ps.stdout.read()
printc ("i","Monitor Selection","")
MonCt = GetInterfaceList("MON")
WLANCt = GetInterfaceList("WLAN")
if MonCt==0 and WLANCt==0:
printc (".",fcolor.SRed + "No wireless interface detected !","")
exit(1)
if MonCt==0 and WLANCt!=0:
if SELECTED_IFACE=="":
SELECTED_IFACE=SelectInterfaceToUse()
else:
Rund="iwconfig " + SELECTED_IFACE + " > /dev/null 2>&1"
result=os.system(Rund)
if result==0:
printc(">",fcolor.BIGray + "Interface Selection Bypassed....","")
else:
printc ("!!!", fcolor.BRed + "The interface specified [ " + fcolor.BWhite + SELECTED_IFACE + fcolor.BRed + " ] is not available." ,"")
print ""
SELECTED_IFACE=SelectInterfaceToUse()
printc (" ", fcolor.SWhite + "Selected Interface ==> " + fcolor.BRed + str(SELECTED_IFACE),"")
print ""
ps=subprocess.Popen("ifconfig " + str(SELECTED_IFACE) + " up > /dev/null 2>&1" , shell=True, stdout=subprocess.PIPE,stderr=open(os.devnull, 'w'))
if MonCt==0:
printc (".",fcolor.SGreen + "Enabling monitoring for [ " + fcolor.BRed + SELECTED_IFACE + fcolor.SGreen + " ]...","")
ps=subprocess.Popen("ifconfig " + str(SELECTED_IFACE) + " down > /dev/null 2>&1", shell=True, stdout=subprocess.PIPE,stderr=open(os.devnull, 'w'))
ps.wait()
ps=subprocess.Popen("iwconfig " + str(SELECTED_IFACE) + " mode monitor > /dev/null 2>&1", shell=True, stdout=subprocess.PIPE,stderr=open(os.devnull, 'w'))
ps.wait()
ps=subprocess.Popen("ifconfig " + str(SELECTED_IFACE) + " up > /dev/null 2>&1", shell=True, stdout=subprocess.PIPE,stderr=open(os.devnull, 'w'))
ps.wait()
time.sleep (0.5)
MonCt = GetInterfaceList("MON")
if MonCt>=1:
if SELECTED_MON=="":
SELECTED_MON=SelectMonitorToUse()
else:
Rund="iwconfig " + SELECTED_MON + " > /dev/null 2>&1"
result=os.system(Rund)
if result==0:
printc(">",fcolor.BIGray + "Monitor Selection Bypassed....","")
else:
printc ("!!!", fcolor.BRed + "The monitoring interface specified [ " + fcolor.BWhite + SELECTED_MON + fcolor.BRed + " ] is not available." ,"")
print ""
SELECTED_MON=SelectMonitorToUse()
else:
SELECTED_MON=SelectMonitorToUse()
printc (" ", fcolor.SWhite + "Selected Monitoring Interface ==> " + fcolor.BRed + str(SELECTED_MON),"")
print ""
ps=subprocess.Popen("ifconfig " + str(SELECTED_MON) + " up > /dev/null 2>&1" , shell=True, stdout=subprocess.PIPE,stderr=open(os.devnull, 'w'))
x=0
while x<int(LoopCount):
captured_pcap=tmpdir + "captured"
CaptureTraffic()
ConvertPackets()
AnalyseCaptured()
x=x+1
if int(LoopCount)-x<3 and int(LoopCount)!=x:
printc (" ", "Remaining loop count : " + str(int(LoopCount)-x),"")
printc ("i", fcolor.BWhite + "Completed !! ","")
exit()
except (KeyboardInterrupt, SystemExit):
printd("KeyboardInterrupt - " + str(KeyboardInterrupt) + "\n SystemExit - " + str(SystemExit))
printc (" ","","")
printc ("*", fcolor.BRed + "Application shutdown !!","")
if TimeStart!="":
result=DisplayTimeStamp("summary-a","")
if PrintToFile=="1":
print fcolor.BGreen + " Result Log\t: " + fcolor.SGreen + LogFile
open(LogFile,"a+b").write("\n\n")
PrintToFile="0"
print ""
MonCt = GetInterfaceList("MON")
X=0
while X<MonCt:
PM=len(MONList)
Y=0
while Y<PM:
if MONList[Y]==IFaceList[X]:
IFaceList[Y]=""
Y=Y+1
X=X+1
PM=len(IFaceList)
Y=0
while Y<PM:
if IFaceList[Y]!="":
printc (".", "Stopping " + str(IFaceList[Y]) + "....","")
ps=subprocess.Popen("ifconfig " + str(IFaceList[Y]) + " down > /dev/null 2>&1", shell=True, stdout=subprocess.PIPE,stderr=open(os.devnull, 'w'))
ps.wait()
ps=subprocess.Popen("iwconfig " + str(IFaceList[Y]) + " mode managed > /dev/null 2>&1", shell=True, stdout=subprocess.PIPE,stderr=open(os.devnull, 'w'))
ps.wait()
ps=subprocess.Popen("ifconfig " + str(IFaceList[Y]) + " up > /dev/null 2>&1", shell=True, stdout=subprocess.PIPE,stderr=open(os.devnull, 'w'))
ps.wait()
time.sleep(0.1)
Y=Y+1
ps=subprocess.Popen("killall 'airodump-ng' > /dev/null 2>&1" , shell=True, stdout=subprocess.PIPE)
time.sleep(0.1)
ps=subprocess.Popen("killall 'tshark' > /dev/null 2>&1" , shell=True, stdout=subprocess.PIPE)
time.sleep(0.1)
print fcolor.BWhite + "Please support by liking my page at " + fcolor.BBlue + "https://www.facebook.com/syworks" +fcolor.BWhite + " (SYWorks-Programming)"
print ""
| gpl-2.0 |
andymckay/zamboni | mkt/webapps/management/commands/convert_icons.py | 1 | 2020 | import os
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand
from django.core.files.storage import default_storage as storage
import amo
from amo.decorators import write
from amo.utils import chunked, resize_image
from mkt.site.storage_utils import walk_storage
from mkt.webapps.models import Addon
extensions = ['.png', '.jpg', '.gif']
sizes = amo.APP_ICON_SIZES
size_suffixes = ['-%s' % s for s in sizes]
@write
def convert(directory, delete=False):
print 'Converting icons in %s' % directory
pks = []
k = 0
for path, names, filenames in walk_storage(directory):
for filename in filenames:
old = os.path.join(path, filename)
pre, ext = os.path.splitext(old)
if (pre[-3:] in size_suffixes or ext not in extensions):
continue
if not storage.size(old):
print 'Icon %s is empty, ignoring.' % old
continue
for size, size_suffix in zip(sizes, size_suffixes):
new = '%s%s%s' % (pre, size_suffix, '.png')
if os.path.exists(new):
continue
resize_image(old, new, (size, size), remove_src=False)
if ext != '.png':
pks.append(os.path.basename(pre))
if delete:
storage.delete(old)
k += 1
if not k % 1000:
print "... converted %s" % k
for chunk in chunked(pks, 100):
Addon.objects.filter(pk__in=chunk).update(icon_type='image/png')
class Command(BaseCommand):
help = 'Process icons to -32, -48, -64 and optionally delete'
option_list = BaseCommand.option_list + (
make_option('--delete', action='store_true',
dest='delete', help='Deletes the old icons.'),
)
def handle(self, *args, **options):
start_dir = settings.ADDON_ICONS_PATH
convert(start_dir, delete=options.get('delete'))
| bsd-3-clause |
FrankBian/kuma | vendor/packages/translate-toolkit/translate/lang/test_ar.py | 7 | 1306 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from translate.lang import factory
def test_punctranslate():
"""Tests that we can translate punctuation."""
language = factory.getlanguage('ar')
assert language.punctranslate(u"") == u""
assert language.punctranslate(u"abc efg") == u"abc efg"
assert language.punctranslate(u"abc efg.") == u"abc efg."
assert language.punctranslate(u"abc, efg; d?") == u"abc، efg؛ d؟"
def test_sentences():
"""Tests basic functionality of sentence segmentation."""
language = factory.getlanguage('ar')
sentences = language.sentences(u"")
assert sentences == []
sentences = language.sentences(u"يوجد بالفعل مجلد بالإسم \"%s\". أترغب في استبداله؟")
print sentences
assert sentences == [u"يوجد بالفعل مجلد بالإسم \"%s\".", u"أترغب في استبداله؟"]
# This probably doesn't make sense: it is just the above reversed, to make sure
# we test the '؟' as an end of sentence marker.
sentences = language.sentences(u"أترغب في استبداله؟ يوجد بالفعل مجلد بالإسم \"%s\".")
print sentences
assert sentences == [u"أترغب في استبداله؟", u"يوجد بالفعل مجلد بالإسم \"%s\"."]
| mpl-2.0 |
droidlabour/git_intgrtn_aws_s3 | CreateSSHKey/Crypto/SelfTest/PublicKey/test_importKey.py | 112 | 14943 | # -*- coding: utf-8 -*-
#
# SelfTest/PublicKey/test_importKey.py: Self-test for importing RSA keys
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
from __future__ import nested_scopes
__revision__ = "$Id$"
import unittest
from Crypto.PublicKey import RSA
from Crypto.SelfTest.st_common import *
from Crypto.Util.py3compat import *
from Crypto.Util.number import inverse
from Crypto.Util import asn1
def der2pem(der, text='PUBLIC'):
import binascii
chunks = [ binascii.b2a_base64(der[i:i+48]) for i in range(0, len(der), 48) ]
pem = b('-----BEGIN %s KEY-----\n' % text)
pem += b('').join(chunks)
pem += b('-----END %s KEY-----' % text)
return pem
class ImportKeyTests(unittest.TestCase):
# 512-bit RSA key generated with openssl
rsaKeyPEM = u'''-----BEGIN RSA PRIVATE KEY-----
MIIBOwIBAAJBAL8eJ5AKoIsjURpcEoGubZMxLD7+kT+TLr7UkvEtFrRhDDKMtuII
q19FrL4pUIMymPMSLBn3hJLe30Dw48GQM4UCAwEAAQJACUSDEp8RTe32ftq8IwG8
Wojl5mAd1wFiIOrZ/Uv8b963WJOJiuQcVN29vxU5+My9GPZ7RA3hrDBEAoHUDPrI
OQIhAPIPLz4dphiD9imAkivY31Rc5AfHJiQRA7XixTcjEkojAiEAyh/pJHks/Mlr
+rdPNEpotBjfV4M4BkgGAA/ipcmaAjcCIQCHvhwwKVBLzzTscT2HeUdEeBMoiXXK
JACAr3sJQJGxIQIgarRp+m1WSKV1MciwMaTOnbU7wxFs9DP1pva76lYBzgUCIQC9
n0CnZCJ6IZYqSt0H5N7+Q+2Ro64nuwV/OSQfM6sBwQ==
-----END RSA PRIVATE KEY-----'''
# As above, but this is actually an unencrypted PKCS#8 key
rsaKeyPEM8 = u'''-----BEGIN PRIVATE KEY-----
MIIBVQIBADANBgkqhkiG9w0BAQEFAASCAT8wggE7AgEAAkEAvx4nkAqgiyNRGlwS
ga5tkzEsPv6RP5MuvtSS8S0WtGEMMoy24girX0WsvilQgzKY8xIsGfeEkt7fQPDj
wZAzhQIDAQABAkAJRIMSnxFN7fZ+2rwjAbxaiOXmYB3XAWIg6tn9S/xv3rdYk4mK
5BxU3b2/FTn4zL0Y9ntEDeGsMEQCgdQM+sg5AiEA8g8vPh2mGIP2KYCSK9jfVFzk
B8cmJBEDteLFNyMSSiMCIQDKH+kkeSz8yWv6t080Smi0GN9XgzgGSAYAD+KlyZoC
NwIhAIe+HDApUEvPNOxxPYd5R0R4EyiJdcokAICvewlAkbEhAiBqtGn6bVZIpXUx
yLAxpM6dtTvDEWz0M/Wm9rvqVgHOBQIhAL2fQKdkInohlipK3Qfk3v5D7ZGjrie7
BX85JB8zqwHB
-----END PRIVATE KEY-----'''
# The same RSA private key as in rsaKeyPEM, but now encrypted
rsaKeyEncryptedPEM=(
# With DES and passphrase 'test'
('test', u'''-----BEGIN RSA PRIVATE KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: DES-CBC,AF8F9A40BD2FA2FC
Ckl9ex1kaVEWhYC2QBmfaF+YPiR4NFkRXA7nj3dcnuFEzBnY5XULupqQpQI3qbfA
u8GYS7+b3toWWiHZivHbAAUBPDIZG9hKDyB9Sq2VMARGsX1yW1zhNvZLIiVJzUHs
C6NxQ1IJWOXzTew/xM2I26kPwHIvadq+/VaT8gLQdjdH0jOiVNaevjWnLgrn1mLP
BCNRMdcexozWtAFNNqSzfW58MJL2OdMi21ED184EFytIc1BlB+FZiGZduwKGuaKy
9bMbdb/1PSvsSzPsqW7KSSrTw6MgJAFJg6lzIYvR5F4poTVBxwBX3+EyEmShiaNY
IRX3TgQI0IjrVuLmvlZKbGWP18FXj7I7k9tSsNOOzllTTdq3ny5vgM3A+ynfAaxp
dysKznQ6P+IoqML1WxAID4aGRMWka+uArOJ148Rbj9s=
-----END RSA PRIVATE KEY-----''',
"\xAF\x8F\x9A\x40\xBD\x2F\xA2\xFC"),
# With Triple-DES and passphrase 'rocking'
('rocking', u'''-----BEGIN RSA PRIVATE KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: DES-EDE3-CBC,C05D6C07F7FC02F6
w4lwQrXaVoTTJ0GgwY566htTA2/t1YlimhxkxYt9AEeCcidS5M0Wq9ClPiPz9O7F
m6K5QpM1rxo1RUE/ZyI85gglRNPdNwkeTOqit+kum7nN73AToX17+irVmOA4Z9E+
4O07t91GxGMcjUSIFk0ucwEU4jgxRvYscbvOMvNbuZszGdVNzBTVddnShKCsy9i7
nJbPlXeEKYi/OkRgO4PtfqqWQu5GIEFVUf9ev1QV7AvC+kyWTR1wWYnHX265jU5c
sopxQQtP8XEHIJEdd5/p1oieRcWTCNyY8EkslxDSsrf0OtZp6mZH9N+KU47cgQtt
9qGORmlWnsIoFFKcDohbtOaWBTKhkj5h6OkLjFjfU/sBeV1c+7wDT3dAy5tawXjG
YSxC7qDQIT/RECvV3+oQKEcmpEujn45wAnkTi12BH30=
-----END RSA PRIVATE KEY-----''',
"\xC0\x5D\x6C\x07\xF7\xFC\x02\xF6"),
)
rsaPublicKeyPEM = u'''-----BEGIN PUBLIC KEY-----
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAL8eJ5AKoIsjURpcEoGubZMxLD7+kT+T
Lr7UkvEtFrRhDDKMtuIIq19FrL4pUIMymPMSLBn3hJLe30Dw48GQM4UCAwEAAQ==
-----END PUBLIC KEY-----'''
# Obtained using 'ssh-keygen -i -m PKCS8 -f rsaPublicKeyPEM'
rsaPublicKeyOpenSSH = '''ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQC/HieQCqCLI1EaXBKBrm2TMSw+/pE/ky6+1JLxLRa0YQwyjLbiCKtfRay+KVCDMpjzEiwZ94SS3t9A8OPBkDOF comment\n'''
# The private key, in PKCS#1 format encoded with DER
rsaKeyDER = a2b_hex(
'''3082013b020100024100bf1e27900aa08b23511a5c1281ae6d93312c3efe
913f932ebed492f12d16b4610c328cb6e208ab5f45acbe2950833298f312
2c19f78492dedf40f0e3c190338502030100010240094483129f114dedf6
7edabc2301bc5a88e5e6601dd7016220ead9fd4bfc6fdeb75893898ae41c
54ddbdbf1539f8ccbd18f67b440de1ac30440281d40cfac839022100f20f
2f3e1da61883f62980922bd8df545ce407c726241103b5e2c53723124a23
022100ca1fe924792cfcc96bfab74f344a68b418df578338064806000fe2
a5c99a023702210087be1c3029504bcf34ec713d877947447813288975ca
240080af7b094091b12102206ab469fa6d5648a57531c8b031a4ce9db53b
c3116cf433f5a6f6bbea5601ce05022100bd9f40a764227a21962a4add07
e4defe43ed91a3ae27bb057f39241f33ab01c1
'''.replace(" ",""))
# The private key, in unencrypted PKCS#8 format encoded with DER
rsaKeyDER8 = a2b_hex(
'''30820155020100300d06092a864886f70d01010105000482013f3082013
b020100024100bf1e27900aa08b23511a5c1281ae6d93312c3efe913f932
ebed492f12d16b4610c328cb6e208ab5f45acbe2950833298f3122c19f78
492dedf40f0e3c190338502030100010240094483129f114dedf67edabc2
301bc5a88e5e6601dd7016220ead9fd4bfc6fdeb75893898ae41c54ddbdb
f1539f8ccbd18f67b440de1ac30440281d40cfac839022100f20f2f3e1da
61883f62980922bd8df545ce407c726241103b5e2c53723124a23022100c
a1fe924792cfcc96bfab74f344a68b418df578338064806000fe2a5c99a0
23702210087be1c3029504bcf34ec713d877947447813288975ca240080a
f7b094091b12102206ab469fa6d5648a57531c8b031a4ce9db53bc3116cf
433f5a6f6bbea5601ce05022100bd9f40a764227a21962a4add07e4defe4
3ed91a3ae27bb057f39241f33ab01c1
'''.replace(" ",""))
rsaPublicKeyDER = a2b_hex(
'''305c300d06092a864886f70d0101010500034b003048024100bf1e27900a
a08b23511a5c1281ae6d93312c3efe913f932ebed492f12d16b4610c328c
b6e208ab5f45acbe2950833298f3122c19f78492dedf40f0e3c190338502
03010001
'''.replace(" ",""))
n = long('BF 1E 27 90 0A A0 8B 23 51 1A 5C 12 81 AE 6D 93 31 2C 3E FE 91 3F 93 2E BE D4 92 F1 2D 16 B4 61 0C 32 8C B6 E2 08 AB 5F 45 AC BE 29 50 83 32 98 F3 12 2C 19 F7 84 92 DE DF 40 F0 E3 C1 90 33 85'.replace(" ",""),16)
e = 65537L
d = long('09 44 83 12 9F 11 4D ED F6 7E DA BC 23 01 BC 5A 88 E5 E6 60 1D D7 01 62 20 EA D9 FD 4B FC 6F DE B7 58 93 89 8A E4 1C 54 DD BD BF 15 39 F8 CC BD 18 F6 7B 44 0D E1 AC 30 44 02 81 D4 0C FA C8 39'.replace(" ",""),16)
p = long('00 F2 0F 2F 3E 1D A6 18 83 F6 29 80 92 2B D8 DF 54 5C E4 07 C7 26 24 11 03 B5 E2 C5 37 23 12 4A 23'.replace(" ",""),16)
q = long('00 CA 1F E9 24 79 2C FC C9 6B FA B7 4F 34 4A 68 B4 18 DF 57 83 38 06 48 06 00 0F E2 A5 C9 9A 02 37'.replace(" ",""),16)
# This is q^{-1} mod p). fastmath and slowmath use pInv (p^{-1}
# mod q) instead!
qInv = long('00 BD 9F 40 A7 64 22 7A 21 96 2A 4A DD 07 E4 DE FE 43 ED 91 A3 AE 27 BB 05 7F 39 24 1F 33 AB 01 C1'.replace(" ",""),16)
pInv = inverse(p,q)
def testImportKey1(self):
"""Verify import of RSAPrivateKey DER SEQUENCE"""
key = self.rsa.importKey(self.rsaKeyDER)
self.failUnless(key.has_private())
self.assertEqual(key.n, self.n)
self.assertEqual(key.e, self.e)
self.assertEqual(key.d, self.d)
self.assertEqual(key.p, self.p)
self.assertEqual(key.q, self.q)
def testImportKey2(self):
"""Verify import of SubjectPublicKeyInfo DER SEQUENCE"""
key = self.rsa.importKey(self.rsaPublicKeyDER)
self.failIf(key.has_private())
self.assertEqual(key.n, self.n)
self.assertEqual(key.e, self.e)
def testImportKey3unicode(self):
"""Verify import of RSAPrivateKey DER SEQUENCE, encoded with PEM as unicode"""
key = RSA.importKey(self.rsaKeyPEM)
self.assertEqual(key.has_private(),True) # assert_
self.assertEqual(key.n, self.n)
self.assertEqual(key.e, self.e)
self.assertEqual(key.d, self.d)
self.assertEqual(key.p, self.p)
self.assertEqual(key.q, self.q)
def testImportKey3bytes(self):
"""Verify import of RSAPrivateKey DER SEQUENCE, encoded with PEM as byte string"""
key = RSA.importKey(b(self.rsaKeyPEM))
self.assertEqual(key.has_private(),True) # assert_
self.assertEqual(key.n, self.n)
self.assertEqual(key.e, self.e)
self.assertEqual(key.d, self.d)
self.assertEqual(key.p, self.p)
self.assertEqual(key.q, self.q)
def testImportKey4unicode(self):
"""Verify import of RSAPrivateKey DER SEQUENCE, encoded with PEM as unicode"""
key = RSA.importKey(self.rsaPublicKeyPEM)
self.assertEqual(key.has_private(),False) # failIf
self.assertEqual(key.n, self.n)
self.assertEqual(key.e, self.e)
def testImportKey4bytes(self):
"""Verify import of SubjectPublicKeyInfo DER SEQUENCE, encoded with PEM as byte string"""
key = RSA.importKey(b(self.rsaPublicKeyPEM))
self.assertEqual(key.has_private(),False) # failIf
self.assertEqual(key.n, self.n)
self.assertEqual(key.e, self.e)
def testImportKey5(self):
"""Verifies that the imported key is still a valid RSA pair"""
key = RSA.importKey(self.rsaKeyPEM)
idem = key.encrypt(key.decrypt(b("Test")),0)
self.assertEqual(idem[0],b("Test"))
def testImportKey6(self):
"""Verifies that the imported key is still a valid RSA pair"""
key = RSA.importKey(self.rsaKeyDER)
idem = key.encrypt(key.decrypt(b("Test")),0)
self.assertEqual(idem[0],b("Test"))
def testImportKey7(self):
"""Verify import of OpenSSH public key"""
key = self.rsa.importKey(self.rsaPublicKeyOpenSSH)
self.assertEqual(key.n, self.n)
self.assertEqual(key.e, self.e)
def testImportKey8(self):
"""Verify import of encrypted PrivateKeyInfo DER SEQUENCE"""
for t in self.rsaKeyEncryptedPEM:
key = self.rsa.importKey(t[1], t[0])
self.failUnless(key.has_private())
self.assertEqual(key.n, self.n)
self.assertEqual(key.e, self.e)
self.assertEqual(key.d, self.d)
self.assertEqual(key.p, self.p)
self.assertEqual(key.q, self.q)
def testImportKey9(self):
"""Verify import of unencrypted PrivateKeyInfo DER SEQUENCE"""
key = self.rsa.importKey(self.rsaKeyDER8)
self.failUnless(key.has_private())
self.assertEqual(key.n, self.n)
self.assertEqual(key.e, self.e)
self.assertEqual(key.d, self.d)
self.assertEqual(key.p, self.p)
self.assertEqual(key.q, self.q)
def testImportKey10(self):
"""Verify import of unencrypted PrivateKeyInfo DER SEQUENCE, encoded with PEM"""
key = self.rsa.importKey(self.rsaKeyPEM8)
self.failUnless(key.has_private())
self.assertEqual(key.n, self.n)
self.assertEqual(key.e, self.e)
self.assertEqual(key.d, self.d)
self.assertEqual(key.p, self.p)
self.assertEqual(key.q, self.q)
def testImportKey11(self):
"""Verify import of RSAPublicKey DER SEQUENCE"""
der = asn1.DerSequence([17, 3]).encode()
key = self.rsa.importKey(der)
self.assertEqual(key.n, 17)
self.assertEqual(key.e, 3)
def testImportKey12(self):
"""Verify import of RSAPublicKey DER SEQUENCE, encoded with PEM"""
der = asn1.DerSequence([17, 3]).encode()
pem = der2pem(der)
key = self.rsa.importKey(pem)
self.assertEqual(key.n, 17)
self.assertEqual(key.e, 3)
###
def testExportKey1(self):
key = self.rsa.construct([self.n, self.e, self.d, self.p, self.q, self.pInv])
derKey = key.exportKey("DER")
self.assertEqual(derKey, self.rsaKeyDER)
def testExportKey2(self):
key = self.rsa.construct([self.n, self.e])
derKey = key.exportKey("DER")
self.assertEqual(derKey, self.rsaPublicKeyDER)
def testExportKey3(self):
key = self.rsa.construct([self.n, self.e, self.d, self.p, self.q, self.pInv])
pemKey = key.exportKey("PEM")
self.assertEqual(pemKey, b(self.rsaKeyPEM))
def testExportKey4(self):
key = self.rsa.construct([self.n, self.e])
pemKey = key.exportKey("PEM")
self.assertEqual(pemKey, b(self.rsaPublicKeyPEM))
def testExportKey5(self):
key = self.rsa.construct([self.n, self.e])
openssh_1 = key.exportKey("OpenSSH").split()
openssh_2 = self.rsaPublicKeyOpenSSH.split()
self.assertEqual(openssh_1[0], openssh_2[0])
self.assertEqual(openssh_1[1], openssh_2[1])
def testExportKey4(self):
key = self.rsa.construct([self.n, self.e, self.d, self.p, self.q, self.pInv])
# Tuple with index #1 is encrypted with 3DES
t = map(b,self.rsaKeyEncryptedPEM[1])
# Force the salt being used when exporting
key._randfunc = lambda N: (t[2]*divmod(N+len(t[2]),len(t[2]))[0])[:N]
pemKey = key.exportKey("PEM", t[0])
self.assertEqual(pemKey, t[1])
def testExportKey5(self):
key = self.rsa.construct([self.n, self.e, self.d, self.p, self.q, self.pInv])
derKey = key.exportKey("DER", pkcs=8)
self.assertEqual(derKey, self.rsaKeyDER8)
def testExportKey6(self):
key = self.rsa.construct([self.n, self.e, self.d, self.p, self.q, self.pInv])
pemKey = key.exportKey("PEM", pkcs=8)
self.assertEqual(pemKey, b(self.rsaKeyPEM8))
class ImportKeyTestsSlow(ImportKeyTests):
def setUp(self):
self.rsa = RSA.RSAImplementation(use_fast_math=0)
class ImportKeyTestsFast(ImportKeyTests):
def setUp(self):
self.rsa = RSA.RSAImplementation(use_fast_math=1)
def get_tests(config={}):
    """Build the list of test cases for this module.

    The fast-math variants are included only when the optional
    ``_fastmath`` C extension can be imported; the pure-Python
    variants always run.
    """
    tests = []
    try:
        from Crypto.PublicKey import _fastmath
        tests += list_test_cases(ImportKeyTestsFast)
    except ImportError:
        pass
    tests += list_test_cases(ImportKeyTestsSlow)
    return tests

# Bug fix: an earlier, duplicate ``if __name__ == '__main__':
# unittest.main()`` guard (placed before get_tests was even defined)
# ran and exited before this suite-based entry point could execute.
if __name__ == '__main__':
    suite = lambda: unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| mit |
rootfs/origin | cmd/service-catalog/go/src/github.com/kubernetes-incubator/service-catalog/vendor/k8s.io/kubernetes/translations/extract.py | 377 | 3965 | #!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract strings from command files and externalize into translation files.
Expects to be run from the root directory of the repository.
Usage:
extract.py pkg/kubectl/cmd/apply.go
"""
import fileinput
import sys
import re
class MatchHandler(object):
    """ Simple holder for a regular expression and a function
    to run if that regular expression matches a line.
    The function should expect (re.match, file, linenumber) as parameters
    """
    def __init__(self, regex, replace_fn):
        # Compile once up front; each handler is applied to every line
        # of the input file.
        self.regex = re.compile(regex)
        self.replace_fn = replace_fn
def short_replace(match, file, line_number):
    """Wrap the quoted text of a cobra ``Short:`` description in i18n.T().

    Writes the rewritten line to stdout.  *file* and *line_number* are
    unused but keep the common MatchHandler callback signature.
    """
    prefix = match.group(1)
    quoted = match.group(2)
    sys.stdout.write('%si18n.T(%s),\n' % (prefix, quoted))
# Matches a cobra `Short: "..."` field; the callback wraps the text in i18n.T().
SHORT_MATCH = MatchHandler(r'(\s+Short:\s+)("[^"]+"),', short_replace)
def import_replace(match, file, line_number):
    """Append the i18n package to a Go import block.

    Doesn't try to be smart and detect if it's already present; a later
    gofmt/goimports round is expected to clean the block up.  Writes to
    stdout; *file* and *line_number* are unused.
    """
    existing_import = match.group(1)
    sys.stdout.write('%s\n"k8s.io/kubernetes/pkg/util/i18n"\n' % existing_import)
# Matches the kubectl cmd/util import line; the callback appends the i18n import.
IMPORT_MATCH = MatchHandler('(.*"k8s.io/kubernetes/pkg/kubectl/cmd/util")', import_replace)
def string_flag_replace(match, file, line_number):
    """Replace a cmd.Flags().String("...", "", "...") help text with i18n.T().

    Writes the rewritten line to stdout.  *file* and *line_number* are
    unused but keep the MatchHandler callback signature.
    """
    # Bug fix: the old template '{}i18n.T("{})"))\n' leaked a stray ')'
    # into the translated string, emitting i18n.T("help text)")) instead
    # of i18n.T("help text")).
    sys.stdout.write('{}i18n.T("{}"))\n'.format(match.group(1), match.group(2)))
# Matches the help-text argument of cmd.Flags().String(...); the callback
# wraps it in i18n.T().
STRING_FLAG_MATCH = MatchHandler('(\s+cmd\.Flags\(\).String\("[^"]*", "[^"]*", )"([^"]*)"\)', string_flag_replace)
def long_string_replace(match, file, line_number):
    """Return the matched backtick-quoted Go string wrapped in i18n.T().

    Unlike the single-line handlers, this returns the replacement text
    (the caller substitutes it into the whole-file contents) rather than
    writing to stdout.
    """
    head, literal, tail = match.group(1), match.group(2), match.group(3)
    return '%si18n.T(%s)%s' % (head, literal, tail)
# Multiline matchers: wrap the backtick strings of templates.LongDesc(...)
# and templates.Examples(...) in i18n.T(); applied to whole-file contents.
LONG_DESC_MATCH = MatchHandler('(LongDesc\()(`[^`]+`)([^\n]\n)', long_string_replace)
EXAMPLE_MATCH = MatchHandler('(Examples\()(`[^`]+`)([^\n]\n)', long_string_replace)
def replace(filename, matchers, multiline_matchers):
    """Given a file and a set of matchers, run those matchers
    across the file and replace it with the results.
    """
    # Run all the matchers
    line_number = 0
    # fileinput with inplace=True redirects sys.stdout into the file being
    # edited, so both the handlers' writes and the fall-through write below
    # rewrite the file line by line.
    for line in fileinput.input(filename, inplace=True):
        line_number += 1
        matched = False
        for matcher in matchers:
            match = matcher.regex.match(line)
            if match:
                matcher.replace_fn(match, filename, line_number)
                matched = True
                break
        if not matched:
            sys.stdout.write(line)
    sys.stdout.flush()
    # Second pass: multiline matchers are applied repeatedly to the whole
    # file contents, one substitution at a time.
    with open(filename, 'r') as datafile:
        content = datafile.read()
        for matcher in multiline_matchers:
            match = matcher.regex.search(content)
            while match:
                rep = matcher.replace_fn(match, filename, 0)
                # Escape back references in the replacement string
                # (And escape for Python)
                # (And escape for regex)
                rep = re.sub('\\\\(\\d)', '\\\\\\\\\\1', rep)
                content = matcher.regex.sub(rep, content, 1)
                match = matcher.regex.search(content)
        # NOTE(review): the multiline result is written to real stdout here,
        # not back into *filename* — presumably captured by the caller;
        # confirm against how this script is invoked.
        sys.stdout.write(content)
    # gofmt the file again
    from subprocess import call
    call(["goimports", "-w", filename])
# Script entry point: rewrite the Go source file named on the command line.
replace(sys.argv[1], [SHORT_MATCH, IMPORT_MATCH, STRING_FLAG_MATCH], [LONG_DESC_MATCH, EXAMPLE_MATCH])
| apache-2.0 |
joelcan/tools-eth-contract-dev | pyethereum/pyethereum/_version.py | 1 | 7911 | """
Version management with versioneer
https://github.com/warner/python-versioneer
Distribution through PyPI
1: git tag 0.6.31
2: python setup.py register sdist upload
Distribution through github
(i.e. users use github to generate tarballs with git archive)
1: git tag 0.6.31
2: git push; git push --tags
Tag Scheme:
major.minor.micro = 0.{Poc Num}.{ETHEREUM_PROTOCOL_VERSION}
pyethereum.packeter.Packeter.ETHEREUM_PROTOCOL_VERSION
"""
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.11 (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"

# these strings are filled in when 'setup.py versioneer' creates _version.py
# tag_prefix: version tags carry no prefix (tags are plain e.g. "0.6.31")
tag_prefix = ""
# parentdir_prefix: expected name prefix of an unpacked sdist directory
parentdir_prefix = "pyethereum-"
# versionfile_source: this file's path relative to the project root
versionfile_source = "pyethereum/_version.py"
import os
import sys
import re
import subprocess
import errno
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
    """Run the first launchable executable from *commands* with *args*.

    Each candidate in *commands* is tried in order (shell=False, so on
    Windows this is how "git.cmd" vs "git.exe" gets resolved).  Returns
    the stripped stdout of the process, or None if no candidate could be
    launched or the process exited non-zero.
    """
    assert isinstance(commands, list)
    process = None
    for candidate in commands:
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen(
                [candidate] + args, cwd=cwd, stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None))
            break
        except EnvironmentError:
            err = sys.exc_info()[1]
            if err.errno == errno.ENOENT:
                # candidate not installed; try the next one
                continue
            if verbose:
                print("unable to run %s" % args[0])
                print(err)
            return None
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None
    output = process.communicate()[0].strip()
    if sys.version >= '3':
        output = output.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % args[0])
        return None
    return output
def versions_from_parentdir(parentdir_prefix, root, verbose=False):
    """Derive the version from the name of the unpacked source directory.

    Source tarballs conventionally unpack into ``<project>-<version>/``;
    when *root*'s basename starts with *parentdir_prefix*, the remainder
    is taken as the version.  Returns None when the prefix does not match.
    """
    dirname = os.path.basename(root)
    if dirname.startswith(parentdir_prefix):
        return {"version": dirname[len(parentdir_prefix):], "full": ""}
    if verbose:
        print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
              (root, dirname, parentdir_prefix))
    return None
def git_get_keywords(versionfile_abs):
    """Extract the git-archive keyword strings from a _version.py file.

    The file is scanned textually (with a regexp, not imported) so that
    setup.py can inspect it without executing project code.  A missing or
    unreadable file yields an empty dict; only the keywords actually
    found appear in the result.
    """
    keywords = {}
    value_re = re.compile(r'=\s*"(.*)"')
    try:
        with open(versionfile_abs, "r") as handle:
            for line in handle:
                stripped = line.strip()
                if stripped.startswith("git_refnames ="):
                    mo = value_re.search(line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if stripped.startswith("git_full ="):
                    mo = value_re.search(line)
                    if mo:
                        keywords["full"] = mo.group(1)
    except EnvironmentError:
        pass
    return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False):
    """Compute version info from expanded git-archive keyword strings.

    Returns ``{"version": ..., "full": ...}`` on success, or ``{}`` when
    the keywords are missing or still unexpanded ("$Format...$", i.e. we
    are not inside an unpacked git-archive tarball).
    """
    if not keywords:
        return {}  # keyword-finding function failed to find keywords
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        return {}  # unexpanded, so not in an unpacked git-archive tarball
    refs = set(r.strip() for r in refnames.strip("()").split(","))
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set(r[len(TAG):] for r in refs if r.startswith(TAG))
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set(r for r in refs if re.search(r'\d', r))
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if not ref.startswith(tag_prefix):
            continue
        version = ref[len(tag_prefix):]
        if verbose:
            print("picking %s" % version)
        return {"version": version,
                "full": keywords["full"].strip()}
    # no suitable tags, so we use the full revision id
    if verbose:
        print("no suitable tags, using full revision id")
    full = keywords["full"].strip()
    return {"version": full, "full": full}
def git_versions_from_vcs(tag_prefix, root, verbose=False):
    """Ask git itself (``git describe`` / ``rev-parse``) for version info.

    Only meaningful inside a checked-out source tree: this runs only when
    the git-archive keywords were *not* expanded and _version.py has not
    been rewritten with a short version string.  Returns ``{}`` when
    *root* has no .git directory or git cannot be queried.
    """
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        return {}
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    described = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
                            cwd=root)
    if described is None:
        return {}
    if not described.startswith(tag_prefix):
        if verbose:
            print("tag '%s' doesn't start with prefix '%s'" % (described, tag_prefix))
        return {}
    tag = described[len(tag_prefix):]
    head = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if head is None:
        return {}
    full = head.strip()
    if tag.endswith("-dirty"):
        # propagate the dirty marker onto the full revision id as well
        full += "-dirty"
    return {"version": tag, "full": full}
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
    """Return version info, trying each strategy in turn.

    Order: expanded git-archive keywords, then asking git directly from
    the source tree, then the parent directory name, then *default*.
    """
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    keywords = {"refnames": git_refnames, "full": git_full}
    from_keywords = git_versions_from_keywords(keywords, tag_prefix, verbose)
    if from_keywords:
        return from_keywords
    try:
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        root = os.path.abspath(__file__)
        for _ in versionfile_source.split(os.sep):
            root = os.path.dirname(root)
    except NameError:
        return default
    return (git_versions_from_vcs(tag_prefix, root, verbose)
            or versions_from_parentdir(parentdir_prefix, root, verbose)
            or default)
| mit |
omni5cience/django-inlineformfield | .tox/py27/lib/python2.7/site-packages/IPython/nbconvert/preprocessors/extractoutput.py | 8 | 4702 | """Module containing a preprocessor that extracts all of the outputs from the
notebook file. The extracted outputs are returned in the 'resources' dictionary.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import base64
import sys
import os
from mimetypes import guess_extension
from IPython.utils.traitlets import Unicode, Set
from .base import Preprocessor
from IPython.utils import py3compat
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class ExtractOutputPreprocessor(Preprocessor):
    """
    Extracts all of the outputs from the notebook file. The extracted
    outputs are returned in the 'resources' dictionary.
    """

    # Filename pattern for each extracted output; the placeholders are
    # filled in inside preprocess_cell below.
    output_filename_template = Unicode(
        "{unique_key}_{cell_index}_{index}{extension}", config=True)

    # Output (MIME) types that are pulled out of the notebook into
    # separate files.
    extract_output_types = Set({'png', 'jpeg', 'svg', 'application/pdf'}, config=True)

    def preprocess_cell(self, cell, resources, cell_index):
        """
        Apply a transformation on each cell,

        Parameters
        ----------
        cell : NotebookNode cell
            Notebook cell being processed
        resources : dictionary
            Additional resources used in the conversion process.  Allows
            preprocessors to pass variables into the Jinja engine.
        cell_index : int
            Index of the cell being processed (see base.py)
        """

        #Get the unique key from the resource dict if it exists. If it does not
        #exist, use 'output' as the default. Also, get files directory if it
        #has been specified
        unique_key = resources.get('unique_key', 'output')
        output_files_dir = resources.get('output_files_dir', None)

        #Make sure outputs key exists
        if not isinstance(resources['outputs'], dict):
            resources['outputs'] = {}

        #Loop through all of the outputs in the cell
        for index, out in enumerate(cell.get('outputs', [])):

            #Get the output in data formats that the template needs extracted
            for out_type in self.extract_output_types:
                if out_type in out:
                    data = out[out_type]

                    #Binary files are base64-encoded, SVG is already XML
                    if out_type in {'png', 'jpeg', 'application/pdf'}:
                        # data is b64-encoded as text (str, unicode)
                        # decodestring only accepts bytes
                        data = py3compat.cast_bytes(data)
                        data = base64.decodestring(data)
                    elif sys.platform == 'win32':
                        # NOTE(review): text outputs get CRLF endings only on
                        # Windows before encoding — presumably so the written
                        # files open cleanly in native editors; confirm.
                        data = data.replace('\n', '\r\n').encode("UTF-8")
                    else:
                        data = data.encode("UTF-8")

                    # Build an output name
                    # filthy hack while we have some mimetype output, and some not
                    if '/' in out_type:
                        ext = guess_extension(out_type)
                        if ext is None:
                            # guess_extension doesn't know the type; fall back
                            # to the mimetype's subtype as the extension
                            ext = '.' + out_type.rsplit('/')[-1]
                    else:
                        ext = '.' + out_type

                    filename = self.output_filename_template.format(
                        unique_key=unique_key,
                        cell_index=cell_index,
                        index=index,
                        extension=ext)

                    #On the cell, make the figure available via
                    # cell.outputs[i].svg_filename ... etc (svg in example)
                    # Where
                    # cell.outputs[i].svg contains the data
                    if output_files_dir is not None:
                        filename = os.path.join(output_files_dir, filename)
                    out[out_type + '_filename'] = filename

                    #In the resources, make the figure available via
                    # resources['outputs']['filename'] = data
                    resources['outputs'][filename] = data

        return cell, resources
| mit |
ntt-sic/neutron | neutron/tests/unit/test_api_v2_resource.py | 3 | 14593 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Zhongyue Luo, Intel Corporation.
#
import mock
from webob import exc
import webtest
from neutron.api.v2 import resource as wsgi_resource
from neutron.common import exceptions as q_exc
from neutron import context
from neutron.openstack.common import gettextutils
from neutron.tests import base
from neutron import wsgi
class RequestTestCase(base.BaseTestCase):
    """Tests for the wsgi_resource.Request wrapper: content-type
    negotiation, neutron context access and language negotiation."""

    def setUp(self):
        super(RequestTestCase, self).setUp()
        self.req = wsgi_resource.Request({'foo': 'bar'})

    def test_content_type_missing(self):
        request = wsgi.Request.blank('/tests/123', method='POST')
        request.body = "<body />"
        self.assertIsNone(request.get_content_type())

    def test_content_type_with_charset(self):
        request = wsgi.Request.blank('/tests/123')
        request.headers["Content-Type"] = "application/json; charset=UTF-8"
        result = request.get_content_type()
        self.assertEqual(result, "application/json")

    def test_content_type_from_accept(self):
        for content_type in ('application/xml',
                             'application/json'):
            request = wsgi.Request.blank('/tests/123')
            request.headers["Accept"] = content_type
            result = request.best_match_content_type()
            self.assertEqual(result, content_type)

    def test_content_type_from_accept_best(self):
        request = wsgi.Request.blank('/tests/123')
        request.headers["Accept"] = "application/xml, application/json"
        result = request.best_match_content_type()
        self.assertEqual(result, "application/json")

        # quality factors take precedence over listing order
        request = wsgi.Request.blank('/tests/123')
        request.headers["Accept"] = ("application/json; q=0.3, "
                                     "application/xml; q=0.9")
        result = request.best_match_content_type()
        self.assertEqual(result, "application/xml")

    def test_content_type_from_query_extension(self):
        request = wsgi.Request.blank('/tests/123.xml')
        result = request.best_match_content_type()
        self.assertEqual(result, "application/xml")

        request = wsgi.Request.blank('/tests/123.json')
        result = request.best_match_content_type()
        self.assertEqual(result, "application/json")

        # unknown extensions fall back to the JSON default
        request = wsgi.Request.blank('/tests/123.invalid')
        result = request.best_match_content_type()
        self.assertEqual(result, "application/json")

    def test_content_type_accept_and_query_extension(self):
        # the path extension wins over the Accept header
        request = wsgi.Request.blank('/tests/123.xml')
        request.headers["Accept"] = "application/json"
        result = request.best_match_content_type()
        self.assertEqual(result, "application/xml")

    def test_content_type_accept_default(self):
        request = wsgi.Request.blank('/tests/123.unsupported')
        request.headers["Accept"] = "application/unsupported1"
        result = request.best_match_content_type()
        self.assertEqual(result, "application/json")

    def test_context_with_neutron_context(self):
        ctxt = context.Context('fake_user', 'fake_tenant')
        self.req.environ['neutron.context'] = ctxt
        self.assertEqual(self.req.context, ctxt)

    def test_context_without_neutron_context(self):
        # with no context in the environ, an admin context is synthesized
        self.assertTrue(self.req.context.is_admin)

    def test_best_match_language(self):
        # Test that we are actually invoking language negotiation by webob
        request = wsgi.Request.blank('/')
        # Bug fix: the module attribute was previously replaced with a
        # bare MagicMock and never restored, leaking the stub into every
        # later test of the process; mock.patch.object undoes it on exit.
        with mock.patch.object(gettextutils, 'get_available_languages',
                               return_value=['known-language', 'es', 'zh']):
            request.headers['Accept-Language'] = 'known-language'
            language = request.best_match_language()
            self.assertEqual(language, 'known-language')

            # If the Accept-Language is an unknown language, missing or
            # empty, the best match locale should be None
            request.headers['Accept-Language'] = 'unknown-language'
            language = request.best_match_language()
            self.assertIsNone(language)

            request.headers['Accept-Language'] = ''
            language = request.best_match_language()
            self.assertIsNone(language)

            request.headers.pop('Accept-Language')
            language = request.best_match_language()
            self.assertIsNone(language)
class ResourceTestCase(base.BaseTestCase):
    """Tests for wsgi_resource.Resource: mapping of NeutronException
    subclasses (mapped, unmapped and localized) to HTTP responses in
    JSON and XML, plus plain status-code behaviour.

    The common pattern: a MagicMock controller whose ``test`` action
    raises, wrapped in a webtest.TestApp, driven via the
    'wsgiorg.routing_args' environ entry.
    """

    def test_unmapped_neutron_error_with_json(self):
        # an exception type with no entry in the faults map yields a 500
        msg = u'\u7f51\u7edc'

        class TestException(q_exc.NeutronException):
            message = msg
        expected_res = {'body': {
            'NeutronError': {
                'type': 'TestException',
                'message': msg,
                'detail': ''}}}

        controller = mock.MagicMock()
        controller.test.side_effect = TestException()

        resource = webtest.TestApp(wsgi_resource.Resource(controller))

        environ = {'wsgiorg.routing_args': (None, {'action': 'test',
                                                   'format': 'json'})}
        res = resource.get('', extra_environ=environ, expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)
        self.assertEqual(wsgi.JSONDeserializer().deserialize(res.body),
                         expected_res)

    def test_unmapped_neutron_error_with_xml(self):
        msg = u'\u7f51\u7edc'

        class TestException(q_exc.NeutronException):
            message = msg
        expected_res = {'body': {
            'NeutronError': {
                'type': 'TestException',
                'message': msg,
                'detail': ''}}}

        controller = mock.MagicMock()
        controller.test.side_effect = TestException()

        resource = webtest.TestApp(wsgi_resource.Resource(controller))

        environ = {'wsgiorg.routing_args': (None, {'action': 'test',
                                                   'format': 'xml'})}
        res = resource.get('', extra_environ=environ, expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)
        self.assertEqual(wsgi.XMLDeserializer().deserialize(res.body),
                         expected_res)

    @mock.patch('neutron.openstack.common.gettextutils.get_localized_message')
    def test_unmapped_neutron_error_localized(self, mock_translation):
        # the error message should pass through the translation hook
        gettextutils.install('blaa', lazy=True)
        msg_translation = 'Translated error'
        mock_translation.return_value = msg_translation

        msg = _('Unmapped error')

        class TestException(q_exc.NeutronException):
            message = msg

        controller = mock.MagicMock()
        controller.test.side_effect = TestException()
        resource = webtest.TestApp(wsgi_resource.Resource(controller))
        environ = {'wsgiorg.routing_args': (None, {'action': 'test',
                                                   'format': 'json'})}

        res = resource.get('', extra_environ=environ, expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)
        self.assertIn(msg_translation,
                      str(wsgi.JSONDeserializer().deserialize(res.body)))

    def test_mapped_neutron_error_with_json(self):
        # exception types listed in the faults map use the mapped status
        msg = u'\u7f51\u7edc'

        class TestException(q_exc.NeutronException):
            message = msg
        expected_res = {'body': {
            'NeutronError': {
                'type': 'TestException',
                'message': msg,
                'detail': ''}}}

        controller = mock.MagicMock()
        controller.test.side_effect = TestException()

        faults = {TestException: exc.HTTPGatewayTimeout}
        resource = webtest.TestApp(wsgi_resource.Resource(controller,
                                                          faults=faults))

        environ = {'wsgiorg.routing_args': (None, {'action': 'test',
                                                   'format': 'json'})}
        res = resource.get('', extra_environ=environ, expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPGatewayTimeout.code)
        self.assertEqual(wsgi.JSONDeserializer().deserialize(res.body),
                         expected_res)

    def test_mapped_neutron_error_with_xml(self):
        msg = u'\u7f51\u7edc'

        class TestException(q_exc.NeutronException):
            message = msg
        expected_res = {'body': {
            'NeutronError': {
                'type': 'TestException',
                'message': msg,
                'detail': ''}}}

        controller = mock.MagicMock()
        controller.test.side_effect = TestException()

        faults = {TestException: exc.HTTPGatewayTimeout}
        resource = webtest.TestApp(wsgi_resource.Resource(controller,
                                                          faults=faults))

        environ = {'wsgiorg.routing_args': (None, {'action': 'test',
                                                   'format': 'xml'})}
        res = resource.get('', extra_environ=environ, expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPGatewayTimeout.code)
        self.assertEqual(wsgi.XMLDeserializer().deserialize(res.body),
                         expected_res)

    @mock.patch('neutron.openstack.common.gettextutils.get_localized_message')
    def test_mapped_neutron_error_localized(self, mock_translation):
        gettextutils.install('blaa', lazy=True)
        msg_translation = 'Translated error'
        mock_translation.return_value = msg_translation

        msg = _('Unmapped error')

        class TestException(q_exc.NeutronException):
            message = msg

        controller = mock.MagicMock()
        controller.test.side_effect = TestException()
        faults = {TestException: exc.HTTPGatewayTimeout}
        resource = webtest.TestApp(wsgi_resource.Resource(controller,
                                                          faults=faults))
        environ = {'wsgiorg.routing_args': (None, {'action': 'test',
                                                   'format': 'json'})}

        res = resource.get('', extra_environ=environ, expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPGatewayTimeout.code)
        self.assertIn(msg_translation,
                      str(wsgi.JSONDeserializer().deserialize(res.body)))

    def test_http_error(self):
        # webob HTTP exceptions pass through with their own status
        controller = mock.MagicMock()
        controller.test.side_effect = exc.HTTPGatewayTimeout()

        resource = webtest.TestApp(wsgi_resource.Resource(controller))

        environ = {'wsgiorg.routing_args': (None, {'action': 'test'})}
        res = resource.get('', extra_environ=environ, expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPGatewayTimeout.code)

    def test_unhandled_error_with_json(self):
        # arbitrary exceptions become a generic 500 with a canned message
        expected_res = {'body': {'NeutronError':
                                 _('Request Failed: internal server error '
                                   'while processing your request.')}}
        controller = mock.MagicMock()
        controller.test.side_effect = Exception()

        resource = webtest.TestApp(wsgi_resource.Resource(controller))

        environ = {'wsgiorg.routing_args': (None, {'action': 'test',
                                                   'format': 'json'})}
        res = resource.get('', extra_environ=environ, expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)
        self.assertEqual(wsgi.JSONDeserializer().deserialize(res.body),
                         expected_res)

    def test_unhandled_error_with_xml(self):
        expected_res = {'body': {'NeutronError':
                                 _('Request Failed: internal server error '
                                   'while processing your request.')}}
        controller = mock.MagicMock()
        controller.test.side_effect = Exception()

        resource = webtest.TestApp(wsgi_resource.Resource(controller))

        environ = {'wsgiorg.routing_args': (None, {'action': 'test',
                                                   'format': 'xml'})}
        res = resource.get('', extra_environ=environ, expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)
        self.assertEqual(wsgi.XMLDeserializer().deserialize(res.body),
                         expected_res)

    def test_status_200(self):
        controller = mock.MagicMock()
        controller.test = lambda request: {'foo': 'bar'}

        resource = webtest.TestApp(wsgi_resource.Resource(controller))

        environ = {'wsgiorg.routing_args': (None, {'action': 'test'})}
        res = resource.get('', extra_environ=environ, expect_errors=True)
        self.assertEqual(res.status_int, 200)

    def test_status_204(self):
        # DELETE returns 204 No Content
        controller = mock.MagicMock()
        controller.test = lambda request: {'foo': 'bar'}

        resource = webtest.TestApp(wsgi_resource.Resource(controller))

        environ = {'wsgiorg.routing_args': (None, {'action': 'delete'})}
        res = resource.delete('', extra_environ=environ, expect_errors=True)
        self.assertEqual(res.status_int, 204)

    def test_no_route_args(self):
        # a request with no routing args is a server error
        controller = mock.MagicMock()

        resource = webtest.TestApp(wsgi_resource.Resource(controller))

        environ = {}
        res = resource.get('', extra_environ=environ, expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)

    def test_post_with_body(self):
        controller = mock.MagicMock()
        controller.test = lambda request, body: {'foo': 'bar'}

        resource = webtest.TestApp(wsgi_resource.Resource(controller))

        environ = {'wsgiorg.routing_args': (None, {'action': 'test'})}
        res = resource.post('', params='{"key": "val"}',
                            extra_environ=environ, expect_errors=True)
        self.assertEqual(res.status_int, 200)
| apache-2.0 |
LyonsLab/coge | bin/last_wrapper/Bio/GA/Selection/RouletteWheel.py | 4 | 4701 | """Implement Roulette Wheel selection on a population.
This implements Roulette Wheel selection in which individuals are
selected from a population randomly, with their proportion of selection
based on their relative fitness in the population.
"""
# standard modules
import random
import copy
# local modules
from Abstract import AbstractSelection
class RouletteWheelSelection(AbstractSelection):
    """Roulette wheel selection proportional to individuals fitness.

    The implements a roulette wheel selector that selects individuals
    from the population, and performs mutation and crossover on
    the selected individuals.
    """
    def __init__(self, mutator, crossover, repairer=None):
        """Initialize the selector.

        Arguments:

        o mutator -- A Mutation object which will perform mutation
        on an individual.

        o crossover -- A Crossover object which will take two
        individuals and produce two new individuals which may
        have had crossover occur.

        o repairer -- A class which can do repair on rearranged genomes
        to eliminate infeasible individuals. If set at None, no repair
        will be done.
        """
        AbstractSelection.__init__(self, mutator, crossover, repairer)

    def select(self, population):
        """Perform selection on the population based using a Roulette model.

        Arguments:

        o population -- A population of organisms on which we will perform
        selection. The individuals are assumed to have fitness values which
        are due to their current genome.
        """
        # set up the current probabilities for selecting organisms
        # from the population
        prob_wheel = self._set_up_wheel(population)
        # Fix: dict.keys() is not a sortable list on Python 3; sorted()
        # yields the identical ordered key sequence on both 2 and 3
        # (previously: probs = prob_wheel.keys(); probs.sort()).
        probs = sorted(prob_wheel)

        # now create the new population with the same size as the original
        new_population = []

        for pair_spin in range(len(population) // 2):
            # select two individuals using roulette wheel selection
            choice_num_1 = random.random()
            choice_num_2 = random.random()

            # now grab the two organisms from the probabilities
            chosen_org_1 = None
            chosen_org_2 = None
            prev_prob = 0
            for cur_prob in probs:
                if choice_num_1 > prev_prob and choice_num_1 <= cur_prob:
                    chosen_org_1 = prob_wheel[cur_prob]
                if choice_num_2 > prev_prob and choice_num_2 <= cur_prob:
                    chosen_org_2 = prob_wheel[cur_prob]

                prev_prob = cur_prob

            assert chosen_org_1 is not None, "Didn't select organism one"
            assert chosen_org_2 is not None, "Didn't select organism two"

            # do mutation and crossover to get the new organisms
            new_org_1, new_org_2 = self.mutate_and_crossover(chosen_org_1,
                                                             chosen_org_2)

            new_population.extend([new_org_1, new_org_2])

        return new_population

    def _set_up_wheel(self, population):
        """Set up the roulette wheel based on the fitnesses.

        This creates a fitness proportional 'wheel' that will be used for
        selecting based on random numbers.

        Returns:

        o A dictionary where the keys are the 'high' value that an
        individual will be selected. The low value is determined by
        the previous key in a sorted list of keys. For instance, if we
        have a sorted list of keys like:

        [.1, .3, .7, 1]

        Then the individual whose key is .1 will be selected if a number
        between 0 and .1 is chosen, the individual whose key is .3 will
        be selected if the number is between .1 and .3, and so on.

        The values of the dictionary are the organism instances.
        """
        # first sum up the total fitness in the population
        total_fitness = 0
        for org in population:
            total_fitness += org.fitness

        # now create the wheel dictionary for all of the individuals
        wheel_dict = {}
        total_percentage = 0
        for org in population:
            org_percentage = float(org.fitness) / float(total_fitness)

            # NOTE(review): cumulative float percentages can fall just short
            # of 1.0, leaving a tiny unselectable gap (and zero-fitness
            # organisms produce duplicate keys) -- confirm acceptable for
            # the populations used.
            # the organisms chance of being picked goes from the previous
            # percentage (total_percentage) to the previous percentage
            # plus the organisms specific fitness percentage
            wheel_dict[total_percentage + org_percentage] = copy.copy(org)

            # keep a running total of where we are at in the percentages
            total_percentage += org_percentage

        return wheel_dict
| bsd-2-clause |
SaM-Solutions/samba | source4/scripting/python/samba/netcmd/fsmo.py | 19 | 7846 | #!/usr/bin/env python
#
# Changes a FSMO role owner
#
# Copyright Nadezhda Ivanova 2009
# Copyright Jelmer Vernooij 2009
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import samba.getopt as options
import ldb
from ldb import LdbError
from samba.auth import system_session
from samba.netcmd import (
Command,
CommandError,
Option,
)
from samba.samdb import SamDB
class cmd_fsmo(Command):
    """Makes the target DC transfer or seize a fsmo role [server connection needed]"""

    # one-line usage summary shown in the command help
    synopsis = "(show | transfer <options> | seize <options>)"

    # standard samba-tool option groups (connection, credentials, version)
    takes_optiongroups = {
        "sambaopts": options.SambaOptions,
        "credopts": options.CredentialsOptions,
        "versionopts": options.VersionOptions,
    }

    takes_options = [
        Option("--host", help="LDB URL for database or target server", type=str),
        Option("--force", help="Force seizing of the role without attempting to transfer first.", action="store_true"),
        Option("--role", type="choice", choices=["rid", "pdc", "infrastructure","schema","naming","all"],
            help="""The FSMO role to seize or transfer.\n
rid=RidAllocationMasterRole\n
schema=SchemaMasterRole\n
pdc=PdcEmulationMasterRole\n
naming=DomainNamingMasterRole\n
infrastructure=InfrastructureMasterRole\n
all=all of the above"""),
    ]

    # positional argument: which subcommand (show/transfer/seize) to run
    takes_args = ["subcommand"]
def transfer_role(self, role, samdb):
m = ldb.Message()
m.dn = ldb.Dn(samdb, "")
if role == "rid":
m["becomeRidMaster"]= ldb.MessageElement(
"1", ldb.FLAG_MOD_REPLACE,
"becomeRidMaster")
elif role == "pdc":
domain_dn = samdb.domain_dn()
res = samdb.search(domain_dn,
scope=ldb.SCOPE_BASE, attrs=["objectSid"])
assert len(res) == 1
sid = res[0]["objectSid"][0]
m["becomePdc"]= ldb.MessageElement(
sid, ldb.FLAG_MOD_REPLACE,
"becomePdc")
elif role == "naming":
m["becomeDomainMaster"]= ldb.MessageElement(
"1", ldb.FLAG_MOD_REPLACE,
"becomeDomainMaster")
samdb.modify(m)
elif role == "infrastructure":
m["becomeInfrastructureMaster"]= ldb.MessageElement(
"1", ldb.FLAG_MOD_REPLACE,
"becomeInfrastructureMaster")
elif role == "schema":
m["becomeSchemaMaster"]= ldb.MessageElement(
"1", ldb.FLAG_MOD_REPLACE,
"becomeSchemaMaster")
else:
raise CommandError("Invalid FSMO role.")
samdb.modify(m)
def seize_role(self, role, samdb, force):
res = samdb.search("",
scope=ldb.SCOPE_BASE, attrs=["dsServiceName"])
assert len(res) == 1
serviceName = res[0]["dsServiceName"][0]
domain_dn = samdb.domain_dn()
m = ldb.Message()
if role == "rid":
m.dn = ldb.Dn(samdb, self.rid_dn)
elif role == "pdc":
m.dn = ldb.Dn(samdb, domain_dn)
elif role == "naming":
m.dn = ldb.Dn(samdb, self.naming_dn)
elif role == "infrastructure":
m.dn = ldb.Dn(samdb, self.infrastructure_dn)
elif role == "schema":
m.dn = ldb.Dn(samdb, self.schema_dn)
else:
raise CommandError("Invalid FSMO role.")
#first try to transfer to avoid problem if the owner is still active
if force is None:
self.message("Attempting transfer...")
try:
self.transfer_role(role, samdb)
except LdbError, (num, _):
#transfer failed, use the big axe...
self.message("Transfer unsuccessfull, seizing...")
m["fSMORoleOwner"]= ldb.MessageElement(
serviceName, ldb.FLAG_MOD_REPLACE,
"fSMORoleOwner")
samdb.modify(m)
else:
self.message("Transfer succeeded.")
else:
self.message("Will not attempt transfer, seizing...")
m["fSMORoleOwner"]= ldb.MessageElement(
serviceName, ldb.FLAG_MOD_REPLACE,
"fSMORoleOwner")
samdb.modify(m)
def run(self, subcommand, force=None, host=None, role=None,
credopts=None, sambaopts=None, versionopts=None):
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp, fallback_machine=True)
samdb = SamDB(url=host, session_info=system_session(),
credentials=creds, lp=lp)
domain_dn = samdb.domain_dn()
self.infrastructure_dn = "CN=Infrastructure," + domain_dn
self.naming_dn = "CN=Partitions,CN=Configuration," + domain_dn
self.schema_dn = "CN=Schema,CN=Configuration," + domain_dn
self.rid_dn = "CN=RID Manager$,CN=System," + domain_dn
res = samdb.search(self.infrastructure_dn,
scope=ldb.SCOPE_BASE, attrs=["fSMORoleOwner"])
assert len(res) == 1
self.infrastructureMaster = res[0]["fSMORoleOwner"][0]
res = samdb.search(domain_dn,
scope=ldb.SCOPE_BASE, attrs=["fSMORoleOwner"])
assert len(res) == 1
self.pdcEmulator = res[0]["fSMORoleOwner"][0]
res = samdb.search(self.naming_dn,
scope=ldb.SCOPE_BASE, attrs=["fSMORoleOwner"])
assert len(res) == 1
self.namingMaster = res[0]["fSMORoleOwner"][0]
res = samdb.search(self.schema_dn,
scope=ldb.SCOPE_BASE, attrs=["fSMORoleOwner"])
assert len(res) == 1
self.schemaMaster = res[0]["fSMORoleOwner"][0]
res = samdb.search(self.rid_dn,
scope=ldb.SCOPE_BASE, attrs=["fSMORoleOwner"])
assert len(res) == 1
self.ridMaster = res[0]["fSMORoleOwner"][0]
if subcommand == "show":
self.message("InfrastructureMasterRole owner: " + self.infrastructureMaster)
self.message("RidAllocationMasterRole owner: " + self.ridMaster)
self.message("PdcEmulationMasterRole owner: " + self.pdcEmulator)
self.message("DomainNamingMasterRole owner: " + self.namingMaster)
self.message("SchemaMasterRole owner: " + self.schemaMaster)
elif subcommand == "transfer":
if role == "all":
self.transfer_role("rid", samdb)
self.transfer_role("pdc", samdb)
self.transfer_role("naming", samdb)
self.transfer_role("infrastructure", samdb)
self.transfer_role("schema", samdb)
else:
self.transfer_role(role, samdb)
elif subcommand == "seize":
if role == "all":
self.seize_role("rid", samdb, force)
self.seize_role("pdc", samdb, force)
self.seize_role("naming", samdb, force)
self.seize_role("infrastructure", samdb, force)
self.seize_role("schema", samdb, force)
else:
self.seize_role(role, samdb, force)
else:
raise CommandError("Wrong argument '%s'!" % subcommand)
| gpl-3.0 |
pjg101/SickRage | lib/html5lib/_inputstream.py | 45 | 32499 | from __future__ import absolute_import, division, unicode_literals
from six import text_type, binary_type
from six.moves import http_client, urllib
import codecs
import re
import webencodings
from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from .constants import _ReparseException
from . import _utils
from io import StringIO
try:
from io import BytesIO
except ImportError:
BytesIO = StringIO
# Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"])
# Character class matching C0/C1 controls and Unicode noncharacters
# (U+FDD0..U+FDEF and the U+xFFFE/U+xFFFF pair of every plane).
invalid_unicode_no_surrogate = "[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]" # noqa
if _utils.supports_lone_surrogates:
    # Use one extra step of indirection and create surrogates with
    # eval. Not using this indirection would introduce an illegal
    # unicode literal on platforms not supporting such lone
    # surrogates.
    assert invalid_unicode_no_surrogate[-1] == "]" and invalid_unicode_no_surrogate.count("]") == 1
    invalid_unicode_re = re.compile(invalid_unicode_no_surrogate[:-1] +
                                    eval('"\\uD800-\\uDFFF"') +  # pylint:disable=eval-used
                                    "]")
else:
    invalid_unicode_re = re.compile(invalid_unicode_no_surrogate)
# Noncharacters outside the BMP; used by characterErrorsUCS2 where such
# code points only appear as surrogate pairs.
non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
                                  0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
                                  0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
                                  0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
                                  0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
                                  0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
                                  0x10FFFE, 0x10FFFF])
ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005C\u005B-\u0060\u007B-\u007E]")
# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream(object):
    """Buffering wrapper for streams that have no buffering of their own.

    Everything read from the wrapped stream is retained as a list of
    chunks, so that seek() can move backwards without re-reading the
    underlying stream.  A list of chunks is used because repeatedly
    concatenating strings would be O(n**2).
    """

    def __init__(self, stream):
        self.stream = stream
        self.buffer = []
        self.position = [-1, 0]  # [chunk index, offset within that chunk]

    def tell(self):
        """Return the absolute offset of the current position."""
        chunk_idx, chunk_off = self.position
        return sum(len(chunk) for chunk in self.buffer[:chunk_idx]) + chunk_off

    def seek(self, pos):
        """Jump to absolute offset *pos*, which must already be buffered."""
        assert pos <= self._bufferedBytes()
        remaining = pos
        idx = 0
        while len(self.buffer[idx]) < remaining:
            remaining -= len(self.buffer[idx])
            idx += 1
        self.position = [idx, remaining]

    def read(self, bytes):
        """Read up to *bytes* bytes, serving from the buffer first."""
        if not self.buffer:
            return self._readStream(bytes)
        at_buffer_end = (self.position[0] == len(self.buffer) and
                         self.position[1] == len(self.buffer[-1]))
        if at_buffer_end:
            return self._readStream(bytes)
        return self._readFromBuffer(bytes)

    def _bufferedBytes(self):
        # Total number of bytes retained so far.
        return sum(len(chunk) for chunk in self.buffer)

    def _readStream(self, bytes):
        # Pull fresh data from the wrapped stream, remember it, and move
        # the position to the end of the new chunk.
        data = self.stream.read(bytes)
        self.buffer.append(data)
        self.position = [self.position[0] + 1, len(data)]
        return data

    def _readFromBuffer(self, bytes):
        # Satisfy the request from buffered chunks; anything still owed
        # after the buffer is exhausted comes from the stream.
        wanted = bytes
        pieces = []
        idx, offset = self.position
        while idx < len(self.buffer) and wanted != 0:
            assert wanted > 0
            chunk = self.buffer[idx]
            available = len(chunk) - offset
            if wanted <= available:
                take = wanted
                self.position = [idx, offset + take]
            else:
                take = available
                self.position = [idx, len(chunk)]
                idx += 1
            pieces.append(chunk[offset:offset + take])
            wanted -= take
            offset = 0
        if wanted:
            pieces.append(self._readStream(wanted))
        return b"".join(pieces)
def HTMLInputStream(source, **kwargs):
    """Return the appropriate input stream wrapper for *source*.

    Unicode sources (text streams or str objects) get an
    HTMLUnicodeInputStream; everything else is treated as bytes and gets
    an HTMLBinaryInputStream, which performs encoding detection.
    """
    # Work around Python bug #20007: read(0) closes the connection.
    # http://bugs.python.org/issue20007
    is_http = isinstance(source, http_client.HTTPResponse)
    is_wrapped_http = (isinstance(source, urllib.response.addbase) and
                       isinstance(source.fp, http_client.HTTPResponse))
    if is_http or is_wrapped_http:
        # HTTP responses are always byte streams; don't probe with read(0).
        isUnicode = False
    elif hasattr(source, "read"):
        isUnicode = isinstance(source.read(0), text_type)
    else:
        isUnicode = isinstance(source, text_type)
    if not isUnicode:
        return HTMLBinaryInputStream(source, **kwargs)
    # Encoding-related keyword arguments make no sense for text input.
    encodings = [x for x in kwargs if x.endswith("_encoding")]
    if encodings:
        raise TypeError("Cannot set an encoding with a unicode input, set %r" % encodings)
    return HTMLUnicodeInputStream(source, **kwargs)
class HTMLUnicodeInputStream(object):
    """Provides a unicode stream of characters to the HTMLTokenizer.
    This class takes care of character encoding and removing or replacing
    incorrect byte-sequences and also provides column and line tracking.
    """
    # Number of characters pulled from the underlying stream per
    # readChunk() call.
    _defaultChunkSize = 10240
    def __init__(self, source):
        """Initialises the HTMLInputStream.
        HTMLInputStream(source, [encoding]) -> Normalized stream from source
        for use by html5lib.
        source can be either a file-object, local filename or a string.
        The optional encoding parameter must be a string that indicates
        the encoding. If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)
        """
        if not _utils.supports_lone_surrogates:
            # Such platforms will have already checked for such
            # surrogate errors, so no need to do this checking.
            self.reportCharacterErrors = None
        elif len("\U0010FFFF") == 1:
            # Wide (UCS-4) build: each code point is a single code unit.
            self.reportCharacterErrors = self.characterErrorsUCS4
        else:
            # Narrow (UCS-2) build: astral characters appear as surrogate pairs.
            self.reportCharacterErrors = self.characterErrorsUCS2
        # List of where new lines occur
        self.newLines = [0]
        self.charEncoding = (lookupEncoding("utf-8"), "certain")
        self.dataStream = self.openStream(source)
        self.reset()
    def reset(self):
        # Reset all chunk/position state; called from __init__ and again
        # whenever the detected encoding changes.
        self.chunk = ""
        self.chunkSize = 0
        self.chunkOffset = 0
        self.errors = []
        # number of (complete) lines in previous chunks
        self.prevNumLines = 0
        # number of columns in the last line of the previous chunk
        self.prevNumCols = 0
        # Deal with CR LF and surrogates split over chunk boundaries
        self._bufferedCharacter = None
    def openStream(self, source):
        """Produces a file object from source.
        source can be either a file object, local filename or a string.
        """
        # Already a file object
        if hasattr(source, 'read'):
            stream = source
        else:
            stream = StringIO(source)
        return stream
    def _position(self, offset):
        # Translate *offset* within the current chunk into an absolute
        # (line, column) pair using the totals carried over from earlier
        # chunks (prevNumLines / prevNumCols).
        chunk = self.chunk
        nLines = chunk.count('\n', 0, offset)
        positionLine = self.prevNumLines + nLines
        lastLinePos = chunk.rfind('\n', 0, offset)
        if lastLinePos == -1:
            # No newline in this chunk before offset: continue the
            # previous chunk's last line.
            positionColumn = self.prevNumCols + offset
        else:
            positionColumn = offset - (lastLinePos + 1)
        return (positionLine, positionColumn)
    def position(self):
        """Returns (line, col) of the current position in the stream."""
        line, col = self._position(self.chunkOffset)
        # Lines are reported 1-based, columns 0-based.
        return (line + 1, col)
    def char(self):
        """ Read one character from the stream or queue if available. Return
            EOF when EOF is reached.
        """
        # Read a new chunk from the input stream if necessary
        if self.chunkOffset >= self.chunkSize:
            if not self.readChunk():
                return EOF
        chunkOffset = self.chunkOffset
        char = self.chunk[chunkOffset]
        self.chunkOffset = chunkOffset + 1
        return char
    def readChunk(self, chunkSize=None):
        # Load the next chunk of decoded text into self.chunk; returns
        # False once the stream is exhausted.
        if chunkSize is None:
            chunkSize = self._defaultChunkSize
        # Roll line/column totals forward before the chunk is replaced.
        self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)
        self.chunk = ""
        self.chunkSize = 0
        self.chunkOffset = 0
        data = self.dataStream.read(chunkSize)
        # Deal with CR LF and surrogates broken across chunks
        if self._bufferedCharacter:
            data = self._bufferedCharacter + data
            self._bufferedCharacter = None
        elif not data:
            # We have no more data, bye-bye stream
            return False
        if len(data) > 1:
            # Hold back a trailing CR (0x0D) or lead surrogate
            # (0xD800-0xDBFF) so it can be joined with the start of the
            # next chunk.  (A single-character chunk is passed through
            # as-is, presumably to guarantee forward progress -- TODO
            # confirm against upstream html5lib.)
            lastv = ord(data[-1])
            if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
                self._bufferedCharacter = data[-1]
                data = data[:-1]
        if self.reportCharacterErrors:
            self.reportCharacterErrors(data)
        # Replace invalid characters
        data = data.replace("\r\n", "\n")
        data = data.replace("\r", "\n")
        self.chunk = data
        self.chunkSize = len(data)
        return True
    def characterErrorsUCS4(self, data):
        # One parse error per invalid code point in the chunk.
        for _ in range(len(invalid_unicode_re.findall(data))):
            self.errors.append("invalid-codepoint")
    def characterErrorsUCS2(self, data):
        # Someone picked the wrong compile option
        # You lose
        skip = False
        for match in invalid_unicode_re.finditer(data):
            # NOTE(review): once *skip* is set, `continue` bypasses the
            # `skip = False` reset below, so it is never cleared on this
            # path -- verify against upstream html5lib behaviour.
            if skip:
                continue
            codepoint = ord(match.group())
            pos = match.start()
            # Pretty sure there should be endianness issues here
            if _utils.isSurrogatePair(data[pos:pos + 2]):
                # We have a surrogate pair!
                char_val = _utils.surrogatePairToCodepoint(data[pos:pos + 2])
                if char_val in non_bmp_invalid_codepoints:
                    self.errors.append("invalid-codepoint")
                skip = True
            elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
                  pos == len(data) - 1):
                # Lone surrogate at the very end of the chunk.
                self.errors.append("invalid-codepoint")
            else:
                skip = False
                self.errors.append("invalid-codepoint")
    def charsUntil(self, characters, opposite=False):
        """ Returns a string of characters from the stream up to but not
        including any character in 'characters' or EOF. 'characters' must be
        a container that supports the 'in' method and iteration over its
        characters.
        """
        # Use a cache of regexps to find the required characters
        # (module-level charsUntilRegEx, keyed on the character set and
        # the opposite flag).
        try:
            chars = charsUntilRegEx[(characters, opposite)]
        except KeyError:
            if __debug__:
                for c in characters:
                    assert(ord(c) < 128)
            regex = "".join(["\\x%02x" % ord(c) for c in characters])
            if not opposite:
                regex = "^%s" % regex
            chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex)
        rv = []
        while True:
            # Find the longest matching prefix
            m = chars.match(self.chunk, self.chunkOffset)
            if m is None:
                # If nothing matched, and it wasn't because we ran out of chunk,
                # then stop
                if self.chunkOffset != self.chunkSize:
                    break
            else:
                end = m.end()
                # If not the whole chunk matched, return everything
                # up to the part that didn't match
                if end != self.chunkSize:
                    rv.append(self.chunk[self.chunkOffset:end])
                    self.chunkOffset = end
                    break
            # If the whole remainder of the chunk matched,
            # use it all and read the next chunk
            rv.append(self.chunk[self.chunkOffset:])
            if not self.readChunk():
                # Reached EOF
                break
        r = "".join(rv)
        return r
    def unget(self, char):
        # Only one character is allowed to be ungotten at once - it must
        # be consumed again before any further call to unget
        if char is not None:
            if self.chunkOffset == 0:
                # unget is called quite rarely, so it's a good idea to do
                # more work here if it saves a bit of work in the frequently
                # called char and charsUntil.
                # So, just prepend the ungotten character onto the current
                # chunk:
                self.chunk = char + self.chunk
                self.chunkSize += 1
            else:
                self.chunkOffset -= 1
                assert self.chunk[self.chunkOffset] == char
class HTMLBinaryInputStream(HTMLUnicodeInputStream):
    """Provides a unicode stream of characters to the HTMLTokenizer.
    This class takes care of character encoding and removing or replacing
    incorrect byte-sequences and also provides column and line tracking.
    """
    def __init__(self, source, override_encoding=None, transport_encoding=None,
                 same_origin_parent_encoding=None, likely_encoding=None,
                 default_encoding="windows-1252", useChardet=True):
        """Initialises the HTMLInputStream.
        HTMLInputStream(source, [encoding]) -> Normalized stream from source
        for use by html5lib.
        source can be either a file-object, local filename or a string.
        The optional encoding parameter must be a string that indicates
        the encoding. If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)
        """
        # Raw Stream - for unicode objects this will encode to utf-8 and set
        # self.charEncoding as appropriate
        self.rawStream = self.openStream(source)
        HTMLUnicodeInputStream.__init__(self, self.rawStream)
        # Encoding Information
        # Number of bytes to use when looking for a meta element with
        # encoding information
        self.numBytesMeta = 1024
        # Number of bytes to use when using detecting encoding using chardet
        self.numBytesChardet = 100
        # Things from args
        self.override_encoding = override_encoding
        self.transport_encoding = transport_encoding
        self.same_origin_parent_encoding = same_origin_parent_encoding
        self.likely_encoding = likely_encoding
        self.default_encoding = default_encoding
        # Determine encoding
        self.charEncoding = self.determineEncoding(useChardet)
        assert self.charEncoding[0] is not None
        # Call superclass
        self.reset()
    def reset(self):
        # Recreate the decoding reader with the currently detected
        # encoding, then reset the unicode-level chunk state.
        self.dataStream = self.charEncoding[0].codec_info.streamreader(self.rawStream, 'replace')
        HTMLUnicodeInputStream.reset(self)
    def openStream(self, source):
        """Produces a file object from source.
        source can be either a file object, local filename or a string.
        """
        # Already a file object
        if hasattr(source, 'read'):
            stream = source
        else:
            stream = BytesIO(source)
        try:
            stream.seek(stream.tell())
        except:  # pylint:disable=bare-except
            # Stream is not seekable; wrap it so the encoding pre-scan
            # can rewind to the start afterwards.
            stream = BufferedStream(stream)
        return stream
    def determineEncoding(self, chardet=True):
        # Sources are consulted in precedence order: BOM, override,
        # transport layer, <meta> pre-scan, same-origin parent, "likely"
        # hint, chardet guess, caller default, windows-1252.
        # BOMs take precedence over everything
        # This will also read past the BOM if present
        charEncoding = self.detectBOM(), "certain"
        if charEncoding[0] is not None:
            return charEncoding
        # If we've been overriden, we've been overriden
        charEncoding = lookupEncoding(self.override_encoding), "certain"
        if charEncoding[0] is not None:
            return charEncoding
        # Now check the transport layer
        charEncoding = lookupEncoding(self.transport_encoding), "certain"
        if charEncoding[0] is not None:
            return charEncoding
        # Look for meta elements with encoding information
        charEncoding = self.detectEncodingMeta(), "tentative"
        if charEncoding[0] is not None:
            return charEncoding
        # Parent document encoding
        charEncoding = lookupEncoding(self.same_origin_parent_encoding), "tentative"
        if charEncoding[0] is not None and not charEncoding[0].name.startswith("utf-16"):
            return charEncoding
        # "likely" encoding
        charEncoding = lookupEncoding(self.likely_encoding), "tentative"
        if charEncoding[0] is not None:
            return charEncoding
        # Guess with chardet, if available
        if chardet:
            try:
                from chardet.universaldetector import UniversalDetector
            except ImportError:
                pass
            else:
                # Feed the raw stream to chardet in small increments until
                # it is confident, then rewind.
                buffers = []
                detector = UniversalDetector()
                while not detector.done:
                    buffer = self.rawStream.read(self.numBytesChardet)
                    assert isinstance(buffer, bytes)
                    if not buffer:
                        break
                    buffers.append(buffer)
                    detector.feed(buffer)
                detector.close()
                encoding = lookupEncoding(detector.result['encoding'])
                self.rawStream.seek(0)
                if encoding is not None:
                    return encoding, "tentative"
        # Try the default encoding
        charEncoding = lookupEncoding(self.default_encoding), "tentative"
        if charEncoding[0] is not None:
            return charEncoding
        # Fallback to html5lib's default if even that hasn't worked
        return lookupEncoding("windows-1252"), "tentative"
    def changeEncoding(self, newEncoding):
        # Invoked when a meta-declared encoding is found mid-parse; may
        # raise _ReparseException to force a reparse from the beginning.
        assert self.charEncoding[1] != "certain"
        newEncoding = lookupEncoding(newEncoding)
        if newEncoding is None:
            return
        if newEncoding.name in ("utf-16be", "utf-16le"):
            # A meta-declared utf-16 is read as utf-8 (per the HTML
            # encoding sniffing algorithm).
            newEncoding = lookupEncoding("utf-8")
            assert newEncoding is not None
        elif newEncoding == self.charEncoding[0]:
            # Same encoding we were already using: just upgrade certainty.
            self.charEncoding = (self.charEncoding[0], "certain")
        else:
            self.rawStream.seek(0)
            self.charEncoding = (newEncoding, "certain")
            self.reset()
            raise _ReparseException("Encoding changed from %s to %s" % (self.charEncoding[0], newEncoding))
    def detectBOM(self):
        """Attempts to detect at BOM at the start of the stream. If
        an encoding can be determined from the BOM return the name of the
        encoding otherwise return None"""
        bomDict = {
            codecs.BOM_UTF8: 'utf-8',
            codecs.BOM_UTF16_LE: 'utf-16le', codecs.BOM_UTF16_BE: 'utf-16be',
            codecs.BOM_UTF32_LE: 'utf-32le', codecs.BOM_UTF32_BE: 'utf-32be'
        }
        # Go to beginning of file and read in 4 bytes
        string = self.rawStream.read(4)
        assert isinstance(string, bytes)
        # Try detecting the BOM using bytes from the string
        encoding = bomDict.get(string[:3])  # UTF-8
        seek = 3
        if not encoding:
            # Need to detect UTF-32 before UTF-16
            encoding = bomDict.get(string)  # UTF-32
            seek = 4
            if not encoding:
                encoding = bomDict.get(string[:2])  # UTF-16
                seek = 2
        # Set the read position past the BOM if one was found, otherwise
        # set it to the start of the stream
        if encoding:
            self.rawStream.seek(seek)
            return lookupEncoding(encoding)
        else:
            self.rawStream.seek(0)
            return None
    def detectEncodingMeta(self):
        """Report the encoding declared by the meta element
        """
        # Pre-scan only the first numBytesMeta bytes, then rewind so the
        # real parse starts from the beginning again.
        buffer = self.rawStream.read(self.numBytesMeta)
        assert isinstance(buffer, bytes)
        parser = EncodingParser(buffer)
        self.rawStream.seek(0)
        encoding = parser.getEncoding()
        if encoding is not None and encoding.name in ("utf-16be", "utf-16le"):
            # Meta-declared utf-16 is read as utf-8 (spec behaviour).
            encoding = lookupEncoding("utf-8")
        return encoding
class EncodingBytes(bytes):
    """String-like object with an associated position and various extra methods
    If the position is ever greater than the string length then an exception is
    raised"""
    def __new__(self, value):
        # Lower-case the data up front so all matching in the pre-parser
        # is effectively case-insensitive.
        assert isinstance(value, bytes)
        return bytes.__new__(self, value.lower())
    def __init__(self, value):
        # pylint:disable=unused-argument
        # -1 means "before the first byte"; the first next() moves to 0.
        self._position = -1
    def __iter__(self):
        return self
    def __next__(self):
        # Advance one byte and return it as a length-1 bytes object.
        p = self._position = self._position + 1
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        return self[p:p + 1]
    def next(self):
        # Py2 compat
        return self.__next__()
    def previous(self):
        # Step back one byte and return the byte at the new position.
        p = self._position
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        self._position = p = p - 1
        return self[p:p + 1]
    def setPosition(self, position):
        # Raises StopIteration once the cursor has run off the end;
        # EncodingParser relies on this to abort its scan.
        if self._position >= len(self):
            raise StopIteration
        self._position = position
    def getPosition(self):
        if self._position >= len(self):
            raise StopIteration
        if self._position >= 0:
            return self._position
        else:
            # Before the first byte (initial state).
            return None
    position = property(getPosition, setPosition)
    def getCurrentByte(self):
        # Length-1 bytes slice (b"x"), never an int, even on Py3.
        return self[self.position:self.position + 1]
    currentByte = property(getCurrentByte)
    def skip(self, chars=spaceCharactersBytes):
        """Skip past a list of characters"""
        # Leaves the position on (and returns) the first byte NOT in
        # *chars*, or returns None after consuming the rest of the data.
        p = self.position  # use property for the error-checking
        while p < len(self):
            c = self[p:p + 1]
            if c not in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        return None
    def skipUntil(self, chars):
        # Mirror image of skip(): stop on the first byte that IS in *chars*.
        p = self.position
        while p < len(self):
            c = self[p:p + 1]
            if c in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        return None
    def matchBytes(self, bytes):
        """Look for a sequence of bytes at the start of a string. If the bytes
        are found return True and advance the position to the byte after the
        match. Otherwise return False and leave the position alone"""
        p = self.position
        data = self[p:p + len(bytes)]
        rv = data.startswith(bytes)
        if rv:
            self.position += len(bytes)
        return rv
    def jumpTo(self, bytes):
        """Look for the next sequence of bytes matching a given sequence. If
        a match is found advance the position to the last byte of the match"""
        # Landing on the LAST byte means the following next() yields the
        # byte immediately after the match.
        newPosition = self[self.position:].find(bytes)
        if newPosition > -1:
            # XXX: This is ugly, but I can't see a nicer way to fix this.
            if self._position == -1:
                self._position = 0
            self._position += (newPosition + len(bytes) - 1)
            return True
        else:
            raise StopIteration
class EncodingParser(object):
    """Mini parser for detecting character encoding from meta elements"""
    def __init__(self, data):
        """string - the data to work on for encoding detection"""
        self.data = EncodingBytes(data)
        self.encoding = None
    def getEncoding(self):
        # Prefix -> handler table; the first prefix matching at the
        # current position wins.  Order matters: "<!--" before "<!",
        # "</" before "<", etc.
        methodDispatch = (
            (b"<!--", self.handleComment),
            (b"<meta", self.handleMeta),
            (b"</", self.handlePossibleEndTag),
            (b"<!", self.handleOther),
            (b"<?", self.handleOther),
            (b"<", self.handlePossibleStartTag))
        # Walk the buffer one byte at a time.  A handler returning False,
        # or running off the end of the data (StopIteration), ends the scan.
        for _ in self.data:
            keepParsing = True
            for key, method in methodDispatch:
                if self.data.matchBytes(key):
                    try:
                        keepParsing = method()
                        break
                    except StopIteration:
                        keepParsing = False
                        break
            if not keepParsing:
                break
        return self.encoding
    def handleComment(self):
        """Skip over comments"""
        return self.data.jumpTo(b"-->")
    def handleMeta(self):
        # Called with the position just past "<meta".
        if self.data.currentByte not in spaceCharactersBytes:
            # if we have <meta not followed by a space so just keep going
            return True
        # We have a valid meta element we want to search for attributes
        hasPragma = False
        pendingEncoding = None
        while True:
            # Try to find the next attribute after the current position
            attr = self.getAttribute()
            if attr is None:
                return True
            else:
                if attr[0] == b"http-equiv":
                    hasPragma = attr[1] == b"content-type"
                    if hasPragma and pendingEncoding is not None:
                        # A content attribute preceded the pragma; commit
                        # the encoding it declared.
                        self.encoding = pendingEncoding
                        return False
                elif attr[0] == b"charset":
                    tentativeEncoding = attr[1]
                    codec = lookupEncoding(tentativeEncoding)
                    if codec is not None:
                        self.encoding = codec
                        return False
                elif attr[0] == b"content":
                    contentParser = ContentAttrParser(EncodingBytes(attr[1]))
                    tentativeEncoding = contentParser.parse()
                    if tentativeEncoding is not None:
                        codec = lookupEncoding(tentativeEncoding)
                        if codec is not None:
                            if hasPragma:
                                self.encoding = codec
                                return False
                            else:
                                # Hold it until an http-equiv pragma
                                # confirms this is a content-type meta.
                                pendingEncoding = codec
    def handlePossibleStartTag(self):
        return self.handlePossibleTag(False)
    def handlePossibleEndTag(self):
        # Consume the "/" that followed "<" before examining the tag name.
        next(self.data)
        return self.handlePossibleTag(True)
    def handlePossibleTag(self, endTag):
        data = self.data
        if data.currentByte not in asciiLettersBytes:
            # If the next byte is not an ascii letter either ignore this
            # fragment (possible start tag case) or treat it according to
            # handleOther
            if endTag:
                data.previous()
                self.handleOther()
            return True
        c = data.skipUntil(spacesAngleBrackets)
        if c == b"<":
            # return to the first step in the overall "two step" algorithm
            # reprocessing the < byte
            data.previous()
        else:
            # Read all attributes
            attr = self.getAttribute()
            while attr is not None:
                attr = self.getAttribute()
        return True
    def handleOther(self):
        # Skip to the end of the current markup construct.
        return self.data.jumpTo(b">")
    def getAttribute(self):
        """Return a name,value pair for the next attribute in the stream,
        if one is found, or None"""
        # Step numbers below follow the HTML "get an attribute" algorithm.
        data = self.data
        # Step 1 (skip chars)
        c = data.skip(spaceCharactersBytes | frozenset([b"/"]))
        assert c is None or len(c) == 1
        # Step 2
        if c in (b">", None):
            return None
        # Step 3
        attrName = []
        attrValue = []
        # Step 4 attribute name
        while True:
            if c == b"=" and attrName:
                break
            elif c in spaceCharactersBytes:
                # Step 6!
                c = data.skip()
                break
            elif c in (b"/", b">"):
                return b"".join(attrName), b""
            elif c in asciiUppercaseBytes:
                attrName.append(c.lower())
            elif c is None:
                return None
            else:
                attrName.append(c)
            # Step 5
            c = next(data)
        # Step 7
        if c != b"=":
            data.previous()
            return b"".join(attrName), b""
        # Step 8
        next(data)
        # Step 9
        c = data.skip()
        # Step 10
        if c in (b"'", b'"'):
            # 10.1
            quoteChar = c
            while True:
                # 10.2
                c = next(data)
                # 10.3
                if c == quoteChar:
                    next(data)
                    return b"".join(attrName), b"".join(attrValue)
                # 10.4
                elif c in asciiUppercaseBytes:
                    attrValue.append(c.lower())
                # 10.5
                else:
                    attrValue.append(c)
        elif c == b">":
            return b"".join(attrName), b""
        elif c in asciiUppercaseBytes:
            attrValue.append(c.lower())
        elif c is None:
            return None
        else:
            attrValue.append(c)
        # Step 11
        while True:
            c = next(data)
            if c in spacesAngleBrackets:
                return b"".join(attrName), b"".join(attrValue)
            elif c in asciiUppercaseBytes:
                attrValue.append(c.lower())
            elif c is None:
                return None
            else:
                attrValue.append(c)
class ContentAttrParser(object):
    """Extracts the charset=... portion, if any, from the value of a meta
    element's "content" attribute."""
    def __init__(self, data):
        # *data* is expected to be an EncodingBytes instance (a bytes
        # subclass), since parse() uses its position/skip/jumpTo API.
        assert isinstance(data, bytes)
        self.data = data
    def parse(self):
        # Any StopIteration from the EncodingBytes cursor means we ran
        # off the end without finding a complete declaration.
        try:
            # Check if the attr name is charset
            # otherwise return
            self.data.jumpTo(b"charset")
            self.data.position += 1
            self.data.skip()
            if not self.data.currentByte == b"=":
                # If there is no = sign keep looking for attrs
                return None
            self.data.position += 1
            self.data.skip()
            # Look for an encoding between matching quote marks
            if self.data.currentByte in (b'"', b"'"):
                quoteMark = self.data.currentByte
                self.data.position += 1
                oldPosition = self.data.position
                if self.data.jumpTo(quoteMark):
                    return self.data[oldPosition:self.data.position]
                else:
                    return None
            else:
                # Unquoted value
                oldPosition = self.data.position
                try:
                    self.data.skipUntil(spaceCharactersBytes)
                    return self.data[oldPosition:self.data.position]
                except StopIteration:
                    # Return the whole remaining value
                    return self.data[oldPosition:]
        except StopIteration:
            return None
def lookupEncoding(encoding):
    """Return the python codec name corresponding to an encoding or None if the
    string doesn't correspond to a valid encoding."""
    # Byte-string labels must decode as ASCII to be valid encoding names.
    if isinstance(encoding, binary_type):
        try:
            encoding = encoding.decode("ascii")
        except UnicodeDecodeError:
            return None
    if encoding is None:
        return None
    try:
        return webencodings.lookup(encoding)
    except AttributeError:
        return None
| gpl-3.0 |
iut-ibk/P8-WSC-GUI | 3dparty/Editra/src/syntax/_erlang.py | 3 | 4464 | ###############################################################################
# Name: erlang.py #
# Purpose: Define Erlang syntax for highlighting and other features #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2007 Cody Precord <staff@editra.org> #
# License: wxWindows License #
###############################################################################
"""
FILE: erlang.py
AUTHOR: Cody Precord
@summary: Lexer configuration module for the Erlang Programming Language
@todo: better styling
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: _erlang.py 68798 2011-08-20 17:17:05Z CJP $"
__revision__ = "$Revision: 68798 $"
#-----------------------------------------------------------------------------#
# Imports
import wx
import wx.stc as stc
# Local Imports
import synglob
import syndata
#-----------------------------------------------------------------------------#
#---- Keyword Definitions ----#
# Keyword set 0: Erlang reserved words plus preprocessor/attribute names
KEYWORDS = (0, "compile define else endif export file ifdef ifndef import "
               "include include_lib module record undef author copyright doc "
               "after begin case catch cond end fun if let of query receive "
               "when define record export import include include_lib else "
               "endif undef apply attribute call do in letrec module primop "
               "try")
#---- End Keyword Definitions ----#
#---- Syntax Style Specs ----#
# Maps each Scintilla Erlang lexer token id to an Editra style tag
SYNTAX_ITEMS = [(stc.STC_ERLANG_ATOM, 'default_style'), # need new tag
                (stc.STC_ERLANG_CHARACTER, 'char_style'),
                (stc.STC_ERLANG_COMMENT, 'comment_style'),
                (stc.STC_ERLANG_DEFAULT, 'default_style'),
                (stc.STC_ERLANG_FUNCTION_NAME, 'funct_style'),
                (stc.STC_ERLANG_KEYWORD, 'keyword_style'),
                (stc.STC_ERLANG_MACRO, 'pre_style'),
                (stc.STC_ERLANG_NODE_NAME, 'string_style'), # maybe change
                (stc.STC_ERLANG_NUMBER, 'number_style'),
                (stc.STC_ERLANG_OPERATOR, 'operator_style'),
                (stc.STC_ERLANG_RECORD, 'keyword2_style'),
                (stc.STC_ERLANG_STRING, 'string_style'),
                (stc.STC_ERLANG_UNKNOWN, 'unknown_style'),
                (stc.STC_ERLANG_VARIABLE, 'default_style')] # need custom?
# Version specific
# wxPython 2.9+ ships a newer Scintilla with additional Erlang token ids
if wx.VERSION >= (2, 9, 0, 0, ''):
    SYNTAX_ITEMS.append((stc.STC_ERLANG_ATOM_QUOTED, 'default_style')) # TODO
    SYNTAX_ITEMS.append((stc.STC_ERLANG_BIFS, 'default_style')) # TODO
    SYNTAX_ITEMS.append((stc.STC_ERLANG_COMMENT_DOC, 'dockey_style')) # TODO
    SYNTAX_ITEMS.append((stc.STC_ERLANG_COMMENT_DOC_MACRO, 'dockey_style')) # TODO
    SYNTAX_ITEMS.append((stc.STC_ERLANG_COMMENT_FUNCTION, 'comment_style')) # TODO
    SYNTAX_ITEMS.append((stc.STC_ERLANG_COMMENT_MODULE, 'comment_style')) # TODO
    SYNTAX_ITEMS.append((stc.STC_ERLANG_MACRO_QUOTED, 'default_style')) # TODO
    SYNTAX_ITEMS.append((stc.STC_ERLANG_MODULES, 'default_style')) # TODO
    SYNTAX_ITEMS.append((stc.STC_ERLANG_MODULES_ATT, 'default_style')) # TODO
    SYNTAX_ITEMS.append((stc.STC_ERLANG_NODE_NAME_QUOTED, 'default_style')) # TODO
    SYNTAX_ITEMS.append((stc.STC_ERLANG_PREPROC, 'pre_style')) # TODO
    SYNTAX_ITEMS.append((stc.STC_ERLANG_RECORD_QUOTED, 'default_style')) # TODO
else:
    SYNTAX_ITEMS.append((stc.STC_ERLANG_SEPARATOR, 'default_style')) # need style?
#---- Extra Properties ----#
# Scintilla fold properties (only FOLD is currently returned by
# SyntaxData.GetProperties below)
FOLD = ('fold', '1')
FOLD_CMT = ('fold.comments', '1')
FOLD_KW = ('fold.keywords', '1')
FOLD_BRACE = ('fold.braces', '1')
#-----------------------------------------------------------------------------#
class SyntaxData(syndata.SyntaxDataBase):
    """SyntaxData object for Erlang.

    Supplies the lexer id, keyword list, style specs and fold properties
    that the editor queries when highlighting an Erlang buffer.
    """
    def __init__(self, langid):
        super(SyntaxData, self).__init__(langid)

        # Setup: use Scintilla's built-in Erlang lexer for this language id.
        self.SetLexer(stc.STC_LEX_ERLANG)

    def GetKeywords(self):
        """Return the list of keyword specifications for the lexer."""
        return [KEYWORDS]

    def GetSyntaxSpec(self):
        """Return the (lexer style id, editor style tag) mapping list."""
        return SYNTAX_ITEMS

    def GetProperties(self):
        """Return the list of extra lexer properties to set (folding on)."""
        return [FOLD]

    def GetCommentPattern(self):
        """Return the character sequence used to comment a block of code."""
        return [u'%%']
| gpl-2.0 |
zubair-arbi/edx-platform | common/djangoapps/student/tests/test_enrollment.py | 96 | 8505 | """
Tests for student enrollment.
"""
import ddt
import unittest
from mock import patch
from django.conf import settings
from django.core.urlresolvers import reverse
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from util.testing import UrlResetMixin
from embargo.test_utils import restrict_course
from student.tests.factories import UserFactory, CourseModeFactory
from student.models import CourseEnrollment
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class EnrollmentTest(UrlResetMixin, ModuleStoreTestCase):
    """
    Test student enrollment, especially with different course modes.
    """
    USERNAME = "Bob"
    EMAIL = "bob@example.com"
    PASSWORD = "edx"

    @patch.dict(settings.FEATURES, {'EMBARGO': True})
    def setUp(self):
        """ Create a course and user, then log in. """
        super(EnrollmentTest, self).setUp('embargo')
        self.course = CourseFactory.create()
        self.user = UserFactory.create(username=self.USERNAME, email=self.EMAIL, password=self.PASSWORD)
        self.client.login(username=self.USERNAME, password=self.PASSWORD)

        # URLs whose resolution depends on the course created above.
        self.urls = [
            reverse('course_modes_choose', kwargs={'course_id': unicode(self.course.id)})
        ]

    @ddt.data(
        # Default (no course modes in the database)
        # Expect that we're redirected to the dashboard
        # and automatically enrolled as "honor"
        ([], '', 'honor'),

        # Audit / Verified / Honor
        # We should always go to the "choose your course" page.
        # We should also be enrolled as "honor" by default.
        (['honor', 'verified', 'audit'], 'course_modes_choose', 'honor'),

        # Professional ed
        # Expect that we're sent to the "choose your track" page
        # (which will, in turn, redirect us to a page where we can verify/pay)
        # We should NOT be auto-enrolled, because that would be giving
        # away an expensive course for free :)
        (['professional'], 'course_modes_choose', None),
        (['no-id-professional'], 'course_modes_choose', None),
    )
    @ddt.unpack
    def test_enroll(self, course_modes, next_url, enrollment_mode):
        """Enrolling redirects to the expected page and sets the expected mode."""
        # Create the course modes (if any) required for this test case
        for mode_slug in course_modes:
            CourseModeFactory.create(
                course_id=self.course.id,
                mode_slug=mode_slug,
                mode_display_name=mode_slug,
            )

        # Reverse the expected next URL, if one is provided
        # (otherwise, use an empty string, which the JavaScript client
        # interprets as a redirect to the dashboard)
        full_url = (
            reverse(next_url, kwargs={'course_id': unicode(self.course.id)})
            if next_url else next_url
        )

        # Enroll in the course and verify the URL we get sent to
        resp = self._change_enrollment('enroll')
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.content, full_url)

        # If we're not expecting to be enrolled, verify that this is the case
        if enrollment_mode is None:
            self.assertFalse(CourseEnrollment.is_enrolled(self.user, self.course.id))

        # Otherwise, verify that we're enrolled with the expected course mode
        else:
            self.assertTrue(CourseEnrollment.is_enrolled(self.user, self.course.id))
            course_mode, is_active = CourseEnrollment.enrollment_mode_for_user(self.user, self.course.id)
            self.assertTrue(is_active)
            self.assertEqual(course_mode, enrollment_mode)

    def test_unenroll(self):
        """Unenrolling an enrolled student removes the active enrollment."""
        # Enroll the student in the course
        CourseEnrollment.enroll(self.user, self.course.id, mode="honor")

        # Attempt to unenroll the student
        resp = self._change_enrollment('unenroll')
        self.assertEqual(resp.status_code, 200)

        # Expect that we're no longer enrolled
        self.assertFalse(CourseEnrollment.is_enrolled(self.user, self.course.id))

    @patch.dict(settings.FEATURES, {'ENABLE_MKTG_EMAIL_OPT_IN': True})
    @patch('openedx.core.djangoapps.user_api.preferences.api.update_email_opt_in')
    @ddt.data(
        ([], 'true'),
        ([], 'false'),
        ([], None),
        (['honor', 'verified'], 'true'),
        (['honor', 'verified'], 'false'),
        (['honor', 'verified'], None),
        (['professional'], 'true'),
        (['professional'], 'false'),
        (['professional'], None),
        (['no-id-professional'], 'true'),
        (['no-id-professional'], 'false'),
        (['no-id-professional'], None),
    )
    @ddt.unpack
    def test_enroll_with_email_opt_in(self, course_modes, email_opt_in, mock_update_email_opt_in):
        """The email opt-in preference is forwarded only when it was supplied.

        ``mock_update_email_opt_in`` is injected by the @patch decorator above.
        """
        # Create the course modes (if any) required for this test case
        for mode_slug in course_modes:
            CourseModeFactory.create(
                course_id=self.course.id,
                mode_slug=mode_slug,
                mode_display_name=mode_slug,
            )

        # Enroll in the course
        self._change_enrollment('enroll', email_opt_in=email_opt_in)

        # Verify that the profile API has been called as expected
        if email_opt_in is not None:
            opt_in = email_opt_in == 'true'
            mock_update_email_opt_in.assert_called_once_with(self.user, self.course.org, opt_in)
        else:
            self.assertFalse(mock_update_email_opt_in.called)

    @patch.dict(settings.FEATURES, {'EMBARGO': True})
    def test_embargo_restrict(self):
        # When accessing the course from an embargoed country,
        # we should be blocked.
        with restrict_course(self.course.id) as redirect_url:
            response = self._change_enrollment('enroll')
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.content, redirect_url)

        # Verify that we weren't enrolled
        is_enrolled = CourseEnrollment.is_enrolled(self.user, self.course.id)
        self.assertFalse(is_enrolled)

    @patch.dict(settings.FEATURES, {'EMBARGO': True})
    def test_embargo_allow(self):
        """With the embargo feature on but no restriction, enrollment succeeds."""
        response = self._change_enrollment('enroll')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, '')

        # Verify that we were enrolled
        is_enrolled = CourseEnrollment.is_enrolled(self.user, self.course.id)
        self.assertTrue(is_enrolled)

    def test_user_not_authenticated(self):
        """Anonymous users may not change enrollment state."""
        # Log out, so we're no longer authenticated
        self.client.logout()

        # Try to enroll, expecting a forbidden response
        resp = self._change_enrollment('enroll')
        self.assertEqual(resp.status_code, 403)

    def test_missing_course_id_param(self):
        """A POST without a course_id is a bad request."""
        resp = self.client.post(
            reverse('change_enrollment'),
            {'enrollment_action': 'enroll'}
        )
        self.assertEqual(resp.status_code, 400)

    def test_unenroll_not_enrolled_in_course(self):
        """Unenrolling without an existing enrollment is a bad request."""
        # Try unenroll without first enrolling in the course
        resp = self._change_enrollment('unenroll')
        self.assertEqual(resp.status_code, 400)

    def test_invalid_enrollment_action(self):
        """Unknown enrollment actions are rejected with a bad request."""
        resp = self._change_enrollment('not_an_action')
        self.assertEqual(resp.status_code, 400)

    def test_with_invalid_course_id(self):
        """A malformed course id ("edx/") is rejected with a bad request."""
        CourseEnrollment.enroll(self.user, self.course.id, mode="honor")
        resp = self._change_enrollment('unenroll', course_id="edx/")
        self.assertEqual(resp.status_code, 400)

    def _change_enrollment(self, action, course_id=None, email_opt_in=None):
        """Change the student's enrollment status in a course.

        Args:
            action (str): The action to perform (either "enroll" or "unenroll")

        Keyword Args:
            course_id (unicode): If provided, use this course ID. Otherwise, use the
                course ID created in the setup for this test.

            email_opt_in (unicode): If provided, pass this value along as
                an additional GET parameter.

        Returns:
            Response

        """
        if course_id is None:
            course_id = unicode(self.course.id)

        params = {
            'enrollment_action': action,
            'course_id': course_id
        }

        if email_opt_in:
            params['email_opt_in'] = email_opt_in

        return self.client.post(reverse('change_enrollment'), params)
| agpl-3.0 |
nolanliou/tensorflow | tensorflow/contrib/session_bundle/example/export_half_plus_two.py | 75 | 6029 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exports a toy linear regression inference graph.
Exports a TensorFlow graph to /tmp/half_plus_two/ based on the Exporter
format.
This graph calculates,
y = a*x + b
where a and b are variables with a=0.5 and b=2.
Output from this program is typically used to exercise Session
loading and execution code.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tensorflow as tf
from tensorflow.contrib.session_bundle import exporter
FLAGS = None
def Export(export_dir, use_checkpoint_v2):
  """Builds the y = a*x + b graph and exports it in the Exporter format.

  Args:
    export_dir: Directory to write the export into.
    use_checkpoint_v2: If True, the Saver writes V2 checkpoint files,
      otherwise V1.
  """
  with tf.Session() as sess:
    # Make model parameters a&b variables instead of constants to
    # exercise the variable reloading mechanisms.
    a = tf.Variable(0.5, name="a")
    b = tf.Variable(2.0, name="b")

    # Create a placeholder for serialized tensorflow.Example messages to be fed.
    serialized_tf_example = tf.placeholder(tf.string, name="tf_example")

    # Parse the tensorflow.Example looking for a feature named "x" with a single
    # floating point value.
    feature_configs = {"x": tf.FixedLenFeature([1], dtype=tf.float32),}
    tf_example = tf.parse_example(serialized_tf_example, feature_configs)
    # Use tf.identity() to assign name
    x = tf.identity(tf_example["x"], name="x")

    # Calculate, y = a*x + b
    y = tf.add(tf.multiply(a, x), b, name="y")

    # Setup a standard Saver for our variables.
    save = tf.train.Saver(
        {
            "a": a,
            "b": b
        },
        sharded=True,
        write_version=tf.train.SaverDef.V2 if use_checkpoint_v2 else
        tf.train.SaverDef.V1)

    # asset_path contains the base directory of assets used in training (e.g.
    # vocabulary files).
    original_asset_path = tf.constant("/tmp/original/export/assets")
    # Ops reading asset files should reference the asset_path tensor
    # which stores the original asset path at training time and the
    # overridden assets directory at restore time.
    asset_path = tf.Variable(original_asset_path,
                             name="asset_path",
                             trainable=False,
                             collections=[])
    assign_asset_path = asset_path.assign(original_asset_path)

    # Use a fixed global step number.
    global_step_tensor = tf.Variable(123, name="global_step")

    # Create a RegressionSignature for our input and output.
    regression_signature = exporter.regression_signature(
        input_tensor=serialized_tf_example,
        # Use tf.identity here because we export two signatures here.
        # Otherwise only graph for one of the signatures will be loaded
        # (whichever is created first) during serving.
        output_tensor=tf.identity(y))
    named_graph_signature = {
        "inputs": exporter.generic_signature({"x": x}),
        "outputs": exporter.generic_signature({"y": y})
    }

    # Create two filename assets and corresponding tensors.
    # TODO(b/26254158) Consider adding validation of file existence as well as
    # hashes (e.g. sha1) for consistency.
    original_filename1 = tf.constant("hello1.txt")
    tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, original_filename1)
    filename1 = tf.Variable(original_filename1,
                            name="filename1",
                            trainable=False,
                            collections=[])
    assign_filename1 = filename1.assign(original_filename1)
    original_filename2 = tf.constant("hello2.txt")
    tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, original_filename2)
    filename2 = tf.Variable(original_filename2,
                            name="filename2",
                            trainable=False,
                            collections=[])
    assign_filename2 = filename2.assign(original_filename2)

    # Init op contains a group of all variables that we assign.
    init_op = tf.group(assign_asset_path, assign_filename1, assign_filename2)

    # CopyAssets is used as a callback during export to copy files to the
    # given export directory.
    def CopyAssets(filepaths, export_path):
      print("copying asset files to: %s" % export_path)
      for filepath in filepaths:
        print("copying asset file: %s" % filepath)

    # Run an export.
    tf.global_variables_initializer().run()
    export = exporter.Exporter(save)
    export.init(
        sess.graph.as_graph_def(),
        init_op=init_op,
        default_graph_signature=regression_signature,
        named_graph_signatures=named_graph_signature,
        assets_collection=tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS),
        assets_callback=CopyAssets)
    export.export(export_dir, global_step_tensor, sess)
def main(_):
  """Entry point invoked by tf.app.run; exports using the parsed FLAGS."""
  Export(FLAGS.export_dir, FLAGS.use_checkpoint_v2)
if __name__ == "__main__":
  parser = argparse.ArgumentParser()
  # Registered "bool" type lets --use_checkpoint_v2 accept "true"/"false"
  # string values on the command line.
  parser.register("type", "bool", lambda v: v.lower() == "true")
  parser.add_argument(
      "--export_dir",
      type=str,
      default="/tmp/half_plus_two",
      help="Directory where to export inference model."
  )
  parser.add_argument(
      "--use_checkpoint_v2",
      type="bool",
      nargs="?",
      const=True,
      default=False,
      help="If true, write v2 checkpoint files.")
  FLAGS, unparsed = parser.parse_known_args()
  # Forward any unparsed arguments to tf.app.run so TF's own flags still work.
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
tinkhaven-organization/odoo | addons/l10n_th/__init__.py | 893 | 1045 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
root-mirror/root | bindings/pyroot/cppyy/cppyy/test/test_pythonify.py | 20 | 16364 | import py, os, sys
from pytest import raises
from .support import setup_make, pylong
currpath = py.path.local(__file__).dirpath()
test_dct = str(currpath.join("example01Dict"))
def setup_module(mod):
    """Module-level test fixture: build the example01 test library once."""
    setup_make("example01")
class TestPYTHONIFY:
def setup_class(cls):
cls.test_dct = test_dct
import cppyy
cls.example01 = cppyy.load_reflection_info(cls.test_dct)
def test01_load_dictionary_cache(self):
"""Test whether loading a dictionary twice results in the same object"""
import cppyy
lib2 = cppyy.load_reflection_info(self.test_dct)
assert self.example01 is lib2
def test02_finding_classes(self):
"""Test the lookup of a class, and its caching"""
import cppyy
example01_class = cppyy.gbl.example01
cl2 = cppyy.gbl.example01
assert example01_class is cl2
with raises(AttributeError):
cppyy.gbl.nonexistingclass
def test03_calling_static_functions(self):
"""Test calling of static methods"""
import cppyy, sys, math
example01_class = cppyy.gbl.example01
res = example01_class.staticAddOneToInt(1)
assert res == 2
res = example01_class.staticAddOneToInt(pylong(1))
assert res == 2
res = example01_class.staticAddOneToInt(1, 2)
assert res == 4
res = example01_class.staticAddOneToInt(-1)
assert res == 0
maxint32 = int(2 ** 31 - 1)
res = example01_class.staticAddOneToInt(maxint32-1)
assert res == maxint32
res = example01_class.staticAddOneToInt(maxint32)
assert res == -maxint32-1
raises(TypeError, example01_class.staticAddOneToInt, 1, [])
raises(TypeError, example01_class.staticAddOneToInt, 1.)
raises(TypeError, example01_class.staticAddOneToInt, maxint32+1)
res = example01_class.staticAddToDouble(0.09)
assert res == 0.09 + 0.01
res = example01_class.staticAtoi("1")
assert res == 1
res = example01_class.staticStrcpy("aap") # TODO: this leaks
assert res == "aap"
res = example01_class.staticStrcpy(u"aap") # TODO: id.
assert res == "aap"
raises(TypeError, example01_class.staticStrcpy, 1.) # TODO: id.
def test04_constructing_and_calling(self):
"""Test object and method calls"""
import cppyy
example01_class = cppyy.gbl.example01
assert example01_class.getCount() == 0
instance = example01_class(7)
assert example01_class.getCount() == 1
res = instance.addDataToInt(4)
assert res == 11
res = instance.addDataToInt(-4)
assert res == 3
instance.__destruct__()
assert example01_class.getCount() == 0
raises(ReferenceError, instance.addDataToInt, 4)
instance = example01_class(7)
instance2 = example01_class(8)
assert example01_class.getCount() == 2
instance.__destruct__()
assert example01_class.getCount() == 1
instance2.__destruct__()
assert example01_class.getCount() == 0
instance = example01_class(13)
res = instance.addDataToDouble(16)
assert round(res-29, 8) == 0.
instance.__destruct__()
instance = example01_class(-13)
res = instance.addDataToDouble(16)
assert round(res-3, 8) == 0.
instance.__destruct__()
instance = example01_class(42)
assert example01_class.getCount() == 1
res = instance.addDataToAtoi("13")
assert res == 55
res = instance.addToStringValue("12") # TODO: this leaks
assert res == "54"
res = instance.addToStringValue("-12") # TODO: this leaks
assert res == "30"
res = instance.staticAddOneToInt(pylong(1))
assert res == 2
instance.__destruct__()
assert example01_class.getCount() == 0
def test05_passing_object_by_pointer(self):
"""Pass object by pointer"""
import cppyy
example01_class = cppyy.gbl.example01
payload_class = cppyy.gbl.payload
e = example01_class(14)
pl = payload_class(3.14)
assert round(pl.getData()-3.14, 8) == 0
example01_class.staticSetPayload(pl, 41.)
assert pl.getData() == 41.
example01_class.staticSetPayload(pl, 43.)
assert pl.getData() == 43.
e.staticSetPayload(pl, 45.)
assert pl.getData() == 45.
e.setPayload(pl)
assert round(pl.getData()-14., 8) == 0
pl.__destruct__()
e.__destruct__()
assert example01_class.getCount() == 0
def test06_returning_object_by_pointer(self):
"""Return an object py pointer"""
import cppyy
example01_class = cppyy.gbl.example01
payload_class = cppyy.gbl.payload
pl = payload_class(3.14)
assert round(pl.getData()-3.14, 8) == 0
pl2 = example01_class.staticCyclePayload(pl, 38.)
assert pl2.getData() == 38.
e = example01_class(14)
pl2 = e.cyclePayload(pl)
assert round(pl2.getData()-14., 8) == 0
pl.__destruct__()
e.__destruct__()
assert example01_class.getCount() == 0
def test07_returning_object_by_value(self):
"""Return an object by value"""
import cppyy
example01_class = cppyy.gbl.example01
payload_class = cppyy.gbl.payload
pl = payload_class(3.14)
assert round(pl.getData()-3.14, 8) == 0
pl2 = example01_class.staticCopyCyclePayload(pl, 38.)
assert pl2.getData() == 38.
pl2.__destruct__()
e = example01_class(14)
pl2 = e.copyCyclePayload(pl)
assert round(pl2.getData()-14., 8) == 0
pl2.__destruct__()
pl.__destruct__()
e.__destruct__()
assert example01_class.getCount() == 0
def test08_global_functions(self):
"""Call a global function"""
import cppyy
assert cppyy.gbl.globalAddOneToInt(3) == 4 # creation lookup
assert cppyy.gbl.globalAddOneToInt(3) == 4 # cached lookup
assert cppyy.gbl.ns_example01.globalAddOneToInt(4) == 5
assert cppyy.gbl.ns_example01.globalAddOneToInt(4) == 5
def test09_memory(self):
"""Test proper C++ destruction by the garbage collector"""
import cppyy, gc
example01_class = cppyy.gbl.example01
payload_class = cppyy.gbl.payload
pl = payload_class(3.14)
assert payload_class.count == 1
assert round(pl.getData()-3.14, 8) == 0
pl2 = example01_class.staticCopyCyclePayload(pl, 38.)
assert payload_class.count == 2
assert pl2.getData() == 38.
pl2 = None
gc.collect()
assert payload_class.count == 1
e = example01_class(14)
pl2 = e.copyCyclePayload(pl)
assert payload_class.count == 2
assert round(pl2.getData()-14., 8) == 0
pl2 = None
gc.collect()
assert payload_class.count == 1
pl = None
e = None
gc.collect()
assert payload_class.count == 0
assert example01_class.getCount() == 0
pl = payload_class(3.14)
pl_a = example01_class.staticCyclePayload(pl, 66.)
pl_a.getData() == 66.
assert payload_class.count == 1
pl_a = None
pl = None
gc.collect()
assert payload_class.count == 0
# TODO: need ReferenceError on touching pl_a
def test10_default_arguments(self):
"""Test propagation of default function arguments"""
import cppyy
a = cppyy.gbl.ArgPasser()
# NOTE: when called through the stub, default args are fine
f = a.stringRef
s = cppyy.gbl.std.string
assert f(s("aap"), 0, s("noot")) == "aap"
assert f(s("noot"), 1) == "default"
assert f(s("mies")) == "mies"
for itype in ['short', 'ushort', 'int', 'uint', 'long', 'ulong']:
g = getattr(a, '%sValue' % itype)
raises(TypeError, g, 1, 2, 3, 4, 6)
assert g(11, 0, 12, 13) == 11
assert g(11, 1, 12, 13) == 12
assert g(11, 1, 12) == 12
assert g(11, 2, 12) == 2
assert g(11, 1) == 1
assert g(11, 2) == 2
assert g(11) == 11
for ftype in ['float', 'double']:
g = getattr(a, '%sValue' % ftype)
raises(TypeError, g, 1., 2, 3., 4., 6.)
assert g(11., 0, 12., 13.) == 11.
assert g(11., 1, 12., 13.) == 12.
assert g(11., 1, 12.) == 12.
assert g(11., 2, 12.) == 2.
assert g(11., 1) == 1.
assert g(11., 2) == 2.
assert g(11.) == 11.
def test11_overload_on_arguments(self):
"""Test functions overloaded on arguments"""
import cppyy
e = cppyy.gbl.example01(1)
assert e.addDataToInt(2) == 3
assert e.overloadedAddDataToInt(3) == 4
assert e.overloadedAddDataToInt(4, 5) == 10
assert e.overloadedAddDataToInt(6, 7, 8) == 22
def test12_typedefs(self):
"""Test access and use of typedefs"""
import cppyy
assert cppyy.gbl.example01 == cppyy.gbl.example01_t
def test13_underscore_in_class_name(self):
"""Test recognition of '_' as part of a valid class name"""
import cppyy
assert cppyy.gbl.z_ == cppyy.gbl.z_
z = cppyy.gbl.z_()
assert hasattr(z, 'myint')
assert z.gime_z_(z)
def test14_bound_unbound_calls(self):
"""Test (un)bound method calls"""
import cppyy
raises(TypeError, cppyy.gbl.example01.addDataToInt, 1)
meth = cppyy.gbl.example01.addDataToInt
raises(TypeError, meth)
raises(TypeError, meth, 1)
e = cppyy.gbl.example01(2)
assert 5 == meth(e, 3)
def test15_installable_function(self):
"""Test installing and calling global C++ function as python method"""
import cppyy
cppyy.gbl.example01.fresh = cppyy.gbl.installableAddOneToInt
e = cppyy.gbl.example01(0)
assert 2 == e.fresh(1)
assert 3 == e.fresh(2)
def test16_subclassing(self):
"""A sub-class on the python side should have that class as type"""
import cppyy, gc
gc.collect()
example01 = cppyy.gbl.example01
assert example01.getCount() == 0
o = example01()
assert type(o) == example01
assert example01.getCount() == 1
o.__destruct__()
assert example01.getCount() == 0
class MyClass1(example01):
def myfunc(self):
return 1
o = MyClass1()
assert type(o) == MyClass1
assert isinstance(o, example01)
assert example01.getCount() == 1
assert o.myfunc() == 1
o.__destruct__()
assert example01.getCount() == 0
class MyClass2(example01):
def __init__(self, what):
example01.__init__(self)
self.what = what
o = MyClass2('hi')
assert type(o) == MyClass2
assert example01.getCount() == 1
assert o.what == 'hi'
o.__destruct__()
assert example01.getCount() == 0
def test17_chaining(self):
"""Respective return values of temporaries should not go away"""
import cppyy
cppyy.cppdef("""namespace Lifeline {
struct A1 { A1(int x) : x(x) {} int x; };
struct A2 { A2(int x) { v.emplace_back(x); } std::vector<A1> v; std::vector<A1>& get() { return v; } };
struct A3 { A3(int x) { v.emplace_back(x); } std::vector<A2> v; std::vector<A2>& get() { return v; } };
struct A4 { A4(int x) { v.emplace_back(x); } std::vector<A3> v; std::vector<A3>& get() { return v; } };
struct A5 { A5(int x) { v.emplace_back(x); } std::vector<A4> v; std::vector<A4>& get() { return v; } };
A5 gime(int i) { return A5(i); }
}""")
assert cppyy.gbl.Lifeline.gime(42).get()[0].get()[0].get()[0].get()[0].x == 42
def test18_keywords(self):
"""Use of keyword arguments"""
import cppyy
cppyy.cppdef("""namespace KeyWords {
struct A {
A(std::initializer_list<int> vals) : fVals(vals) {}
std::vector<int> fVals;
};
struct B {
B() = default;
B(const A& in_A, const A& out_A) : fVal(42), fIn(in_A), fOut(out_A) {}
B(int val, const A& in_A, const A& out_A) : fVal(val), fIn(in_A), fOut(out_A) {}
int fVal;
A fIn, fOut;
};
int callme(int choice, int a, int b, int c) {
if (choice == 0) return a;
if (choice == 1) return b;
return c;
}
struct C {
int fChoice;
};
int callme_c(const C& o, int a, int b, int c) {
return callme(o.fChoice, a, b, c);
} }""")
# constructor and implicit conversion with keywords
A = cppyy.gbl.KeyWords.A
B = cppyy.gbl.KeyWords.B
def verify_b(b, val, ti, to):
assert b.fVal == val
assert b.fIn.fVals.size() == len(ti)
assert tuple(b.fIn.fVals) == ti
assert b.fOut.fVals.size() == len(to)
assert tuple(b.fOut.fVals) == to
b = B(in_A=(256,), out_A=(32,))
verify_b(b, 42, (256,), (32,))
b = B(out_A=(32,), in_A=(256,))
verify_b(b, 42, (256,), (32,))
with raises(TypeError):
b = B(in_B=(256,), out_A=(32,))
b = B(17, in_A=(23,), out_A=(78,))
verify_b(b, 17, (23,), (78,))
with raises(TypeError):
b = B(17, val=23, out_A=(78,))
with raises(TypeError):
b = B(17, out_A=(78,))
# global function with keywords
callme = cppyy.gbl.KeyWords.callme
for i in range(3):
assert callme(i, a=1, b=2, c=3) == i+1
assert callme(i, b=2, c=3, a=1) == i+1
assert callme(i, c=3, a=1, b=2) == i+1
with raises(TypeError):
callme(0, a=1, b=2, d=3)
with raises(TypeError):
callme(0, 1, a=2, c=3)
with raises(TypeError):
callme(0, a=1, b=2)
# global function as method with keywords
c = cppyy.gbl.KeyWords.C()
cppyy.gbl.KeyWords.C.callme = cppyy.gbl.KeyWords.callme_c
for i in range(3):
c.fChoice = i
assert c.callme(a=1, b=2, c=3) == i+1
assert c.callme(b=2, c=3, a=1) == i+1
assert c.callme(c=3, a=1, b=2) == i+1
c.fChoice = 0
with raises(TypeError):
c.callme(a=1, b=2, d=3)
with raises(TypeError):
c.callme(1, a=2, c=3)
with raises(TypeError):
c.callme(a=1, b=2)
class TestPYTHONIFY_UI:
    """Tests for user-facing pythonization hooks and writable globals."""

    def setup_class(cls):
        # Load the example01 reflection dictionary built by setup_module.
        cls.test_dct = test_dct
        import cppyy
        cls.example01 = cppyy.load_reflection_info(cls.test_dct)

    def test01_pythonizations(self):
        """Test addition of user-defined pythonizations"""

        import cppyy

        def example01a_pythonize(pyclass, pyname):
            if pyname == 'example01a':
                def getitem(self, idx):
                    return self.addDataToInt(idx)
                pyclass.__getitem__ = getitem

        cppyy.py.add_pythonization(example01a_pythonize)

        e = cppyy.gbl.example01a(1)

        # __getitem__ added by the pythonization maps e[i] -> addDataToInt(i).
        assert e[0] == 1
        assert e[1] == 2
        assert e[5] == 6

    def test02_fragile_pythonizations(self):
        """Test pythonizations error reporting"""

        import cppyy

        # A non-callable pythonization must be rejected up front.
        example01_pythonize = 1
        raises(TypeError, cppyy.py.add_pythonization, example01_pythonize)

    def test03_write_access_to_globals(self):
        """Test overwritability of globals"""

        import cppyy

        oldval = cppyy.gbl.ns_example01.gMyGlobalInt
        assert oldval == 99

        proxy = cppyy.gbl.ns_example01.__class__.__dict__['gMyGlobalInt']
        cppyy.gbl.ns_example01.gMyGlobalInt = 3
        assert proxy.__get__(proxy, None) == 3

        # Restore so later tests see the original value.
        cppyy.gbl.ns_example01.gMyGlobalInt = oldval
| lgpl-2.1 |
vaygr/ansible | test/sanity/validate-modules/utils.py | 5 | 4256 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Matt Martz <matt@sivel.net>
# Copyright (C) 2015 Rackspace US, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import ast
import sys
from io import BytesIO, TextIOWrapper
import yaml
import yaml.reader
from ansible.module_utils._text import to_text
from ansible.module_utils.parsing.convert_bool import boolean
class AnsibleTextIOWrapper(TextIOWrapper):
    """TextIOWrapper whose write() never raises on un-encodable output.

    Used when capturing stdout/stderr of code under validation, so stray
    byte strings or unencodable characters cannot crash the harness.
    """
    def write(self, s):
        # Coerce to text first, replacing anything the target encoding
        # cannot represent.
        super(AnsibleTextIOWrapper, self).write(to_text(s, self.encoding, errors='replace'))
def find_globals(g, tree):
    """Uses AST to find globals in an ast tree

    Walks the statements in ``tree`` (a list of AST nodes, e.g.
    ``ast.parse(src).body``) and adds the names they bind to the set ``g``
    (mutated in place).  Function and class definitions contribute their own
    name; other compound statements (if/for/while/try/with) are recursed
    into, since assignments inside them still bind names in this scope.
    """
    for child in tree:
        if isinstance(child, (ast.FunctionDef, ast.ClassDef)):
            # A def/class binds its own name.  This check must come before
            # the generic ``body`` recursion below: FunctionDef/ClassDef also
            # carry a ``body`` list, and the previous ordering made this
            # branch unreachable, so function *locals* were reported as
            # globals while the function's name itself was missed.
            g.add(child.name)
        elif hasattr(child, 'body') and isinstance(child.body, list):
            # Control-flow statements (if/for/while/try/with): recurse so
            # names assigned inside them are still discovered.
            find_globals(g, child.body)
        elif isinstance(child, ast.Assign):
            try:
                g.add(child.targets[0].id)
            except (IndexError, AttributeError):
                # Tuple/attribute/subscript targets have no simple ``id``.
                pass
        elif isinstance(child, ast.Import):
            # Record every imported module, not just the first:
            # ``import os, sys`` binds both names.
            for name in child.names:
                g.add(name.asname or name.name)
        elif isinstance(child, ast.ImportFrom):
            for name in child.names:
                g_name = name.asname or name.name
                if g_name == '*':
                    continue
                g.add(g_name)
class CaptureStd():
    """Context manager to handle capturing stderr and stdout"""

    def __enter__(self):
        # Save the real streams so __exit__ can restore them.
        self.sys_stdout = sys.stdout
        self.sys_stderr = sys.stderr
        # Replace them with in-memory buffers; AnsibleTextIOWrapper makes
        # writes encoding-safe, mirroring each real stream's encoding.
        sys.stdout = self.stdout = AnsibleTextIOWrapper(BytesIO(), encoding=self.sys_stdout.encoding)
        sys.stderr = self.stderr = AnsibleTextIOWrapper(BytesIO(), encoding=self.sys_stderr.encoding)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Restore the original streams (runs even if the block raised).
        sys.stdout = self.sys_stdout
        sys.stderr = self.sys_stderr

    def get(self):
        """Return ``(stdout, stderr)``"""
        # ``buffer`` is the underlying BytesIO, so these are byte strings.
        return self.stdout.buffer.getvalue(), self.stderr.buffer.getvalue()
def parse_yaml(value, lineno, module, name, load_all=False):
    """Parse a YAML documentation string extracted from a module file.

    :arg value: YAML text to parse
    :arg lineno: 1-based line in the module file where ``value`` starts;
        used to translate parser positions into file positions
    :arg module: module name, used only to label error locations
    :arg name: name of the section being parsed (e.g. DOCUMENTATION),
        used in error messages
    :kwarg load_all: if True parse a multi-document stream
        (``yaml.safe_load_all``) instead of a single document
    :returns: tuple ``(data, errors, traces)``: the parsed structure (or
        None on failure), a list of error dicts with ``msg``/``line``
        (and sometimes ``column``) keys, and the caught exceptions
    """
    traces = []
    errors = []
    data = None

    if load_all:
        loader = yaml.safe_load_all
    else:
        loader = yaml.safe_load

    try:
        data = loader(value)
        if load_all:
            # safe_load_all returns a generator; force parsing now so any
            # YAML errors are raised inside this try block.
            data = list(data)
    except yaml.MarkedYAMLError as e:
        # Shift the mark from the embedded-string position to the position
        # within the module source file (marks are 0-based, hence lineno - 1).
        e.problem_mark.line += lineno - 1
        e.problem_mark.name = '%s.%s' % (module, name)
        errors.append({
            'msg': '%s is not valid YAML' % name,
            'line': e.problem_mark.line + 1,
            'column': e.problem_mark.column + 1
        })
        traces.append(e)
    except yaml.reader.ReaderError as e:
        traces.append(e)
        # TODO: Better line/column detection
        errors.append({
            'msg': ('%s is not valid YAML. Character '
                    '0x%x at position %d.' % (name, e.character, e.position)),
            'line': lineno
        })
    except yaml.YAMLError as e:
        # Fallback for YAML errors that carry no position information.
        traces.append(e)
        errors.append({
            'msg': '%s is not valid YAML: %s: %s' % (name, type(e), e),
            'line': lineno
        })

    return data, errors, traces
def maybe_convert_bool(value):
    """Safe conversion to boolean, catching TypeError and returning the original result

    Only used in doc<->arg_spec comparisons
    """
    try:
        return boolean(value)
    except TypeError:
        # Not a recognized boolean representation; let the caller compare
        # the raw value instead.
        return value
def compare_unordered_lists(a, b):
    """Safe list comparisons

    Supports:
        - unordered lists
        - unhashable elements

    Returns True when ``a`` and ``b`` contain the same elements with the
    same multiplicities, regardless of order.  Quadratic, but works for
    unhashable elements (dicts, lists) that rule out ``set``/``Counter``.
    """
    if len(a) != len(b):
        return False
    # list.count() compares with ==, so this is a multiset comparison that
    # tolerates unhashable elements.  The previous membership-only check
    # (``all(x in b for x in a)``) wrongly treated lists with the same
    # length but different duplicate counts, e.g. [1, 1, 2] and [1, 2, 2],
    # as equal.
    return all(a.count(x) == b.count(x) for x in a)
| gpl-3.0 |
rschwiebert/galgebra | examples/Old Format/terminal_check.py | 1 | 10489 | #!/usr/bin/python
import sys
import sys
from sympy import Symbol,symbols,sin,cos,Rational,expand,simplify,collect
from printer import enhance_print,Get_Program,Print_Function,Format
from mv import MV,Com,Nga,ONE,ZERO
def basic_multivector_operations():
    """Print geometric (*), outer (^) and inner (|, <, >) products for a
    general 3D metric, a general 2D metric, and a euclidian 2D metric."""
    Print_Function()
    # General (symbolic) 3D metric.
    (ex,ey,ez) = MV.setup('e*x|y|z')

    A = MV('A','mv')
    A.Fmt(1,'A')
    A.Fmt(2,'A')
    A.Fmt(3,'A')

    X = MV('X','vector')
    Y = MV('Y','vector')

    print 'g_{ij} =\n',MV.metric
    X.Fmt(1,'X')
    Y.Fmt(1,'Y')

    (X*Y).Fmt(2,'X*Y')
    (X^Y).Fmt(2,'X^Y')
    (X|Y).Fmt(2,'X|Y')

    # General (symbolic) 2D metric: vector with spinor.
    (ex,ey) = MV.setup('e*x|y')

    print 'g_{ij} =\n',MV.metric

    X = MV('X','vector')
    A = MV('A','spinor')
    X.Fmt(1,'X')
    A.Fmt(1,'A')

    (X|A).Fmt(2,'X|A')
    (X<A).Fmt(2,'X<A')
    (A>X).Fmt(2,'A>X')

    # Euclidian 2D metric: full product table of vector and spinor.
    (ex,ey) = MV.setup('e*x|y',metric='[1,1]')

    print 'g_{ii} =\n',MV.metric

    X = MV('X','vector')
    A = MV('A','spinor')
    X.Fmt(1,'X')
    A.Fmt(1,'A')

    (X*A).Fmt(2,'X*A')
    (X|A).Fmt(2,'X|A')
    (X<A).Fmt(2,'X<A')
    (X>A).Fmt(2,'X>A')

    (A*X).Fmt(2,'A*X')
    (A|X).Fmt(2,'A|X')
    (A<X).Fmt(2,'A<X')
    (A>X).Fmt(2,'A>X')
    return
def check_generalized_BAC_CAB_formulas():
    """Print the generalized BAC-CAB (vector triple product) identities
    for five general symbolic vectors a, b, c, d, e."""
    Print_Function()
    (a,b,c,d,e) = MV.setup('a b c d e')

    print 'g_{ij} =\n',MV.metric

    print 'a|(b*c) =',a|(b*c)
    print 'a|(b^c) =',a|(b^c)
    print 'a|(b^c^d) =',a|(b^c^d)
    # Cyclic sum identity: should reduce to 0.
    print 'a|(b^c)+c|(a^b)+b|(c^a) =',(a|(b^c))+(c|(a^b))+(b|(c^a))
    print 'a*(b^c)-b*(a^c)+c*(a^b) =',a*(b^c)-b*(a^c)+c*(a^b)
    print 'a*(b^c^d)-b*(a^c^d)+c*(a^b^d)-d*(a^b^c) =',a*(b^c^d)-b*(a^c^d)+c*(a^b^d)-d*(a^b^c)
    print '(a^b)|(c^d) =',(a^b)|(c^d)
    print '((a^b)|c)|d =',((a^b)|c)|d
    # Commutator product of two bivectors.
    print '(a^b)x(c^d) =',Com(a^b,c^d)
    print '(a|(b^c))|(d^e) =',(a|(b^c))|(d^e)
    return
def derivatives_in_rectangular_coordinates():
    """Demo: geometric derivative (grad) of scalar, vector, bivector and
    general multivector fields in Euclidean rectangular coordinates.
    """
    Print_Function()
    X = (x,y,z) = symbols('x y z')
    (ex,ey,ez,grad) = MV.setup('e_x e_y e_z',metric='[1,1,1]',coords=X)
    # fct=True makes the coefficients functions of the coordinates.
    f = MV('f','scalar',fct=True)
    A = MV('A','vector',fct=True)
    B = MV('B','grade2',fct=True)
    C = MV('C','mv',fct=True)
    print 'f =',f
    print 'A =',A
    print 'B =',B
    print 'C =',C
    print 'grad*f =',grad*f
    print 'grad|A =',grad|A
    print 'grad*A =',grad*A
    # -I*(grad^A) is the curl in 3D (I = unit pseudoscalar).
    print '-I*(grad^A) =',-MV.I*(grad^A)
    print 'grad*B =',grad*B
    print 'grad^B =',grad^B
    print 'grad|B =',grad|B
    print 'grad<A =',grad<A
    print 'grad>A =',grad>A
    print 'grad<B =',grad<B
    print 'grad>B =',grad>B
    print 'grad<C =',grad<C
    print 'grad>C =',grad>C
    return
def derivatives_in_spherical_coordinates():
    """Demo: geometric derivative of fields in spherical coordinates,
    defined via the curvilinear map ``curv`` (coordinate map + scale factors).
    """
    Print_Function()
    X = (r,th,phi) = symbols('r theta phi')
    # curv = [embedding of (r,th,phi) into R^3, metric scale factors].
    curv = [[r*cos(phi)*sin(th),r*sin(phi)*sin(th),r*cos(th)],[1,r,r*sin(th)]]
    (er,eth,ephi,grad) = MV.setup('e_r e_theta e_phi',metric='[1,1,1]',coords=X,curv=curv)
    f = MV('f','scalar',fct=True)
    A = MV('A','vector',fct=True)
    B = MV('B','grade2',fct=True)
    print 'f =',f
    print 'A =',A
    print 'B =',B
    print 'grad*f =',grad*f
    print 'grad|A =',grad|A
    print '-I*(grad^A) =',-MV.I*(grad^A)
    print 'grad^B =',grad^B
    return
def rounding_numerical_components():
    """Demo: Nga() rounds the floating-point coefficients of a multivector
    to a given number of significant figures.
    """
    Print_Function()
    (ex,ey,ez) = MV.setup('e_x e_y e_z',metric='[1,1,1]')
    X = 1.2*ex+2.34*ey+0.555*ez
    Y = 0.333*ex+4*ey+5.3*ez
    print 'X =',X
    print 'Nga(X,2) =',Nga(X,2)
    print 'X*Y =',X*Y
    print 'Nga(X*Y,2) =',Nga(X*Y,2)
    return
def noneuclidian_distance_calculation():
    """Demo: derive cosh(alpha) for the non-Euclidean distance between the
    conformal points X and Y, following Doran & Lasenby ("D&L") eqs.
    10.152-10.155.  Mixes multivector algebra with plain sympy scalar
    manipulation once the scalar part W has been extracted.
    """
    from sympy import solve,sqrt
    Print_Function()
    # Null basis vectors X, Y with X.X = Y.Y = 0; e is a unit vector.
    metric = '0 # #,# 0 #,# # 1'
    (X,Y,e) = MV.setup('X Y e',metric)
    print 'g_{ij} =',MV.metric
    print '(X^Y)**2 =',(X^Y)*(X^Y)
    L = X^Y^e
    B = L*e # D&L 10.152
    print 'B =',B
    Bsq = B*B
    print 'B**2 =',Bsq
    Bsq = Bsq.scalar()
    print '#L = X^Y^e is a non-euclidian line'
    print 'B = L*e =',B
    BeBr =B*e*B.rev()
    print 'B*e*B.rev() =',BeBr
    print 'B**2 =',B*B
    print 'L**2 =',L*L # D&L 10.153
    (s,c,Binv,M,S,C,alpha,XdotY,Xdote,Ydote) = symbols('s c (1/B) M S C alpha (X.Y) (X.e) (Y.e)')
    Bhat = Binv*B # D&L 10.154
    R = c+s*Bhat # Rotor R = exp(alpha*Bhat/2)
    print 's = sinh(alpha/2) and c = cosh(alpha/2)'
    print 'exp(alpha*B/(2*|B|)) =',R
    Z = R*X*R.rev() # D&L 10.155
    Z.obj = expand(Z.obj)
    Z.obj = Z.obj.collect([Binv,s,c,XdotY])
    Z.Fmt(3,'R*X*R.rev()')
    W = Z|Y # Extract scalar part of multivector
    # From this point forward all calculations are with sympy scalars
    print 'Objective is to determine value of C = cosh(alpha) such that W = 0'
    W = W.scalar()
    print 'Z|Y =',W
    W = expand(W)
    W = simplify(W)
    W = W.collect([s*Binv])
    M = 1/Bsq
    W = W.subs(Binv**2,M)
    W = simplify(W)
    Bmag = sqrt(XdotY**2-2*XdotY*Xdote*Ydote)
    W = W.collect([Binv*c*s,XdotY])
    #Double angle substitutions
    W = W.subs(2*XdotY**2-4*XdotY*Xdote*Ydote,2/(Binv**2))
    W = W.subs(2*c*s,S)
    W = W.subs(c**2,(C+1)/2)
    W = W.subs(s**2,(C-1)/2)
    W = simplify(W)
    W = W.subs(1/Binv,Bmag)
    W = expand(W)
    print 'S = sinh(alpha) and C = cosh(alpha)'
    print 'W =',W
    # Split W into its constant, cosh and sinh coefficients.
    Wd = collect(W,[C,S],exact=True,evaluate=False)
    Wd_1 = Wd[ONE]
    Wd_C = Wd[C]
    Wd_S = Wd[S]
    print 'Scalar Coefficient =',Wd_1
    print 'Cosh Coefficient =',Wd_C
    print 'Sinh Coefficient =',Wd_S
    print '|B| =',Bmag
    Wd_1 = Wd_1.subs(Bmag,1/Binv)
    Wd_C = Wd_C.subs(Bmag,1/Binv)
    Wd_S = Wd_S.subs(Bmag,1/Binv)
    # Square both sides of (Wd_1 + Wd_C*C) = -Wd_S*S to eliminate S,
    # using S**2 = C**2 - 1; reduces W = 0 to a quadratic in C.
    lhs = Wd_1+Wd_C*C
    rhs = -Wd_S*S
    lhs = lhs**2
    rhs = rhs**2
    W = expand(lhs-rhs)
    W = expand(W.subs(1/Binv**2,Bmag**2))
    W = expand(W.subs(S**2,C**2-1))
    W = W.collect([C,C**2],evaluate=False)
    a = simplify(W[C**2])
    b = simplify(W[C])
    c = simplify(W[ONE])
    print 'Require a*C**2+b*C+c = 0'
    print 'a =',a
    print 'b =',b
    print 'c =',c
    x = Symbol('x')
    C = solve(a*x**2+b*x+c,x)[0]
    print 'cosh(alpha) = C = -b/(2*a) =',expand(simplify(expand(C)))
    return
# Exact rational 1/2, used by F() below.
HALF = Rational(1,2)

def F(x):
    """Map the vector x to its conformal representation
    F(x) = (1/2)*(x^2*n + 2*x - nbar), using the module-global null
    vectors n and nbar set up by the calling demo function.
    """
    global n,nbar
    Fx = HALF*((x*x)*n+2*x-nbar)
    return(Fx)
def make_vector(a,n = 3):
    """Return the conformal representation F(a) of a vector.

    ``a`` may be a ready-made vector, or a string name from which a symbolic
    n-dimensional vector is built (zero-padded with two extra components for
    the conformal basis).  NOTE: the parameter ``n`` (dimension, default 3)
    is local and unrelated to the module-global null vector ``n`` used by F().
    """
    if isinstance(a,str):
        sym_str = ''
        for i in range(n):
            sym_str += a+str(i+1)+' '
        sym_lst = list(symbols(sym_str))
        sym_lst.append(ZERO)
        sym_lst.append(ZERO)
        a = MV(sym_lst,'vector')
    return(F(a))
def conformal_representations_of_circles_lines_spheres_and_planes():
    """Demo: in the conformal model of 3D Euclidean space, circles, lines,
    spheres and planes are the zero sets of outer products of conformal
    points (e.g. circle through a,b,c: A^B^C^X = 0).
    """
    global n,nbar
    Print_Function()
    # Euclidean 3D basis plus null vectors n, nbar with n.nbar = 2.
    metric = '1 0 0 0 0,0 1 0 0 0,0 0 1 0 0,0 0 0 0 2,0 0 0 2 0'
    (e1,e2,e3,n,nbar) = MV.setup('e_1 e_2 e_3 n nbar',metric)
    print 'g_{ij} =\n',MV.metric
    e = n+nbar
    #conformal representation of points
    A = make_vector(e1) # point a = (1,0,0) A = F(a)
    B = make_vector(e2) # point b = (0,1,0) B = F(b)
    C = make_vector(-e1) # point c = (-1,0,0) C = F(c)
    D = make_vector(e3) # point d = (0,0,1) D = F(d)
    X = make_vector('x',3)
    print 'F(a) =',A
    print 'F(b) =',B
    print 'F(c) =',C
    print 'F(d) =',D
    print 'F(x) =',X
    print 'a = e1, b = e2, c = -e1, and d = e3'
    print 'A = F(a) = 1/2*(a*a*n+2*a-nbar), etc.'
    print 'Circle through a, b, and c'
    print 'Circle: A^B^C^X = 0 =',(A^B^C^X)
    print 'Line through a and b'
    print 'Line : A^B^n^X = 0 =',(A^B^n^X)
    print 'Sphere through a, b, c, and d'
    print 'Sphere: A^B^C^D^X = 0 =',(((A^B)^C)^D)^X
    print 'Plane through a, b, and d'
    print 'Plane : A^B^n^D^X = 0 =',(A^B^n^D^X)
    L = (A^B^e)^X
    L.Fmt(3,'Hyperbolic Circle: (A^B^e)^X = 0 =')
    return
def properties_of_geometric_objects():
    """Demo: extract geometric properties (line direction, circle plane)
    from conformal blades built out of symbolic points p1, p2, p3.
    """
    global n,nbar
    Print_Function()
    # Symbolic Euclidean 3x3 block plus the n/nbar null pair.
    metric = '# # # 0 0,'+ \
             '# # # 0 0,'+ \
             '# # # 0 0,'+ \
             '0 0 0 0 2,'+ \
             '0 0 0 2 0'
    (p1,p2,p3,n,nbar) = MV.setup('p1 p2 p3 n nbar',metric)
    print 'g_{ij} =\n',MV.metric
    P1 = F(p1)
    P2 = F(p2)
    P3 = F(p3)
    print 'Extracting direction of line from L = P1^P2^n'
    L = P1^P2^n
    delta = (L|n)|nbar
    print '(L|n)|nbar =',delta
    print 'Extracting plane of circle from C = P1^P2^P3'
    C = P1^P2^P3
    delta = ((C^n)|n)|nbar
    print '((C^n)|n)|nbar =',delta
    print '(p2-p1)^(p3-p1) =',(p2-p1)^(p3-p1)
def extracting_vectors_from_conformal_2_blade():
    """Demo: recover the two null vectors spanning a conformal 2-blade
    B = P1^P2 by projecting a generic vector a onto/against B.
    """
    global n,nbar
    Print_Function()
    # P1, P2 are null (P1.P1 = P2.P2 = 0) with P1.P2 = -1; a is generic.
    metric = ' 0 -1 #,'+ \
             '-1 0 #,'+ \
             ' # # #'
    (P1,P2,a) = MV.setup('P1 P2 a',metric)
    print 'g_{ij} =\n',MV.metric
    B = P1^P2
    Bsq = B*B
    print 'B**2 =',Bsq
    ap = a-(a^B)*B
    print "a' = a-(a^B)*B =",ap
    Ap = ap+ap*B
    Am = ap-ap*B
    print "A+ = a'+a'*B =",Ap
    print "A- = a'-a'*B =",Am
    print '(A+)^2 =',Ap*Ap
    print '(A-)^2 =',Am*Am
    aB = a|B
    print 'a|B =',aB
    return
def reciprocal_frame_test():
    """Demo: construct the reciprocal frame E1, E2, E3 of a general
    (non-orthogonal) basis e1, e2, e3 and verify Ei|ej = 0 for i != j
    and (Ei|ei)/E**2 = 1.
    """
    Print_Function()
    metric = '1 # #,'+ \
             '# 1 #,'+ \
             '# # 1'
    (e1,e2,e3) = MV.setup('e1 e2 e3',metric)
    print 'g_{ij} =\n',MV.metric
    E = e1^e2^e3
    Esq = (E*E).scalar()
    print 'E =',E
    print 'E**2 =',Esq
    Esq_inv = 1/Esq
    E1 = (e2^e3)*E
    E2 = (-1)*(e1^e3)*E
    E3 = (e1^e2)*E
    print 'E1 = (e2^e3)*E =',E1
    print 'E2 =-(e1^e3)*E =',E2
    print 'E3 = (e1^e2)*E =',E3
    # Cross terms Ei|ej (i != j) should all expand to zero.
    w = (E1|e2)
    w = w.expand()
    print 'E1|e2 =',w
    w = (E1|e3)
    w = w.expand()
    print 'E1|e3 =',w
    w = (E2|e1)
    w = w.expand()
    print 'E2|e1 =',w
    w = (E2|e3)
    w = w.expand()
    print 'E2|e3 =',w
    w = (E3|e1)
    w = w.expand()
    print 'E3|e1 =',w
    w = (E3|e2)
    w = w.expand()
    print 'E3|e2 =',w
    # Diagonal terms normalized by E**2 should simplify to 1.
    w = (E1|e1)
    w = (w.expand()).scalar()
    Esq = expand(Esq)
    print '(E1|e1)/E**2 =',simplify(w/Esq)
    w = (E2|e2)
    w = (w.expand()).scalar()
    print '(E2|e2)/E**2 =',simplify(w/Esq)
    w = (E3|e3)
    w = (w.expand()).scalar()
    print '(E3|e3)/E**2 =',simplify(w/Esq)
    return
def dummy():
    """Placeholder that does nothing and returns None."""
    return None
def main():
    """Run every demo in sequence with enhanced terminal printing.

    noneuclidian_distance_calculation() is deliberately commented out
    (presumably slow or noisy -- confirm before re-enabling).
    """
    Get_Program(True)
    enhance_print()
    basic_multivector_operations()
    check_generalized_BAC_CAB_formulas()
    derivatives_in_rectangular_coordinates()
    derivatives_in_spherical_coordinates()
    rounding_numerical_components()
    #noneuclidian_distance_calculation()
    conformal_representations_of_circles_lines_spheres_and_planes()
    properties_of_geometric_objects()
    extracting_vectors_from_conformal_2_blade()
    reciprocal_frame_test()
    return
# Script entry point.
if __name__ == "__main__":
    main()
| bsd-3-clause |
edgewood/borg | src/borg/testsuite/shellpattern.py | 4 | 2698 | import re
import pytest
from .. import shellpattern
def check(path, pattern):
    """Return True if *path* matches the translated shell *pattern*."""
    regex = shellpattern.translate(pattern)
    return re.match(regex, path) is not None
# Positive cases: every pattern in the list must match the path.
@pytest.mark.parametrize("path, patterns", [
    # Literal string
    ("foo/bar", ["foo/bar"]),
    ("foo\\bar", ["foo\\bar"]),

    # Non-ASCII
    ("foo/c/\u0152/e/bar", ["foo/*/\u0152/*/bar", "*/*/\u0152/*/*", "**/\u0152/*/*"]),
    ("\u00e4\u00f6\u00dc", ["???", "*", "\u00e4\u00f6\u00dc", "[\u00e4][\u00f6][\u00dc]"]),

    # Question mark (matches exactly one non-separator character)
    ("foo", ["fo?"]),
    ("foo", ["f?o"]),
    ("foo", ["f??"]),
    ("foo", ["?oo"]),
    ("foo", ["?o?"]),
    ("foo", ["??o"]),
    ("foo", ["???"]),

    # Single asterisk (matches within one path segment)
    ("", ["*"]),
    ("foo", ["*", "**", "***"]),
    ("foo", ["foo*"]),
    ("foobar", ["foo*"]),
    ("foobar", ["foo*bar"]),
    ("foobarbaz", ["foo*baz"]),
    ("bar", ["*bar"]),
    ("foobar", ["*bar"]),
    ("foo/bar", ["foo/*bar"]),
    ("foo/bar", ["foo/*ar"]),
    ("foo/bar", ["foo/*r"]),
    ("foo/bar", ["foo/*"]),
    ("foo/bar", ["foo*/bar"]),
    ("foo/bar", ["fo*/bar"]),
    ("foo/bar", ["f*/bar"]),
    ("foo/bar", ["*/bar"]),

    # Double asterisk (matches 0..n directory layers)
    ("foo/bar", ["foo/**/bar"]),
    ("foo/1/bar", ["foo/**/bar"]),
    ("foo/1/22/333/bar", ["foo/**/bar"]),
    ("foo/", ["foo/**/"]),
    ("foo/1/", ["foo/**/"]),
    ("foo/1/22/333/", ["foo/**/"]),
    ("bar", ["**/bar"]),
    ("1/bar", ["**/bar"]),
    ("1/22/333/bar", ["**/bar"]),
    ("foo/bar/baz", ["foo/**/*"]),

    # Set
    ("foo1", ["foo[12]"]),
    ("foo2", ["foo[12]"]),
    ("foo2/bar", ["foo[12]/*"]),
    ("f??f", ["f??f", "f[?][?]f"]),
    ("foo]", ["foo[]]"]),

    # Inverted set
    ("foo3", ["foo[!12]"]),
    ("foo^", ["foo[^!]"]),
    ("foo!", ["foo[^!]"]),
    ])
def test_match(path, patterns):
    """Each (path, patterns) row: all patterns must match the path."""
    for p in patterns:
        assert check(path, p)
# Negative cases: every pattern in the list must NOT match the path.
@pytest.mark.parametrize("path, patterns", [
    ("", ["?", "[]"]),
    ("foo", ["foo?"]),
    ("foo", ["?foo"]),
    ("foo", ["f?oo"]),

    # do not match path separator
    ("foo/ar", ["foo?ar"]),

    # do not match/cross over os.path.sep
    ("foo/bar", ["*"]),
    ("foo/bar", ["foo*bar"]),
    ("foo/bar", ["foo*ar"]),
    ("foo/bar", ["fo*bar"]),
    ("foo/bar", ["fo*ar"]),

    # Double asterisk
    ("foobar", ["foo/**/bar"]),

    # Two asterisks without slash do not match directory separator
    ("foo/bar", ["**"]),

    # Double asterisk not matching filename
    ("foo/bar", ["**/"]),

    # Set
    ("foo3", ["foo[12]"]),

    # Inverted set
    ("foo1", ["foo[!12]"]),
    ("foo2", ["foo[!12]"]),
    ])
def test_mismatch(path, patterns):
    """Each (path, patterns) row: no pattern may match the path."""
    for p in patterns:
        assert not check(path, p)
| bsd-3-clause |
invisiblek/python-for-android | python3-alpha/python3-src/Lib/distutils/file_util.py | 54 | 7810 | """distutils.file_util
Utility functions for operating on single files.
"""
import os
from distutils.errors import DistutilsFileError
from distutils import log
# for generating verbose output in 'copy_file()': maps the 'link'
# argument (None/'hard'/'sym') to the verb logged for the operation.
_copy_action = { None: 'copying',
                 'hard': 'hard linking',
                 'sym': 'symbolically linking' }
def _copy_file_contents(src, dst, buffer_size=16*1024):
    """Copy the file 'src' to 'dst'; both must be filenames. Any error
    opening either file, reading from 'src', or writing to 'dst', raises
    DistutilsFileError. Data is read/written in chunks of 'buffer_size'
    bytes (default 16k). No attempt is made to handle anything apart from
    regular files.
    """
    # Stolen from shutil module in the standard library, but with
    # custom error-handling added.
    fsrc = None
    fdst = None
    try:
        try:
            fsrc = open(src, 'rb')
        except os.error as e:
            # os.error is an alias of OSError; each failure is re-raised
            # as DistutilsFileError with a message naming the file.
            raise DistutilsFileError("could not open '%s': %s" % (src, e.strerror))
        # Remove an existing destination first so the subsequent open('wb')
        # creates a fresh file (and fails loudly if the delete is impossible).
        if os.path.exists(dst):
            try:
                os.unlink(dst)
            except os.error as e:
                raise DistutilsFileError(
                      "could not delete '%s': %s" % (dst, e.strerror))
        try:
            fdst = open(dst, 'wb')
        except os.error as e:
            raise DistutilsFileError(
                  "could not create '%s': %s" % (dst, e.strerror))
        while True:
            try:
                buf = fsrc.read(buffer_size)
            except os.error as e:
                raise DistutilsFileError(
                      "could not read from '%s': %s" % (src, e.strerror))
            if not buf:
                break  # EOF
            try:
                fdst.write(buf)
            except os.error as e:
                raise DistutilsFileError(
                      "could not write to '%s': %s" % (dst, e.strerror))
    finally:
        # Close whichever handles were successfully opened.
        if fdst:
            fdst.close()
        if fsrc:
            fsrc.close()
def copy_file(src, dst, preserve_mode=1, preserve_times=1, update=0,
              link=None, verbose=1, dry_run=0):
    """Copy a file 'src' to 'dst'. If 'dst' is a directory, then 'src' is
    copied there with the same name; otherwise, it must be a filename. (If
    the file exists, it will be ruthlessly clobbered.) If 'preserve_mode'
    is true (the default), the file's mode (type and permission bits, or
    whatever is analogous on the current platform) is copied. If
    'preserve_times' is true (the default), the last-modified and
    last-access times are copied as well. If 'update' is true, 'src' will
    only be copied if 'dst' does not exist, or if 'dst' does exist but is
    older than 'src'.

    'link' allows you to make hard links (os.link) or symbolic links
    (os.symlink) instead of copying: set it to "hard" or "sym"; if it is
    None (the default), files are copied. Don't set 'link' on systems that
    don't support it: 'copy_file()' doesn't check if hard or symbolic
    linking is available.

    Under Mac OS, uses the native file copy function in macostools; on
    other systems, uses '_copy_file_contents()' to copy file contents.

    Return a tuple (dest_name, copied): 'dest_name' is the actual name of
    the output file, and 'copied' is true if the file was copied (or would
    have been copied, if 'dry_run' true).
    """
    # XXX if the destination file already exists, we clobber it if
    # copying, but blow up if linking. Hmmm. And I don't know what
    # macostools.copyfile() does. Should definitely be consistent, and
    # should probably blow up if destination exists and we would be
    # changing it (ie. it's not already a hard/soft link to src OR
    # (not update) and (src newer than dst).
    from distutils.dep_util import newer
    from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE

    if not os.path.isfile(src):
        raise DistutilsFileError(
              "can't copy '%s': doesn't exist or not a regular file" % src)

    # Resolve directory destination to a full file path.
    if os.path.isdir(dst):
        dir = dst
        dst = os.path.join(dst, os.path.basename(src))
    else:
        dir = os.path.dirname(dst)

    if update and not newer(src, dst):
        if verbose >= 1:
            log.debug("not copying %s (output up-to-date)", src)
        return (dst, 0)

    try:
        action = _copy_action[link]
    except KeyError:
        raise ValueError("invalid value '%s' for 'link' argument" % link)

    if verbose >= 1:
        if os.path.basename(dst) == os.path.basename(src):
            log.info("%s %s -> %s", action, src, dir)
        else:
            log.info("%s %s -> %s", action, src, dst)

    if dry_run:
        return (dst, 1)

    # If linking (hard or symbolic), use the appropriate system call
    # (Unix only, of course, but that's the caller's responsibility)
    elif link == 'hard':
        if not (os.path.exists(dst) and os.path.samefile(src, dst)):
            os.link(src, dst)
    elif link == 'sym':
        if not (os.path.exists(dst) and os.path.samefile(src, dst)):
            os.symlink(src, dst)

    # Otherwise (non-Mac, not linking), copy the file contents and
    # (optionally) copy the times and mode.
    else:
        _copy_file_contents(src, dst)
        if preserve_mode or preserve_times:
            st = os.stat(src)

            # According to David Ascher <da@ski.org>, utime() should be done
            # before chmod() (at least under NT).
            if preserve_times:
                os.utime(dst, (st[ST_ATIME], st[ST_MTIME]))
            if preserve_mode:
                os.chmod(dst, S_IMODE(st[ST_MODE]))

    return (dst, 1)
# XXX I suspect this is Unix-specific -- need porting help!
def move_file (src, dst,
               verbose=1,
               dry_run=0):
    """Move a file 'src' to 'dst'. If 'dst' is a directory, the file will
    be moved into it with the same name; otherwise, 'src' is just renamed
    to 'dst'. Return the new full name of the file.

    Handles cross-device moves on Unix using 'copy_file()'. What about
    other systems???
    """
    from os.path import exists, isfile, isdir, basename, dirname
    import errno

    if verbose >= 1:
        log.info("moving %s -> %s", src, dst)

    if dry_run:
        return dst

    if not isfile(src):
        raise DistutilsFileError("can't move '%s': not a regular file" % src)

    if isdir(dst):
        dst = os.path.join(dst, basename(src))
    elif exists(dst):
        raise DistutilsFileError(
              "can't move '%s': destination '%s' already exists" %
              (src, dst))

    if not isdir(dirname(dst)):
        raise DistutilsFileError(
              "can't move '%s': destination '%s' not a valid path" %
              (src, dst))

    copy_it = False
    try:
        os.rename(src, dst)
    except OSError as e:
        # BUG FIX: the old code unpacked the exception with
        # ``(num, msg) = e``, which raises TypeError on Python 3 because
        # exceptions are not iterable; use e.errno / e.strerror instead.
        if e.errno == errno.EXDEV:
            # Cross-device rename: fall back to copy + delete below.
            copy_it = True
        else:
            raise DistutilsFileError(
                  "couldn't move '%s' to '%s': %s" % (src, dst, e.strerror))

    if copy_it:
        copy_file(src, dst, verbose=verbose)
        try:
            os.unlink(src)
        except OSError as e:
            # Deleting the source failed; undo the copy so we don't leave
            # two live copies of the file behind, then report the failure.
            try:
                os.unlink(dst)
            except OSError:
                pass
            raise DistutilsFileError(
                  "couldn't move '%s' to '%s' by copy/delete: "
                  "delete '%s' failed: %s"
                  % (src, dst, src, e.strerror))
    return dst
def write_file (filename, contents):
    """Create a file with the specified name and write 'contents' (a
    sequence of strings without line terminators) to it.
    """
    # Use a context manager instead of the original manual try/finally;
    # the file is closed even if a write raises. Behavior is unchanged.
    with open(filename, "w") as f:
        for line in contents:
            f.write(line + "\n")
| apache-2.0 |
bwrsandman/OpenUpgrade | addons/hr_contract/__openerp__.py | 260 | 1834 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Employee Contracts',
'version': '1.0',
'category': 'Human Resources',
'description': """
Add all information on the employee form to manage contracts.
=============================================================
* Contract
* Place of Birth,
* Medical Examination Date
* Company Vehicle
You can assign several contracts per employee.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/employees',
'depends': ['base_action_rule', 'hr'],
'data': [
'security/ir.model.access.csv',
'hr_contract_view.xml',
'hr_contract_data.xml',
'base_action_rule_view.xml',
],
'demo': [],
'test': ['test/test_hr_contract.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
AI-comp/Orientation2015Problems | rime/plugins/pack_mjudge.py | 1 | 4824 | #!/usr/bin/python
#
# Copyright (c) 2011 Rime Project.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import itertools
import os
import os.path
import rime.basic.targets.problem # target dependency
import rime.basic.targets.project # target dependency
import rime.basic.targets.testset # target dependency
from rime.basic import consts
from rime.core import commands
from rime.core import targets
from rime.core import taskgraph
from rime.util import files
_PACKED_TARBALL_TEMPLATE = '%s.tar.gz'
class Project(targets.registry.Project):
    """Project target extended with a Pack command that packs every
    problem in the project (pack_mjudge plugin)."""

    @taskgraph.task_method
    def Pack(self, ui):
        # Fan out Pack over all problems; succeed only if all succeed.
        results = yield taskgraph.TaskBranch(
            [problem.Pack(ui) for problem in self.problems])
        yield all(results)
class Problem(targets.registry.Problem):
    """Problem target extended with a Pack command that packs every
    testset of the problem (pack_mjudge plugin)."""

    @taskgraph.task_method
    def Pack(self, ui):
        # Fan out Pack over all testsets; succeed only if all succeed.
        results = yield taskgraph.TaskBranch(
            [testset.Pack(ui) for testset in self.testsets])
        yield all(results)
class Testset(targets.registry.Testset):
    """Testset target extended with a Pack command: builds the testset,
    copies each testcase's input/diff files under sequential numeric names
    into <out_dir>/pack, then tars the directory into <name>.tar.gz for
    export to M-judge."""

    def __init__(self, *args, **kwargs):
        super(Testset, self).__init__(*args, **kwargs)
        # Staging directory for the renamed testcase files.
        self.pack_dir = os.path.join(self.out_dir, 'pack')

    @taskgraph.task_method
    def Pack(self, ui):
        # NOTE(review): this is a taskgraph coroutine -- 'yield False'
        # reports failure to the scheduler but, unlike 'return', does NOT
        # stop execution here; control appears to continue after each
        # failure branch. Confirm against taskgraph semantics.
        if not (yield self.Build(ui)):
            yield False
        testcases = self.ListTestCases()
        ui.console.PrintAction('PACK', self, progress=True)
        try:
            files.RemoveTree(self.pack_dir)
            files.MakeDir(self.pack_dir)
        except:
            # NOTE(review): bare except; presumably intentional so any
            # failure is routed through ui.errors -- confirm before narrowing.
            ui.errors.Exception(self)
            yield False
        for (i, testcase) in enumerate(testcases):
            basename = os.path.splitext(testcase.infile)[0]
            difffile = basename + consts.DIFF_EXT
            # Testcases are renamed 1.in/1.diff, 2.in/2.diff, ...
            packed_infile = str(i+1) + consts.IN_EXT
            packed_difffile = str(i+1) + consts.DIFF_EXT
            try:
                ui.console.PrintAction(
                    'PACK',
                    self,
                    '%s -> %s' % (testcase.infile, packed_infile),
                    progress=True)
                files.CopyFile(os.path.join(self.out_dir, testcase.infile),
                               os.path.join(self.pack_dir, packed_infile))
                ui.console.PrintAction(
                    'PACK',
                    self,
                    '%s -> %s' % (difffile, packed_difffile),
                    progress=True)
                files.CopyFile(os.path.join(self.out_dir, difffile),
                               os.path.join(self.pack_dir, packed_difffile))
            except:
                ui.errors.Exception(self)
                yield False
        # Create ../../<name>.tar.gz relative to pack_dir (i.e. next to
        # the testset's output directory).
        tarball_filename = _PACKED_TARBALL_TEMPLATE % self.name
        tar_args = ('tar', 'czf',
                    os.path.join(os.pardir, os.pardir, tarball_filename),
                    os.curdir)
        ui.console.PrintAction(
            'PACK',
            self,
            ' '.join(tar_args),
            progress=True)
        devnull = files.OpenNull()
        task = taskgraph.ExternalProcessTask(
            tar_args, cwd=self.pack_dir,
            stdin=devnull, stdout=devnull, stderr=devnull)
        try:
            proc = yield task
        except:
            ui.errors.Exception(self)
            yield False
        ret = proc.returncode
        if ret != 0:
            ui.errors.Error(self, 'tar failed: ret = %d' % ret)
            yield False
        ui.console.PrintAction(
            'PACK',
            self,
            tarball_filename)
        yield True
# Replace the stock target classes with the Pack-capable subclasses above.
targets.registry.Override('Project', Project)
targets.registry.Override('Problem', Problem)
targets.registry.Override('Testset', Testset)
class Pack(commands.CommandBase):
    """'rime pack' command-line command (pack_mjudge plugin)."""

    def __init__(self, parent):
        # Args: name, argument spec, short help, long help, parent command.
        super(Pack, self).__init__(
            'pack',
            '',
            'Pack testsets to export to M-judge. (pack_mjudge plugin)',
            '',
            parent)

    def Run(self, obj, args, ui):
        """Entry point for pack command."""
        if args:
            ui.console.PrintError('Extra argument passed to pack command!')
            return None

        if isinstance(obj, (Project, Problem, Testset)):
            return obj.Pack(ui)

        ui.console.PrintError('Pack is not supported for the specified target.')
        return None

# Make the command available to the rime CLI.
commands.registry.Add(Pack)
| mit |
laszlocsomor/tensorflow | tensorflow/python/kernel_tests/concatenate_dataset_op_test.py | 42 | 5552 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import test
class ConcatenateDatasetTest(test.TestCase):
  """Tests for tf.data Dataset.concatenate()."""

  def testConcatenateDataset(self):
    """Concatenating two compatible datasets yields all elements of the
    first (4 rows) followed by all elements of the second (5 rows)."""
    input_components = (
        np.tile(np.array([[1], [2], [3], [4]]), 20),
        np.tile(np.array([[12], [13], [14], [15]]), 15),
        np.array([37.0, 38.0, 39.0, 40.0]))
    to_concatenate_components = (
        np.tile(np.array([[1], [2], [3], [4], [5]]), 20),
        np.tile(np.array([[12], [13], [14], [15], [16]]), 15),
        np.array([37.0, 38.0, 39.0, 40.0, 41.0]))

    input_dataset = dataset_ops.Dataset.from_tensor_slices(input_components)
    dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
        to_concatenate_components)
    concatenated = input_dataset.concatenate(dataset_to_concatenate)
    # Identical per-component shapes are preserved exactly.
    self.assertEqual(concatenated.output_shapes, (tensor_shape.TensorShape(
        [20]), tensor_shape.TensorShape([15]), tensor_shape.TensorShape([])))

    iterator = concatenated.make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.test_session() as sess:
      sess.run(init_op)
      for i in range(9):
        result = sess.run(get_next)
        if i < 4:
          # First 4 elements come from the input dataset.
          for component, result_component in zip(input_components, result):
            self.assertAllEqual(component[i], result_component)
        else:
          # Remaining 5 elements come from the concatenated dataset.
          for component, result_component in zip(to_concatenate_components,
                                                 result):
            self.assertAllEqual(component[i - 4], result_component)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testConcatenateDatasetDifferentShape(self):
    """Components whose static shapes differ ([4] vs [15]) are still
    concatenable; the differing dimension becomes unknown (None)."""
    input_components = (
        np.tile(np.array([[1], [2], [3], [4]]), 20),
        np.tile(np.array([[12], [13], [14], [15]]), 4))
    to_concatenate_components = (
        np.tile(np.array([[1], [2], [3], [4], [5]]), 20),
        np.tile(np.array([[12], [13], [14], [15], [16]]), 15))

    input_dataset = dataset_ops.Dataset.from_tensor_slices(input_components)
    dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
        to_concatenate_components)
    concatenated = input_dataset.concatenate(dataset_to_concatenate)
    self.assertEqual(
        [ts.as_list()
         for ts in nest.flatten(concatenated.output_shapes)], [[20], [None]])

    iterator = concatenated.make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.test_session() as sess:
      sess.run(init_op)
      for i in range(9):
        result = sess.run(get_next)
        if i < 4:
          for component, result_component in zip(input_components, result):
            self.assertAllEqual(component[i], result_component)
        else:
          for component, result_component in zip(to_concatenate_components,
                                                 result):
            self.assertAllEqual(component[i - 4], result_component)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

  def testConcatenateDatasetDifferentStructure(self):
    """Datasets with different numbers of components cannot be concatenated."""
    input_components = (
        np.tile(np.array([[1], [2], [3], [4]]), 5),
        np.tile(np.array([[12], [13], [14], [15]]), 4))
    to_concatenate_components = (
        np.tile(np.array([[1], [2], [3], [4], [5]]), 20),
        np.tile(np.array([[12], [13], [14], [15], [16]]), 15),
        np.array([37.0, 38.0, 39.0, 40.0, 41.0]))

    input_dataset = dataset_ops.Dataset.from_tensor_slices(input_components)
    dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
        to_concatenate_components)
    with self.assertRaisesRegexp(ValueError,
                                 "don't have the same number of elements"):
      input_dataset.concatenate(dataset_to_concatenate)

  def testConcatenateDatasetDifferentType(self):
    """Datasets with mismatched component dtypes (float vs int) cannot be
    concatenated."""
    input_components = (
        np.tile(np.array([[1], [2], [3], [4]]), 5),
        np.tile(np.array([[12], [13], [14], [15]]), 4))
    to_concatenate_components = (
        np.tile(np.array([[1.0], [2.0], [3.0], [4.0]]), 5),
        np.tile(np.array([[12], [13], [14], [15]]), 15))

    input_dataset = dataset_ops.Dataset.from_tensor_slices(input_components)
    dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
        to_concatenate_components)
    with self.assertRaisesRegexp(TypeError, "have different types"):
      input_dataset.concatenate(dataset_to_concatenate)
# Run the test suite when executed directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
nielsbuwen/ilastik | tests/test_applets/thresholdTwoLevels/testOpAnisotropicGaussianSmoothing.py | 4 | 3469 | ###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
import numpy
from numpy.testing import assert_array_equal
import vigra
np = numpy
from lazyflow.graph import Graph
from lazyflow.operators import OpReorderAxes
from ilastik.applets.thresholdTwoLevels.thresholdingTools import OpAnisotropicGaussianSmoothing5d
import ilastik.ilastik_logging
ilastik.ilastik_logging.default_config.init()
import unittest
class TestOpAnisotropicGaussianSmoothing5d(unittest.TestCase):
    """Tests for OpAnisotropicGaussianSmoothing5d: the operator is fed
    through an OpReorderAxes that normalizes any input to 'txyzc'."""

    def setUp(self):
        # Build the graph once per test: reorder -> smoothing operator.
        g = Graph()
        r1 = OpReorderAxes(graph=g)
        r1.AxisOrder.setValue('txyzc')
        op = OpAnisotropicGaussianSmoothing5d(graph=g)
        op.Input.connect(r1.Output)
        self.r1 = r1
        self.op = op

    def test2d(self):
        # 2d input must be accepted (axes expanded to 5d internally).
        vol = np.random.rand(50, 50)
        vol = vigra.taggedView(vol, axistags='xy')
        self.r1.Input.setValue(vol)
        out = self.op.Output[...].wait()

    def test3d(self):
        vol = np.random.rand(50, 50, 50)
        vol = vigra.taggedView(vol, axistags='xyz')
        self.r1.Input.setValue(vol)
        out = self.op.Output[...].wait()

    def test4d(self):
        vol = np.random.rand(50, 50, 50, 5)
        vol = vigra.taggedView(vol, axistags='xyzc')
        self.r1.Input.setValue(vol)
        out = self.op.Output[...].wait()

    def test5d(self):
        vol = np.random.rand(50, 50, 50, 5, 2)
        vol = vigra.taggedView(vol, axistags='xyzct')
        self.r1.Input.setValue(vol)
        out = self.op.Output[...].wait()

    def testExtend(self):
        # A sub-region request must equal the same region cut from a
        # full-volume request (halo/extension handled consistently).
        vol = np.random.rand(50, 50)
        vol = vigra.taggedView(vol, axistags='xy')
        self.r1.Input.setValue(vol)
        out = self.op.Output[0, :10, :10, 0, 0].wait().squeeze()
        out2 = self.op.Output[...].wait()
        assert_array_equal(out, out2[0, :10, :10, 0, 0].squeeze())

    def testSmoothing(self):
        # Output must match vigra's reference gaussianSmoothing
        # (sigma=1.0, window_size=2.0).
        op = self.op
        vol = np.random.rand(50, 50).astype(np.float32)
        vol = vigra.taggedView(vol, axistags='xy')
        self.r1.Input.setValue(vol)
        out = op.Output[...].wait()
        out = vigra.taggedView(out, axistags=op.Output.meta.axistags).squeeze()
        out2 = vigra.filters.gaussianSmoothing(vol, 1.0, window_size=2.0)
        assert_array_equal(out, out2)

    def testReqFromMid(self):
        # Requesting a region away from the volume origin must work.
        vol = np.random.rand(3, 50, 50, 1, 1)
        vol = vigra.taggedView(vol, axistags='txyzc')
        self.r1.Input.setValue(vol)
        out = self.op.Output[2, 10:20, 10:20, 0, 0].wait()
# Run under nose when executed directly (legacy test runner).
if __name__ == "__main__":
    import nose
    nose.run(defaultTest=__file__, env={'NOSE_NOCAPTURE': 1})
| gpl-3.0 |
Javex/mixminion | etc/mmUtils.py | 6 | 2318 | # Implmentes the mixminion interface.
import os, sys
import re
# Give it a list of ommands and what should go in the std input
# it returns what appeared in the std output.
# PRIVATE: DO NOT CALL FROM OUTSIDE THIS MODULE!!!
# PRIVATE: DO NOT CALL FROM OUTSIDE THIS MODULE!!!
def mm_command(cmd, in_str = None, show_stderr = 1):
    """Run a mixminion command line (list of argv words), optionally feed
    ``in_str`` to its stdin, and return whatever it wrote to stdout
    (stdout+stderr merged when show_stderr == 1).

    NOTE(review): the words are joined into a single shell string and run
    via os.popen4/popen3 -- unquoted arguments mean shell-injection risk
    if any word comes from untrusted input.
    """
    # c = cmd
    c = reduce(lambda x,y: x+" "+y, cmd)
    print c
    if show_stderr == 1:
        (sout,sin) = os.popen4(c)
    else:
        (sout,sin,serr) = os.popen3(c)
    if in_str != None:
        sout.write(in_str+'\n')
    # popen4 returns (child_stdin, child_stdout): despite the names,
    # 'sout' here is the child's stdin and 'sin' its output stream.
    sout.close()
    result = sin.read()
    return result
# provides a single use reply block
# If an error occus it return an empty list '[]'
def getSURB(addrs,login,passwd):
    """Generate single-use reply blocks for ``addrs`` and return them as a
    list of '-----BEGIN/END TYPE III REPLY BLOCK-----' text blocks; the
    list is empty if generation failed.  ``passwd`` is fed to stdin.
    """
    rs = mm_command(['mixminion','generate-surb','--identity=\"%s\"'%login,'-t',addrs], passwd)
    surbPat = re.compile('-----BEGIN TYPE III REPLY BLOCK-----[^\-]*-----END TYPE III REPLY BLOCK-----',re.S)
    rs = surbPat.findall(rs)
    return rs
# routine to decode a received mixminion message
# If there is an error the empty string is returned.
def decode(msg,passwd):
    """Extract the TYPE III ANONYMOUS MESSAGE block from ``msg`` and decode
    it with 'mixminion decode', returning the decoded text plus a newline.

    NOTE(review): despite the module comment promising an empty string on
    error, this returns None when no message block is found (the 'if'
    has no else branch).  Also ``rs.strip('\\n')`` discards its result --
    strings are immutable, so that line is a no-op.
    """
    decPat = re.compile('-----BEGIN TYPE III ANONYMOUS MESSAGE-----\r?\nMessage-type: (plaintext|encrypted)(.*)-----END TYPE III ANONYMOUS MESSAGE-----\r?\n',re.S)
    mtc = decPat.search(msg)
    if mtc != None:
        # Write the matched block to a scratch file for the CLI to read.
        f = open('__tempMM','w')
        f.write(mtc.group(0))
        f.close()
        rs = mm_command(['mixminion','decode','-i','__tempMM'], passwd, 0)
        # os.remove('__tempMM')
        rs.strip('\n')
        return rs+'\n'
# Delete file!
# Simply sends a message
def send(msg,addrs,cmd):
    """Send ``msg`` to address ``addrs`` via 'mixminion send'; ``cmd`` is a
    list of extra command-line arguments.  Returns the CLI output.
    """
    # Stage the message in a scratch file, send it, then clean up.
    f = open('__msgMM','w')
    f.write(msg)
    f.close()
    rs = mm_command(['mixminion','send','-i','__msgMM','-t',addrs]+cmd, None)
    os.remove('__msgMM')
    return rs
# Routine to send a message using a previously obtained single-use
# reply block (SURB).
def reply(msg, surb, cmd):
    """Send *msg* through mixminion using the reply block *surb*."""
    # Write both the message and the reply block to temporary files.
    for filename, contents in (('__msgMM', msg), ('__surbMM', surb)):
        handle = open(filename, 'w')
        handle.write(contents)
        handle.close()
    output = mm_command(
        ['mixminion', 'send', '-i', '__msgMM', '-R', '__surbMM'] + cmd, None)
    # Both temporary files are removed once mixminion has run.
    os.remove('__msgMM')
    os.remove('__surbMM')
    return output
# Old debugging information
if __name__ == '__main__':
    import getpass
    # getSURB() takes (addrs, login, passwd); the old call passed only two
    # arguments and raised a TypeError.  Use the address as the identity,
    # matching how the function builds '--identity'.
    address = 'gd216@cl.cam.ac.uk'
    sb = getSURB(address, address, getpass.getpass())
    # reply('Hello world\nThis is my message\n', sb[0])
    # print rs
| mit |
takeshineshiro/nova | nova/tests/unit/api/openstack/compute/test_plugins/dummy_schema.py | 94 | 1276 | # Copyright 2014 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.validation import parameter_types
# JSON-Schema used by the dummy test extension: a request body must be
# exactly {"dummy": {"val": <name>}} with no extra keys at either level.
dummy = {
    'type': 'object',
    'properties': {
        'dummy': {
            'type': 'object',
            'properties': {
                # 'val' must satisfy the common 'name' parameter rules.
                'val': parameter_types.name,
            },
            'additionalProperties': False,
        },
    },
    'required': ['dummy'],
    'additionalProperties': False,
}
# Variant of the schema above for the v2 dummy extension: identical shape
# but the inner property is named 'val2' instead of 'val'.
dummy2 = {
    'type': 'object',
    'properties': {
        'dummy': {
            'type': 'object',
            'properties': {
                'val2': parameter_types.name,
            },
            'additionalProperties': False,
        },
    },
    'required': ['dummy'],
    'additionalProperties': False,
}
| apache-2.0 |
sduenas/perceval | tests/test_bugzillarest.py | 1 | 22077 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2017 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA.
#
# Authors:
# Santiago Dueñas <sduenas@bitergia.com>
#
import datetime
import os
import shutil
import unittest
import httpretty
import pkg_resources
pkg_resources.declare_namespace('perceval.backends')
from perceval.backend import BackendCommandArgumentParser
from perceval.errors import BackendError
from perceval.utils import DEFAULT_DATETIME
from perceval.backends.core.bugzillarest import (BugzillaREST,
BugzillaRESTCommand,
BugzillaRESTClient,
BugzillaRESTError)
from base import TestCaseBackendArchive
BUGZILLA_SERVER_URL = 'http://example.com'
BUGZILLA_LOGIN_URL = BUGZILLA_SERVER_URL + '/rest/login'
BUGZILLA_BUGS_URL = BUGZILLA_SERVER_URL + '/rest/bug'
BUGZILLA_BUGS_COMMENTS_1273442_URL = BUGZILLA_SERVER_URL + '/rest/bug/1273442/comment'
BUGZILLA_BUGS_HISTORY_1273442_URL = BUGZILLA_SERVER_URL + '/rest/bug/1273442/history'
BUGZILLA_BUGS_ATTACHMENTS_1273442_URL = BUGZILLA_SERVER_URL + '/rest/bug/1273442/attachment'
BUGZILLA_BUG_947945_URL = BUGZILLA_SERVER_URL + '/rest/bug/947945/'
def read_file(filename, mode='r'):
    """Return the contents of *filename*, resolved relative to this file.

    :param filename: path relative to the directory containing this module
    :param mode: open mode; pass ``'rb'`` to get bytes instead of text
    """
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), filename)
    with open(path, mode) as handle:
        return handle.read()
def setup_http_server():
    """Register canned responses on a mock Bugzilla REST server.

    Returns a list that accumulates every request received, so tests can
    assert on the exact query strings the client sent.

    NOTE: the /rest/bug list bodies are consumed in order via ``pop(0)``,
    so the three bug-list requests must arrive in the expected sequence.
    """
    http_requests = []

    # Successive pages returned by /rest/bug, in request order.
    bodies_bugs = [read_file('data/bugzilla/bugzilla_rest_bugs.json', mode='rb'),
                   read_file('data/bugzilla/bugzilla_rest_bugs_next.json', mode='rb'),
                   read_file('data/bugzilla/bugzilla_rest_bugs_empty.json', mode='rb')]
    # Index 0: populated fixture (bug 1273442); index 1: empty variant (947945).
    body_comments = [read_file('data/bugzilla/bugzilla_rest_bugs_comments.json', mode='rb'),
                     read_file('data/bugzilla/bugzilla_rest_bugs_comments_empty.json', mode='rb')]
    body_history = [read_file('data/bugzilla/bugzilla_rest_bugs_history.json', mode='rb'),
                    read_file('data/bugzilla/bugzilla_rest_bugs_history_empty.json', mode='rb')]
    body_attachments = [read_file('data/bugzilla/bugzilla_rest_bugs_attachments.json', mode='rb'),
                        read_file('data/bugzilla/bugzilla_rest_bugs_attachments_empty.json', mode='rb')]

    def request_callback(method, uri, headers):
        # Route each URI to the matching canned body and record the request.
        if uri.startswith(BUGZILLA_BUGS_COMMENTS_1273442_URL):
            body = body_comments[0]
        elif uri.startswith(BUGZILLA_BUGS_HISTORY_1273442_URL):
            body = body_history[0]
        elif uri.startswith(BUGZILLA_BUGS_ATTACHMENTS_1273442_URL):
            body = body_attachments[0]
        elif uri.startswith(BUGZILLA_BUG_947945_URL):
            # Bug 947945 always gets the "empty" fixture variants.
            if uri.find('comment') > 0:
                body = body_comments[1]
            elif uri.find('history') > 0:
                body = body_history[1]
            else:
                body = body_attachments[1]
        else:
            # Plain /rest/bug listing: hand out the next page (order matters).
            body = bodies_bugs.pop(0)
        http_requests.append(httpretty.last_request())
        return (200, headers, body)

    httpretty.register_uri(httpretty.GET,
                           BUGZILLA_BUGS_URL,
                           responses=[
                               httpretty.Response(body=request_callback)
                               for _ in range(3)
                           ])

    http_urls = [BUGZILLA_BUGS_COMMENTS_1273442_URL,
                 BUGZILLA_BUGS_HISTORY_1273442_URL,
                 BUGZILLA_BUGS_ATTACHMENTS_1273442_URL]

    suffixes = ['comment', 'history', 'attachment']

    for http_url in [BUGZILLA_BUG_947945_URL]:
        for suffix in suffixes:
            http_urls.append(http_url + suffix)

    for req_url in http_urls:
        httpretty.register_uri(httpretty.GET,
                               req_url,
                               responses=[
                                   httpretty.Response(body=request_callback)
                               ])

    return http_requests
class TestBugzillaRESTBackend(unittest.TestCase):
    """Bugzilla REST backend tests"""

    def test_initialization(self):
        """Test whether attributes are initialized"""
        bg = BugzillaREST(BUGZILLA_SERVER_URL, tag='test',
                          max_bugs=5)

        self.assertEqual(bg.url, BUGZILLA_SERVER_URL)
        self.assertEqual(bg.origin, BUGZILLA_SERVER_URL)
        self.assertEqual(bg.tag, 'test')
        self.assertEqual(bg.max_bugs, 5)
        self.assertIsNone(bg.client)

        # When tag is empty or None it will be set to
        # the value in URL
        bg = BugzillaREST(BUGZILLA_SERVER_URL)
        self.assertEqual(bg.url, BUGZILLA_SERVER_URL)
        self.assertEqual(bg.origin, BUGZILLA_SERVER_URL)
        self.assertEqual(bg.tag, BUGZILLA_SERVER_URL)

        bg = BugzillaREST(BUGZILLA_SERVER_URL, tag='')
        self.assertEqual(bg.url, BUGZILLA_SERVER_URL)
        self.assertEqual(bg.origin, BUGZILLA_SERVER_URL)
        self.assertEqual(bg.tag, BUGZILLA_SERVER_URL)

    def test_has_resuming(self):
        """Test if it returns True when has_resuming is called"""
        self.assertEqual(BugzillaREST.has_resuming(), True)

    def test_has_archiving(self):
        """Test if it returns True when has_archiving is called"""
        self.assertEqual(BugzillaREST.has_archiving(), True)

    @httpretty.activate
    def test_fetch(self):
        """Test whether a list of bugs is returned"""
        http_requests = setup_http_server()

        # max_bugs=2 forces pagination: two pages of two bugs plus an
        # empty page, hence the nine expected requests below.
        bg = BugzillaREST(BUGZILLA_SERVER_URL, max_bugs=2)
        bugs = [bug for bug in bg.fetch(from_date=None)]

        self.assertEqual(len(bugs), 3)

        self.assertEqual(bugs[0]['data']['id'], 1273442)
        self.assertEqual(len(bugs[0]['data']['comments']), 7)
        self.assertEqual(len(bugs[0]['data']['history']), 6)
        self.assertEqual(len(bugs[0]['data']['attachments']), 1)
        self.assertEqual(bugs[0]['origin'], BUGZILLA_SERVER_URL)
        self.assertEqual(bugs[0]['uuid'], '68494ad0072ed9e09cecb8235649a38c443326db')
        self.assertEqual(bugs[0]['updated_on'], 1465257689.0)
        self.assertEqual(bugs[0]['category'], 'bug')
        self.assertEqual(bugs[0]['tag'], BUGZILLA_SERVER_URL)

        self.assertEqual(bugs[1]['data']['id'], 1273439)
        self.assertEqual(len(bugs[1]['data']['comments']), 0)
        self.assertEqual(len(bugs[1]['data']['history']), 0)
        self.assertEqual(len(bugs[1]['data']['attachments']), 0)
        self.assertEqual(bugs[1]['origin'], BUGZILLA_SERVER_URL)
        self.assertEqual(bugs[1]['uuid'], 'd306162de06bc759f9bd9227fe3fd5f08aeb0dde')
        self.assertEqual(bugs[1]['updated_on'], 1465257715.0)
        self.assertEqual(bugs[1]['category'], 'bug')
        self.assertEqual(bugs[1]['tag'], BUGZILLA_SERVER_URL)

        self.assertEqual(bugs[2]['data']['id'], 947945)
        self.assertEqual(len(bugs[2]['data']['comments']), 0)
        self.assertEqual(len(bugs[2]['data']['history']), 0)
        self.assertEqual(len(bugs[2]['data']['attachments']), 0)
        self.assertEqual(bugs[2]['origin'], BUGZILLA_SERVER_URL)
        self.assertEqual(bugs[2]['uuid'], '33edda925351c3310fc3e12d7f18a365c365f6bd')
        self.assertEqual(bugs[2]['updated_on'], 1465257743.0)
        self.assertEqual(bugs[2]['category'], 'bug')
        self.assertEqual(bugs[2]['tag'], BUGZILLA_SERVER_URL)

        # Check requests
        expected = [
            {
                'last_change_time': ['1970-01-01T00:00:00Z'],
                'limit': ['2'],
                'order': ['changeddate'],
                'include_fields': ['_all']
            },
            {
                'ids': ['1273442', '1273439']
            },
            {
                'ids': ['1273442', '1273439']
            },
            {
                'ids': ['1273442', '1273439'],
                'exclude_fields': ['data']
            },
            {
                'last_change_time': ['1970-01-01T00:00:00Z'],
                'offset': ['2'],
                'limit': ['2'],
                'order': ['changeddate'],
                'include_fields': ['_all']
            },
            {
                'ids': ['947945']
            },
            {
                'ids': ['947945']
            },
            {
                'ids': ['947945'],
                'exclude_fields': ['data']
            },
            {
                'last_change_time': ['1970-01-01T00:00:00Z'],
                'offset': ['4'],
                'limit': ['2'],
                'order': ['changeddate'],
                'include_fields': ['_all']
            }
        ]

        self.assertEqual(len(http_requests), len(expected))

        for i in range(len(expected)):
            self.assertDictEqual(http_requests[i].querystring, expected[i])

    @httpretty.activate
    def test_fetch_empty(self):
        """Test whether it works when no bugs are fetched"""
        body = read_file('data/bugzilla/bugzilla_rest_bugs_empty.json')
        httpretty.register_uri(httpretty.GET,
                               BUGZILLA_BUGS_URL,
                               body=body, status=200)

        bg = BugzillaREST(BUGZILLA_SERVER_URL)
        bugs = [bug for bug in bg.fetch()]

        self.assertEqual(len(bugs), 0)
class TestBugzillaRESTBackendArchive(TestCaseBackendArchive):
    """Bugzilla REST backend tests using an archive"""

    def setUp(self):
        # The base class creates self.archive / self.test_path.
        super().setUp()
        self.backend = BugzillaREST(BUGZILLA_SERVER_URL, max_bugs=2, archive=self.archive)

    def tearDown(self):
        shutil.rmtree(self.test_path)

    @httpretty.activate
    def test_fetch_from_archive(self):
        """Test whether a list of bugs is returned from the archive"""
        setup_http_server()
        # The base helper fetches twice: once from the network and once from
        # the archive, then compares both result sets.
        self._test_fetch_from_archive(from_date=None)

    @httpretty.activate
    def test_fetch_empty_from_archive(self):
        """Test whether it works when no bugs are fetched from the archive"""
        body = read_file('data/bugzilla/bugzilla_rest_bugs_empty.json')
        httpretty.register_uri(httpretty.GET,
                               BUGZILLA_BUGS_URL,
                               body=body, status=200)

        self._test_fetch_from_archive(from_date=None)
class TestBugzillaRESTClient(unittest.TestCase):
    """Bugzilla REST API client tests

    These tests do not check the body of the response, only if the call
    was well formed and if a response was obtained. Due to this, take
    into account that the body returned on each request might not
    match with the parameters from the request.
    """
    @httpretty.activate
    def test_init(self):
        """Test initialization"""
        client = BugzillaRESTClient(BUGZILLA_SERVER_URL)

        self.assertEqual(client.base_url, BUGZILLA_SERVER_URL)
        self.assertEqual(client.api_token, None)

    @httpretty.activate
    def test_init_auth(self):
        """Test initialization with authentication"""
        # Set up a mock HTTP server
        httpretty.register_uri(httpretty.GET,
                               BUGZILLA_LOGIN_URL,
                               body='{"token": "786-OLaWfBisMY", "id": "786"}',
                               status=200)

        client = BugzillaRESTClient(BUGZILLA_SERVER_URL,
                                    user='jsmith@example.com',
                                    password='1234')
        self.assertEqual(client.api_token, '786-OLaWfBisMY')

        # Check request params
        expected = {
            'login': ['jsmith@example.com'],
            'password': ['1234'],
        }

        req = httpretty.last_request()

        self.assertEqual(req.method, 'GET')
        self.assertRegex(req.path, '/rest/login')
        self.assertEqual(req.querystring, expected)

    @httpretty.activate
    def test_invalid_auth(self):
        """Test whether it fails when the authentication goes wrong"""
        # Set up a mock HTTP server
        httpretty.register_uri(httpretty.GET,
                               BUGZILLA_LOGIN_URL,
                               body="401 Client Error: Authorization Required",
                               status=401)

        with self.assertRaises(BackendError):
            _ = BugzillaRESTClient(BUGZILLA_SERVER_URL,
                                   user='jsmith@example.com',
                                   password='1234')

    @httpretty.activate
    def test_auth_token_call(self):
        """Test whether the API token is included on the calls when it was set"""
        # Set up a mock HTTP server
        body = read_file('data/bugzilla/bugzilla_rest_bugs.json')
        httpretty.register_uri(httpretty.GET,
                               BUGZILLA_LOGIN_URL,
                               body='{"token": "786-OLaWfBisMY", "id": "786"}',
                               status=200)
        httpretty.register_uri(httpretty.GET,
                               BUGZILLA_BUGS_URL,
                               body=body, status=200)

        # Test API token login
        client = BugzillaRESTClient(BUGZILLA_SERVER_URL,
                                    user='jsmith@example.com',
                                    password='1234')
        self.assertEqual(client.api_token, '786-OLaWfBisMY')

        # Check whether it is included on the calls
        _ = client.bugs()

        # Check request params
        expected = {
            'last_change_time': ['1970-01-01T00:00:00Z'],
            'limit': ['500'],
            'order': ['changeddate'],
            'include_fields': ['_all'],
            'token': ['786-OLaWfBisMY']
        }

        req = httpretty.last_request()
        self.assertDictEqual(req.querystring, expected)

        # Test API token initialization
        client = BugzillaRESTClient(BUGZILLA_SERVER_URL,
                                    api_token='ABCD')
        _ = client.bugs()

        expected = {
            'last_change_time': ['1970-01-01T00:00:00Z'],
            'limit': ['500'],
            'order': ['changeddate'],
            'include_fields': ['_all'],
            'token': ['ABCD']
        }

        req = httpretty.last_request()
        self.assertDictEqual(req.querystring, expected)

    @httpretty.activate
    def test_bugs(self):
        """Test bugs API call"""
        # Set up a mock HTTP server
        body = read_file('data/bugzilla/bugzilla_rest_bugs.json')
        httpretty.register_uri(httpretty.GET,
                               BUGZILLA_BUGS_URL,
                               body=body, status=200)

        # Call API without args
        client = BugzillaRESTClient(BUGZILLA_SERVER_URL)
        response = client.bugs()

        self.assertEqual(response, body)

        # Check request params
        expected = {
            'last_change_time': ['1970-01-01T00:00:00Z'],
            'limit': ['500'],
            'order': ['changeddate'],
            'include_fields': ['_all']
        }

        req = httpretty.last_request()

        self.assertEqual(req.method, 'GET')
        self.assertRegex(req.path, '/rest/bug')
        self.assertDictEqual(req.querystring, expected)

        # Call API with parameters
        from_date = datetime.datetime(2016, 6, 7, 0, 0, 0)

        client = BugzillaRESTClient(BUGZILLA_SERVER_URL)
        response = client.bugs(from_date=from_date, offset=100, max_bugs=5)

        self.assertEqual(response, body)

        expected = {
            'last_change_time': ['2016-06-07T00:00:00Z'],
            'offset': ['100'],
            'limit': ['5'],
            'order': ['changeddate'],
            'include_fields': ['_all']
        }

        req = httpretty.last_request()

        self.assertEqual(req.method, 'GET')
        self.assertRegex(req.path, '/rest/bug')
        self.assertDictEqual(req.querystring, expected)

    @httpretty.activate
    def test_comments(self):
        """Test comments API call"""
        # Set up a mock HTTP server
        body = read_file('data/bugzilla/bugzilla_rest_bugs_comments.json')
        httpretty.register_uri(httpretty.GET,
                               BUGZILLA_BUGS_COMMENTS_1273442_URL,
                               body=body, status=200)

        # Call API
        client = BugzillaRESTClient(BUGZILLA_SERVER_URL)
        response = client.comments('1273442', '1273439')

        self.assertEqual(response, body)

        # Check request params
        expected = {
            'ids': ['1273442', '1273439']
        }

        req = httpretty.last_request()

        self.assertEqual(req.method, 'GET')
        self.assertRegex(req.path, '/rest/bug/1273442/comment')
        self.assertDictEqual(req.querystring, expected)

    @httpretty.activate
    def test_history(self):
        """Test history API call"""
        # Set up a mock HTTP server
        body = read_file('data/bugzilla/bugzilla_rest_bugs_history.json')
        httpretty.register_uri(httpretty.GET,
                               BUGZILLA_BUGS_HISTORY_1273442_URL,
                               body=body, status=200)

        # Call API
        client = BugzillaRESTClient(BUGZILLA_SERVER_URL)
        response = client.history('1273442', '1273439')

        self.assertEqual(response, body)

        # Check request params
        expected = {
            'ids': ['1273442', '1273439']
        }

        req = httpretty.last_request()

        self.assertEqual(req.method, 'GET')
        self.assertRegex(req.path, '/rest/bug/1273442/history')
        self.assertDictEqual(req.querystring, expected)

    @httpretty.activate
    def test_attachments(self):
        """Test attachments API call"""
        # Set up a mock HTTP server
        body = read_file('data/bugzilla/bugzilla_rest_bugs_attachments.json')
        httpretty.register_uri(httpretty.GET,
                               BUGZILLA_BUGS_ATTACHMENTS_1273442_URL,
                               body=body, status=200)

        # Call API
        client = BugzillaRESTClient(BUGZILLA_SERVER_URL)
        response = client.attachments('1273442', '1273439')

        self.assertEqual(response, body)

        # Check request params; attachment payloads are excluded on purpose.
        expected = {
            'ids': ['1273442', '1273439'],
            'exclude_fields': ['data']
        }

        req = httpretty.last_request()

        self.assertEqual(req.method, 'GET')
        self.assertRegex(req.path, '/rest/bug/1273442/attachment')
        self.assertDictEqual(req.querystring, expected)

    @httpretty.activate
    def test_user_agent_header(self):
        """Test if the User-Agent header is included on every API call"""
        # Set up a mock HTTP server
        body = read_file('data/bugzilla/bugzilla_rest_bugs_history.json')
        httpretty.register_uri(httpretty.GET,
                               BUGZILLA_BUGS_HISTORY_1273442_URL,
                               body=body, status=200)

        # Call API
        client = BugzillaRESTClient(BUGZILLA_SERVER_URL)
        response = client.history('1273442', '1273439')

        self.assertEqual(response, body)

        # Check request params
        expected = {
            'ids': ['1273442', '1273439']
        }

        req = httpretty.last_request()
        user_agent = req.headers['User-Agent']
        self.assertEqual(user_agent.startswith('Perceval/'), True)

    @httpretty.activate
    def test_rest_error(self):
        """Test if an exception is raised when the server returns an error"""
        # Set up a mock HTTP server
        body = read_file('data/bugzilla/bugzilla_rest_error.json')
        httpretty.register_uri(httpretty.GET,
                               BUGZILLA_BUGS_URL,
                               body=body, status=200)

        client = BugzillaRESTClient(BUGZILLA_SERVER_URL)

        with self.assertRaises(BugzillaRESTError) as e:
            _ = client.call('bug', {})
            # NOTE(review): these two assertions sit *inside* the `with`
            # block after the call that raises, so they never execute.
            # Consider dedenting them one level so they actually run.
            self.assertEqual(e.exception.code, 32000)
            self.assertEqual(e.exception.error,
                             "API key authentication is required.")
class TestBugzillaRESTCommand(unittest.TestCase):
    """BugzillaRESTCommand unit tests"""

    def test_backend_class(self):
        """Test if the backend class is BugzillaREST"""
        self.assertIs(BugzillaRESTCommand.BACKEND, BugzillaREST)

    def test_setup_cmd_parser(self):
        """Test if the parser object is correctly initialized"""
        parser = BugzillaRESTCommand.setup_cmd_parser()
        self.assertIsInstance(parser, BackendCommandArgumentParser)

        args = ['--backend-user', 'jsmith@example.com',
                '--backend-password', '1234',
                '--api-token', 'abcdefg',
                '--max-bugs', '10', '--tag', 'test',
                '--from-date', '1970-01-01',
                '--no-archive',
                BUGZILLA_SERVER_URL]

        parsed_args = parser.parse(*args)
        self.assertEqual(parsed_args.user, 'jsmith@example.com')
        self.assertEqual(parsed_args.password, '1234')
        self.assertEqual(parsed_args.api_token, 'abcdefg')
        self.assertEqual(parsed_args.max_bugs, 10)
        self.assertEqual(parsed_args.tag, 'test')
        # '1970-01-01' parses to the backend's DEFAULT_DATETIME epoch value.
        self.assertEqual(parsed_args.from_date, DEFAULT_DATETIME)
        self.assertEqual(parsed_args.no_archive, True)
        self.assertEqual(parsed_args.url, BUGZILLA_SERVER_URL)
if __name__ == "__main__":
    # warnings='ignore' keeps warning output from cluttering direct runs.
    unittest.main(warnings='ignore')
| gpl-3.0 |
hubert667/AIR | build/PyYAML/lib3/yaml/cyaml.py | 274 | 3294 |
__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
'CBaseDumper', 'CSafeDumper', 'CDumper']
from _yaml import CParser, CEmitter
from .constructor import *
from .serializer import *
from .representer import *
from .resolver import *
class CBaseLoader(CParser, BaseConstructor, BaseResolver):
    """LibYAML-backed loader using the base constructor/resolver
    (no implicit tag resolution beyond the basics)."""

    def __init__(self, stream):
        CParser.__init__(self, stream)
        BaseConstructor.__init__(self)
        BaseResolver.__init__(self)
class CSafeLoader(CParser, SafeConstructor, Resolver):
    """LibYAML-backed loader restricted to the safe constructor
    (only standard YAML tags; no arbitrary Python objects)."""

    def __init__(self, stream):
        CParser.__init__(self, stream)
        SafeConstructor.__init__(self)
        Resolver.__init__(self)
class CLoader(CParser, Constructor, Resolver):
    """LibYAML-backed loader with the full constructor; can build
    arbitrary Python objects, so only use on trusted input."""

    def __init__(self, stream):
        CParser.__init__(self, stream)
        Constructor.__init__(self)
        Resolver.__init__(self)
class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
    """LibYAML-backed dumper wired to the *base* representer/resolver.

    Note: the original initialized ``Representer``/``Resolver`` although
    the declared bases are ``BaseRepresenter``/``BaseResolver``.  Neither
    subclass overrides ``__init__`` so the behavior was identical, but
    calling the actual base classes keeps initialization consistent with
    the class declaration (and with CSafeDumper/CDumper below).
    """

    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        # CEmitter (from the _yaml C extension) handles serialization and
        # emission itself, so no Serializer base is involved here.
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width, encoding=encoding,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        BaseRepresenter.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        BaseResolver.__init__(self)
class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
    """LibYAML-backed dumper restricted to the safe representer
    (standard YAML types only)."""

    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        # CEmitter (C extension) performs serialization and emission.
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width, encoding=encoding,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        SafeRepresenter.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        Resolver.__init__(self)
class CDumper(CEmitter, Serializer, Representer, Resolver):
    """LibYAML-backed dumper with the full representer.

    NOTE(review): ``Serializer.__init__`` is never called here; the
    CEmitter C extension performs serialization itself, so the Serializer
    base appears to be present for interface compatibility only --
    confirm before relying on any Serializer state.
    """

    def __init__(self, stream,
            default_style=None, default_flow_style=None,
            canonical=None, indent=None, width=None,
            allow_unicode=None, line_break=None,
            encoding=None, explicit_start=None, explicit_end=None,
            version=None, tags=None):
        CEmitter.__init__(self, stream, canonical=canonical,
                indent=indent, width=width, encoding=encoding,
                allow_unicode=allow_unicode, line_break=line_break,
                explicit_start=explicit_start, explicit_end=explicit_end,
                version=version, tags=tags)
        Representer.__init__(self, default_style=default_style,
                default_flow_style=default_flow_style)
        Resolver.__init__(self)
| gpl-3.0 |
Zigazou/PyMinitel | minitel/ui/Conteneur.py | 1 | 8946 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Classe permettant de regrouper des éléments d’interface utilisateur"""
from .UI import UI
from ..Sequence import Sequence
from ..constantes import ENTREE, MAJ_ENTREE
class Conteneur(UI):
    """Container that groups user-interface elements.

    A Conteneur draws every element it holds and manages moving the
    focus between them: the ENTER key selects the next activable
    element and SHIFT+ENTER the previous one.  Elements whose
    ``activable`` attribute is False are simply skipped during
    navigation.

    Attributes:

    - elements: list of the elements in their traversal order
    - element_actif: the UI element currently holding the focus (or None)
    - fond: background color of the container (or None)
    """
    def __init__(self, minitel, posx, posy, largeur, hauteur, couleur = None,
                 fond = None):
        """Constructor.

        :param minitel: Minitel object used to send commands and to
            receive keystrokes.
        :param posx: x coordinate of the element (int).
        :param posy: y coordinate of the element (int).
        :param largeur: width of the element in characters (int).
        :param hauteur: height of the element in characters (int).
        :param couleur: color of the element (int, str or None).
        :param fond: background color of the container (int, str or None).
        """
        assert isinstance(posx, int)
        assert isinstance(posy, int)
        assert isinstance(largeur, int)
        assert isinstance(hauteur, int)
        assert isinstance(couleur, (str, int)) or couleur == None
        assert isinstance(fond, (str, int)) or fond == None

        # Attribute initialization
        self.elements = []
        self.element_actif = None
        self.fond = fond

        UI.__init__(self, minitel, posx, posy, largeur, hauteur, couleur)

    def gere_touche(self, sequence):
        """Handle a keystroke.

        Called automatically by the ``executer`` method.  The sequence is
        first offered to the active element; if that element does not
        consume it, the container handles ENTER (move to the next
        element) and SHIFT+ENTER (move to the previous element).  On a
        focus change, ``gere_depart`` is called on the element losing
        the focus and ``gere_arrivee`` on the one gaining it.

        :param sequence: the Sequence received from the Minitel.
        :returns: True if the key was handled by the container or one of
            its elements, False otherwise.
        """
        assert isinstance(sequence, Sequence)

        # No active element?  Then there is nothing to do.
        if self.element_actif == None:
            return False

        # Forward the sequence to the active element first.
        touche_geree = self.element_actif.gere_touche(sequence)

        # If the active element handled the sequence, we are done.
        if touche_geree:
            return True

        # The ENTER key moves the focus to the next field.
        if sequence.egale(ENTREE):
            self.element_actif.gere_depart()
            self.suivant()
            self.element_actif.gere_arrivee()
            return True

        # SHIFT+ENTER moves the focus to the previous field.
        if sequence.egale(MAJ_ENTREE):
            self.element_actif.gere_depart()
            self.precedent()
            self.element_actif.gere_arrivee()
            return True

        return False

    def affiche(self):
        """Display the container and its elements.

        The container first paints its background when a background
        color has been defined, then asks each contained element to
        draw itself.

        Note: the container's coordinates and the elements' coordinates
        are independent of each other.
        """
        # Paint the container background if a background color is defined.
        if self.fond != None:
            for posy in range(self.posy, self.posy + self.hauteur):
                self.minitel.position(self.posx, posy)
                self.minitel.couleur(fond = self.fond)
                self.minitel.repeter(' ', self.largeur)

        # Ask every element to draw itself.
        for element in self.elements:
            element.affiche()

        # If an active element has been defined, hand control to it.
        if self.element_actif != None:
            self.element_actif.gere_arrivee()

    def ajoute(self, element):
        """Add an element to the container.

        The container keeps an ordered list of its elements.  An element
        added without a color inherits the container's color.  If no
        element is active yet and the added element is activable, it
        automatically becomes the container's active element.

        :param element: the element to append to the ordered list
            (an instance of UI or one of its subclasses).
        """
        assert isinstance(element, UI)
        assert element not in self.elements

        # By default, give the container's color to the element.
        if element.couleur == None:
            element.couleur = self.couleur

        # Append the element to the container's element list.
        self.elements.append(element)

        if self.element_actif == None and element.activable == True:
            self.element_actif = element

    def suivant(self):
        """Move the focus to the next activable element.

        Selects the next activable element in the list, starting from
        the current active element.

        :returns: True if a following activable element was found and
            selected, False otherwise.
        """
        # With no elements there can be no active element.
        if len(self.elements) == 0:
            return False

        # Get the index of the active element (-1 means "before the start").
        if self.element_actif == None:
            index = -1
        else:
            index = self.elements.index(self.element_actif)

        # Look for the next element that is activable.
        while index < len(self.elements) - 1:
            index += 1
            if self.elements[index].activable == True:
                self.element_actif = self.elements[index]
                return True

        return False

    def precedent(self):
        """Move the focus to the previous activable element.

        Selects the previous activable element in the list, starting
        from the current active element.

        :returns: True if a preceding activable element was found and
            selected, False otherwise.
        """
        # With no elements there can be no active element.
        if len(self.elements) == 0:
            return False

        # Get the index of the active element (len() means "past the end").
        if self.element_actif == None:
            index = len(self.elements)
        else:
            index = self.elements.index(self.element_actif)

        # Look backwards for an element that is activable.
        while index > 0:
            index -= 1
            if self.elements[index].activable == True:
                self.element_actif = self.elements[index]
                return True

        return False
| gpl-3.0 |
hortonworks/hortonworks-sandbox | desktop/core/ext-py/processing/lib/forking.py | 1 | 12412 | #
# Module for starting a process object using os.fork() or CreateProcess()
#
# processing/forking.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
import os
import sys
import signal
import processing
__all__ = ['Popen', 'assertSpawning', 'exit']
#
# Check that the current thread is spawning a child process
#
def assertSpawning(self):
    # Guard used by objects that may only be transferred to a child process
    # through inheritance: raise unless the calling thread is currently in
    # the middle of spawning a child.  NOTE(review): despite the name,
    # 'self' is the object being pickled -- this is a free function, not a
    # method.  (Python 2 module: keeps the old raise syntax below.)
    if not thisThreadIsSpawning():
        raise RuntimeError, \
            ('%s objects should only be shared between '
             'processes through inheritance' % type(self).__name__)
#
# Unix
#
if sys.platform != 'win32':
    import time
    import errno

    exit = os._exit

    def thisThreadIsSpawning():
        # On Unix children are created with fork(), which inherits
        # everything, so no thread is ever "spawning" in the Windows
        # pickle-to-child sense.
        return False

    #
    # We define a Popen class similar to the one from subprocess, but
    # whose constructor takes a process object as its argument, and which
    # has terminate() and waitTimeout() methods.
    #
    class Popen(object):

        def __init__(self, process_obj):
            # Flush before fork() so buffered output is not emitted twice
            # (once by the parent and once by the child).
            sys.stdout.flush()
            sys.stderr.flush()
            self.returncode = None
            self.pid = os.fork()
            if self.pid == 0:
                # Child process: reseed the RNG so forked children do not
                # all share the parent's random state.
                if 'random' in sys.modules:
                    import random
                    random.seed()
                code = process_obj._bootstrap()
                sys.stdout.flush()
                sys.stderr.flush()
                os._exit(code)

        def wait(self):
            # Block until the child exits (poll with flag 0 blocks).
            return self.poll(0)

        def poll(self, flag=os.WNOHANG):
            if self.returncode is None:
                pid, sts = os.waitpid(self.pid, flag)
                if pid == self.pid:
                    # Translate the wait status: negative value for a
                    # signal death, exit status otherwise.
                    if os.WIFSIGNALED(sts):
                        self.returncode = -os.WTERMSIG(sts)
                    else:
                        assert os.WIFEXITED(sts)
                        self.returncode = os.WEXITSTATUS(sts)
            return self.returncode

        def waitTimeout(self, timeout):
            # Poll with exponential backoff (0.5 ms doubling, capped at
            # 50 ms) until the child exits or the deadline passes.
            deadline = time.time() + timeout
            delay = 0.0005
            while 1:
                res = self.poll()
                if res is not None:
                    break
                remaining = deadline - time.time()
                if remaining <= 0:
                    break
                delay = min(delay * 2, remaining, 0.05)
                time.sleep(delay)
            return res

        def terminate(self):
            if self.returncode is None:
                try:
                    os.kill(self.pid, signal.SIGTERM)
                except OSError, e:
                    # The kill may race with the child exiting; only
                    # re-raise if the child is still not gone shortly after.
                    if self.waitTimeout(0.1) is None:
                        raise
#
# Windows
#
else:
    import imp, thread, msvcrt, _subprocess
    from os.path import dirname, splitext, basename, abspath
    from cPickle import dump, load, HIGHEST_PROTOCOL

    from processing._processing import win32
    from processing._processing import _hInterruptEvent, _main_thread_ident
    from processing.finalize import Finalize

    # Magic exit code used to recognize a process killed by terminate().
    TERMINATE = 0x10000
    WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))

    exit = win32.ExitProcess

    # Thread-local flag consulted by thisThreadIsSpawning()/assertSpawning().
    tls = thread._local()

    def thisThreadIsSpawning():
        return getattr(tls, 'is_spawning', False)

    #
    # We define a Popen class similar to the one from subprocess, but
    # whose constructor takes a process object as its argument, and which
    # has terminate() and waitTimeout() methods.
    #
    class Popen(object):
        '''
        Start a subprocess to run the code of a process object
        '''
        def __init__(self, process_obj):
            # create pipe for communication with child
            r, w = os.pipe()

            # get handle for read end of the pipe and make it inheritable
            rhandle = msvcrt.get_osfhandle(r)
            win32.SetHandleInformation(
                rhandle, win32.HANDLE_FLAG_INHERIT, win32.HANDLE_FLAG_INHERIT
                )

            # start process; the inherited pipe handle is passed on the
            # child's command line so it can find its end of the pipe
            cmd = getCommandLine() + [rhandle]
            cmd = ' '.join('"%s"' % x for x in cmd)
            hp, ht, pid, tid = _subprocess.CreateProcess(
                sys.executable, cmd, None, None, 1, 0, None, None, None
                )
            os.close(r)
            ht.Close()

            # set attributes of self
            self.pid = pid
            self.returncode = None
            self._handle = hp

            # send information to child: preparation data first, then the
            # pickled process object itself.  is_spawning allows objects
            # that are only picklable via inheritance to pass the
            # assertSpawning() guard during this dump.
            prep_data = getPreparationData(process_obj._name)
            to_child = os.fdopen(w, 'wb')
            tls.is_spawning = True
            try:
                dump(prep_data, to_child, HIGHEST_PROTOCOL)
                dump(process_obj, to_child, HIGHEST_PROTOCOL)
            finally:
                tls.is_spawning = False
                to_child.close()

        def waitTimeout(self, timeout):
            if self.returncode is None:
                if timeout is None:
                    msecs = win32.INFINITE
                else:
                    msecs = int(timeout * 1000 + 0.5)
                # The main thread also waits on the interrupt event so a
                # Ctrl-C can break out of the wait.
                if _main_thread_ident == thread.get_ident():
                    win32.ResetEvent(_hInterruptEvent)
                    handles = (int(self._handle), _hInterruptEvent)
                else:
                    handles = (int(self._handle),)
                res = win32.WaitForMultipleObjects(
                    len(handles), handles, False, msecs
                    )
                if res == win32.WAIT_OBJECT_0:
                    code = win32.GetExitCodeProcess(int(self._handle))
                    # Map the terminate() magic code to a SIGTERM-like value
                    # to mirror the Unix branch's convention.
                    if code == TERMINATE:
                        code = -signal.SIGTERM
                    self.returncode = code
            return self.returncode

        def wait(self):
            return self.waitTimeout(None)

        def poll(self):
            return self.waitTimeout(0)

        def terminate(self):
            if self.returncode is None:
                try:
                    win32.TerminateProcess(int(self._handle), TERMINATE)
                except WindowsError:
                    # Termination may race with normal exit; only re-raise
                    # if the process is still alive shortly afterwards.
                    if self.waitTimeout(0.1) is None:
                        raise
#
#
#
def isForking(argv):
    '''
    Return whether commandline indicates we are forking
    '''
    if len(argv) < 2 or argv[1] != '--processing-fork':
        return False
    # a fork command line carries exactly one extra argument (the pipe handle)
    assert len(argv) == 3
    return True
def freezeSupport():
    '''
    Run code for process object if this is not the main process
    '''
    if not isForking(sys.argv):
        return
    main()
    sys.exit()
def getCommandLine():
'''
Returns prefix of command line used for spawning a child process
'''
# An empty _identity tuple appears to identify the top-level process --
# forking from it while sys.argv still looks like a fork command means
# the bootstrap never completed (freezeSupport() was not called).
if processing.currentProcess()._identity==() and isForking(sys.argv):
raise RuntimeError, '''
Attempt to start a new process before the current process
has finished its bootstrapping phase.
This probably means that you are on Windows and you have
forgotten to use the proper idiom in the main module:
if __name__ == '__main__':
freezeSupport()
...
The "freezeSupport()" line can be omitted if the program
is not going to be frozen to produce a Windows executable.'''
# the child re-enters through processing.forking.main()
prog = 'from processing.forking import main; main()'
if getattr(sys, 'frozen', False):
# frozen executable: the exe itself handles the fork flag
return [sys.executable, '--processing-fork']
elif sys.executable.lower().endswith('pythonservice.exe'):
# running as a Windows service: spawn the plain interpreter instead
exe = os.path.join(os.path.dirname(os.__file__),'..','python.exe')
return [exe, '-c', prog, '--processing-fork']
else:
return [sys.executable, '-c', prog, '--processing-fork']
def getPreparationData(name):
'''
Return info about parent needed by child to unpickle process object
'''
from processing.logger import _logger
# replicate the parent's logging configuration in the child, if any
if _logger is not None:
log_args = (_logger.getEffectiveLevel(),) + _logger._extra_args
else:
log_args = None
# locate the parent's main script so the child can re-import it; not
# applicable for interactive ('') / -c sessions or frozen executables
if sys.argv[0] not in ('', '-c') and not WINEXE:
mainpath = getattr(sys.modules['__main__'], '__file__', None)
if mainpath is not None and not os.path.isabs(mainpath):
# we will assume os.chdir() was not used between program
# start up and the first import of processing
mainpath = os.path.join(processing.ORIGINAL_DIR, mainpath)
else:
mainpath = None
# positional order must match the parameters of prepare() below
return [name, mainpath, sys.path, sys.argv,
processing.currentProcess().getAuthKey(),
None, processing.ORIGINAL_DIR, log_args]
def prepare(name, mainpath, sys_path, sys_argv, authkey,
cur_dir, orig_dir, log_args):
'''
Try to get this process ready to unpickle process object
'''
global original_main_module
# keep a reference to the child's own __main__ before it is replaced
original_main_module = sys.modules['__main__']
processing.currentProcess().setName(name)
processing.currentProcess().setAuthKey(authkey)
if log_args is not None:
from processing.logger import enableLogging
enableLogging(*log_args)
if orig_dir is not None:
processing.ORIGINAL_DIR = orig_dir
if cur_dir is not None:
try:
os.chdir(cur_dir)
except OSError:
raise
if sys_path is not None:
sys.path = sys_path
if mainpath is not None:
# derive the module name of the parent's main script
mainname = splitext(basename(mainpath))[0]
if mainname == '__init__':
mainname = basename(dirname(mainpath))
if not mainpath.lower().endswith('.exe') and mainname != 'ipython':
if mainpath is None:
# NOTE(review): this branch looks unreachable -- mainpath is
# known non-None from the enclosing condition above
dirs = None
elif basename(mainpath).startswith('__init__.py'):
dirs = [dirname(dirname(mainpath))]
else:
dirs = [dirname(mainpath)]
assert mainname not in sys.modules, mainname
file, pathname, etc = imp.find_module(mainname, dirs)
try:
# We would like to do "imp.load_module('__main__', ...)"
# here. However, that would cause 'if __name__ ==
# "__main__"' clauses to be executed.
main_module = imp.load_module(
'__parents_main__', file, pathname, etc
)
finally:
if file:
file.close()
sys.modules['__main__'] = main_module
main_module.__name__ = '__main__'
# XXX Try to make the potentially picklable objects in
# sys.modules['__main__'] realize they are in the main
# module -- ugly
for obj in main_module.__dict__.values():
try:
if obj.__module__ == '__parents_main__':
obj.__module__ = '__main__'
except (KeyboardInterrupt, SystemExit):
raise
except:
pass
if sys_argv is not None: # this needs to come last
sys.argv = sys_argv
def main():
'''
Run code specified by data received over pipe
'''
assert isForking(sys.argv)
# the last command-line argument is the pipe handle inherited from the
# parent (appended by Popen.__init__)
handle = int(sys.argv[-1])
fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
from_parent = os.fdopen(fd, 'rb')
# _inheriting flags that we are unpickling state sent by the parent
processing.currentProcess()._inheriting = True
preparation_data = load(from_parent)
prepare(*preparation_data)
self = load(from_parent)
processing.currentProcess()._inheriting = False
from_parent.close()
# run the process object's code and exit with its return code
exitcode = self._bootstrap()
win32.ExitProcess(exitcode)
| apache-2.0 |
Godiyos/python-for-android | python3-alpha/python3-src/Lib/tkinter/dnd.py | 151 | 11488 | """Drag-and-drop support for Tkinter.
This is very preliminary. I currently only support dnd *within* one
application, between different windows (or within the same window).
I am trying to make this as generic as possible -- not dependent on
the use of a particular widget or icon type, etc. I also hope that
this will work with Pmw.
To enable an object to be dragged, you must create an event binding
for it that starts the drag-and-drop process. Typically, you should
bind <ButtonPress> to a callback function that you write. The function
should call Tkdnd.dnd_start(source, event), where 'source' is the
object to be dragged, and 'event' is the event that invoked the call
(the argument to your callback function). Even though this is a class
instantiation, the returned instance should not be stored -- it will
be kept alive automatically for the duration of the drag-and-drop.
When a drag-and-drop is already in process for the Tk interpreter, the
call is *ignored*; this normally averts starting multiple simultaneous
dnd processes, e.g. because different button callbacks all
dnd_start().
The object is *not* necessarily a widget -- it can be any
application-specific object that is meaningful to potential
drag-and-drop targets.
Potential drag-and-drop targets are discovered as follows. Whenever
the mouse moves, and at the start and end of a drag-and-drop move, the
Tk widget directly under the mouse is inspected. This is the target
widget (not to be confused with the target object, yet to be
determined). If there is no target widget, there is no dnd target
object. If there is a target widget, and it has an attribute
dnd_accept, this should be a function (or any callable object). The
function is called as dnd_accept(source, event), where 'source' is the
object being dragged (the object passed to dnd_start() above), and
'event' is the most recent event object (generally a <Motion> event;
it can also be <ButtonPress> or <ButtonRelease>). If the dnd_accept()
function returns something other than None, this is the new dnd target
object. If dnd_accept() returns None, or if the target widget has no
dnd_accept attribute, the target widget's parent is considered as the
target widget, and the search for a target object is repeated from
there. If necessary, the search is repeated all the way up to the
root widget. If none of the target widgets can produce a target
object, there is no target object (the target object is None).
The target object thus produced, if any, is called the new target
object. It is compared with the old target object (or None, if there
was no old target widget). There are several cases ('source' is the
source object, and 'event' is the most recent event object):
- Both the old and new target objects are None. Nothing happens.
- The old and new target objects are the same object. Its method
dnd_motion(source, event) is called.
- The old target object was None, and the new target object is not
None. The new target object's method dnd_enter(source, event) is
called.
- The new target object is None, and the old target object is not
None. The old target object's method dnd_leave(source, event) is
called.
- The old and new target objects differ and neither is None. The old
target object's method dnd_leave(source, event), and then the new
target object's method dnd_enter(source, event) is called.
Once this is done, the new target object replaces the old one, and the
Tk mainloop proceeds. The return value of the methods mentioned above
is ignored; if they raise an exception, the normal exception handling
mechanisms take over.
The drag-and-drop processes can end in two ways: a final target object
is selected, or no final target object is selected. When a final
target object is selected, it will always have been notified of the
potential drop by a call to its dnd_enter() method, as described
above, and possibly one or more calls to its dnd_motion() method; its
dnd_leave() method has not been called since the last call to
dnd_enter(). The target is notified of the drop by a call to its
method dnd_commit(source, event).
If no final target object is selected, and there was an old target
object, its dnd_leave(source, event) method is called to complete the
dnd sequence.
Finally, the source object is notified that the drag-and-drop process
is over, by a call to source.dnd_end(target, event), specifying either
the selected target object, or None if no target object was selected.
The source object can use this to implement the commit action; this is
sometimes simpler than to do it in the target's dnd_commit(). The
target's dnd_commit() method could then simply be aliased to
dnd_leave().
At any time during a dnd sequence, the application can cancel the
sequence by calling the cancel() method on the object returned by
dnd_start(). This will call dnd_leave() if a target is currently
active; it will never call dnd_commit().
"""
import tkinter
# The factory function
def dnd_start(source, event):
    """Begin a drag-and-drop for *source*; return the DndHandler or None.

    The handler registers itself on the Tk root, so callers need not
    (and should not) keep the returned instance.
    """
    handler = DndHandler(source, event)
    return handler if handler.root else None
# The class that does the work
class DndHandler:
"""Drives one drag-and-drop sequence.

Stores itself on the Tk root as the (name-mangled) attribute
_DndHandler__dnd, which is how recursive dnd starts are detected and
how at most one sequence per interpreter is enforced.
"""
root = None
def __init__(self, source, event):
# ignore events from buttons beyond 5 -- presumably extra mouse
# buttons; TODO confirm intended range
if event.num > 5:
return
root = event.widget._root()
try:
# note: __dnd is name-mangled to _DndHandler__dnd on the root
root.__dnd
return # Don't start recursive dnd
except AttributeError:
root.__dnd = self
self.root = root
self.source = source
self.target = None
self.initial_button = button = event.num
self.initial_widget = widget = event.widget
# release of the *same* button that started the drag ends it
self.release_pattern = "<B%d-ButtonRelease-%d>" % (button, button)
self.save_cursor = widget['cursor'] or ""
widget.bind(self.release_pattern, self.on_release)
widget.bind("<Motion>", self.on_motion)
widget['cursor'] = "hand2"
def __del__(self):
# drop the back-reference from the root, if still present
root = self.root
self.root = None
if root:
try:
del root.__dnd
except AttributeError:
pass
def on_motion(self, event):
# Find the target object under the pointer by walking up the widget
# hierarchy asking each widget's dnd_accept(), then fire the
# appropriate dnd_enter/dnd_motion/dnd_leave transitions.
x, y = event.x_root, event.y_root
target_widget = self.initial_widget.winfo_containing(x, y)
source = self.source
new_target = None
while target_widget:
try:
attr = target_widget.dnd_accept
except AttributeError:
pass
else:
new_target = attr(source, event)
if new_target:
break
target_widget = target_widget.master
old_target = self.target
if old_target is new_target:
if old_target:
old_target.dnd_motion(source, event)
else:
if old_target:
self.target = None
old_target.dnd_leave(source, event)
if new_target:
new_target.dnd_enter(source, event)
self.target = new_target
def on_release(self, event):
# button released: commit the drop
self.finish(event, 1)
def cancel(self, event=None):
# abandon the sequence without committing
self.finish(event, 0)
def finish(self, event, commit=0):
# Tear down bindings/state and notify target (dnd_commit/dnd_leave);
# the source's dnd_end() is always called, even if cleanup raises.
target = self.target
source = self.source
widget = self.initial_widget
root = self.root
try:
del root.__dnd
self.initial_widget.unbind(self.release_pattern)
self.initial_widget.unbind("<Motion>")
widget['cursor'] = self.save_cursor
self.target = self.source = self.initial_widget = self.root = None
if target:
if commit:
target.dnd_commit(source, event)
else:
target.dnd_leave(source, event)
finally:
source.dnd_end(target, event)
# ----------------------------------------------------------------------
# The rest is here for testing and demonstration purposes only!
class Icon:
"""Demo draggable item: a raised Label placed in a Canvas window item."""
def __init__(self, name):
self.name = name
# canvas/label/id are set by attach() and cleared by detach()
self.canvas = self.label = self.id = None
def attach(self, canvas, x=10, y=10):
# Place (or move) this icon on the given canvas at (x, y).
if canvas is self.canvas:
self.canvas.coords(self.id, x, y)
return
if self.canvas:
self.detach()
if not canvas:
return
label = tkinter.Label(canvas, text=self.name,
borderwidth=2, relief="raised")
id = canvas.create_window(x, y, window=label, anchor="nw")
self.canvas = canvas
self.label = label
self.id = id
# pressing the label starts a drag (see press())
label.bind("<ButtonPress>", self.press)
def detach(self):
# Remove the icon from its current canvas, destroying the label.
canvas = self.canvas
if not canvas:
return
id = self.id
label = self.label
self.canvas = self.label = self.id = None
canvas.delete(id)
label.destroy()
def press(self, event):
if dnd_start(self, event):
# where the pointer is relative to the label widget:
self.x_off = event.x
self.y_off = event.y
# where the widget is relative to the canvas:
self.x_orig, self.y_orig = self.canvas.coords(self.id)
def move(self, event):
# Follow the pointer on the current canvas during a drag.
x, y = self.where(self.canvas, event)
self.canvas.coords(self.id, x, y)
def putback(self):
# Restore the position recorded when the drag started.
self.canvas.coords(self.id, self.x_orig, self.y_orig)
def where(self, canvas, event):
# Translate a screen-coordinate event into canvas coordinates for
# the icon's top-left corner.
# where the corner of the canvas is relative to the screen:
x_org = canvas.winfo_rootx()
y_org = canvas.winfo_rooty()
# where the pointer is relative to the canvas widget:
x = event.x_root - x_org
y = event.y_root - y_org
# compensate for initial pointer offset
return x - self.x_off, y - self.y_off
def dnd_end(self, target, event):
# Called when the drag finishes; nothing to do for this demo.
pass
class Tester:
"""Demo drop target: a Toplevel holding a Canvas that accepts icons."""
def __init__(self, root):
self.top = tkinter.Toplevel(root)
self.canvas = tkinter.Canvas(self.top, width=100, height=100)
self.canvas.pack(fill="both", expand=1)
# the dnd machinery looks for this attribute on widgets under the
# pointer (see DndHandler.on_motion)
self.canvas.dnd_accept = self.dnd_accept
def dnd_accept(self, source, event):
# Always willing to accept a drop; the Tester itself is the target.
return self
def dnd_enter(self, source, event):
self.canvas.focus_set() # Show highlight border
# draw an outline the same size as the dragged icon
x, y = source.where(self.canvas, event)
x1, y1, x2, y2 = source.canvas.bbox(source.id)
dx, dy = x2-x1, y2-y1
self.dndid = self.canvas.create_rectangle(x, y, x+dx, y+dy)
self.dnd_motion(source, event)
def dnd_motion(self, source, event):
# move the outline to follow the pointer
x, y = source.where(self.canvas, event)
x1, y1, x2, y2 = self.canvas.bbox(self.dndid)
self.canvas.move(self.dndid, x-x1, y-y1)
def dnd_leave(self, source, event):
self.top.focus_set() # Hide highlight border
self.canvas.delete(self.dndid)
self.dndid = None
def dnd_commit(self, source, event):
# drop: remove the outline, then re-parent the icon onto this canvas
self.dnd_leave(source, event)
x, y = source.where(self.canvas, event)
source.attach(self.canvas, x, y)
def test():
    """Demo: three drop-target windows and three draggable icons."""
    root = tkinter.Tk()
    root.geometry("+1+1")
    tkinter.Button(command=root.quit, text="Quit").pack()
    testers = []
    for offset in ("+1+60", "+120+60", "+240+60"):
        tester = Tester(root)
        tester.top.geometry(offset)
        testers.append(tester)
    # keep the icons referenced for the lifetime of the event loop
    icons = [Icon(name) for name in ("ICON1", "ICON2", "ICON3")]
    for icon, tester in zip(icons, testers):
        icon.attach(tester.canvas)
    root.mainloop()
if __name__ == '__main__':
test()
| apache-2.0 |
Microvellum/Fluid-Designer | win64-vc/2.78/python/lib/reportlab/pdfbase/_fontdata_enc_symbol.py | 56 | 3187 | SymbolEncoding = (
None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, 'space',
'exclam', 'universal', 'numbersign', 'existential', 'percent', 'ampersand', 'suchthat',
'parenleft', 'parenright', 'asteriskmath', 'plus', 'comma', 'minus', 'period', 'slash', 'zero',
'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'colon', 'semicolon',
'less', 'equal', 'greater', 'question', 'congruent', 'Alpha', 'Beta', 'Chi', 'Delta', 'Epsilon',
'Phi', 'Gamma', 'Eta', 'Iota', 'theta1', 'Kappa', 'Lambda', 'Mu', 'Nu', 'Omicron', 'Pi', 'Theta',
'Rho', 'Sigma', 'Tau', 'Upsilon', 'sigma1', 'Omega', 'Xi', 'Psi', 'Zeta', 'bracketleft',
'therefore', 'bracketright', 'perpendicular', 'underscore', 'radicalex', 'alpha', 'beta', 'chi',
'delta', 'epsilon', 'phi', 'gamma', 'eta', 'iota', 'phi1', 'kappa', 'lambda', 'mu', 'nu',
'omicron', 'pi', 'theta', 'rho', 'sigma', 'tau', 'upsilon', 'omega1', 'omega', 'xi', 'psi', 'zeta',
'braceleft', 'bar', 'braceright', 'similar', None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None, 'Euro', 'Upsilon1', 'minute', 'lessequal',
'fraction', 'infinity', 'florin', 'club', 'diamond', 'heart', 'spade', 'arrowboth', 'arrowleft',
'arrowup', 'arrowright', 'arrowdown', 'degree', 'plusminus', 'second', 'greaterequal', 'multiply',
'proportional', 'partialdiff', 'bullet', 'divide', 'notequal', 'equivalence', 'approxequal',
'ellipsis', 'arrowvertex', 'arrowhorizex', 'carriagereturn', 'aleph', 'Ifraktur', 'Rfraktur',
'weierstrass', 'circlemultiply', 'circleplus', 'emptyset', 'intersection', 'union',
'propersuperset', 'reflexsuperset', 'notsubset', 'propersubset', 'reflexsubset', 'element',
'notelement', 'angle', 'gradient', 'registerserif', 'copyrightserif', 'trademarkserif', 'product',
'radical', 'dotmath', 'logicalnot', 'logicaland', 'logicalor', 'arrowdblboth', 'arrowdblleft',
'arrowdblup', 'arrowdblright', 'arrowdbldown', 'lozenge', 'angleleft', 'registersans',
'copyrightsans', 'trademarksans', 'summation', 'parenlefttp', 'parenleftex', 'parenleftbt',
'bracketlefttp', 'bracketleftex', 'bracketleftbt', 'bracelefttp', 'braceleftmid', 'braceleftbt',
'braceex', None, 'angleright', 'integral', 'integraltp', 'integralex', 'integralbt',
'parenrighttp', 'parenrightex', 'parenrightbt', 'bracketrighttp', 'bracketrightex',
'bracketrightbt', 'bracerighttp', 'bracerightmid', 'bracerightbt', None)
| gpl-3.0 |
jamiefolsom/edx-platform | lms/djangoapps/django_comment_client/permissions.py | 56 | 7646 | """
Module for checking permissions with the comment_client backend
"""
import logging
from types import NoneType
from request_cache.middleware import RequestCache
from lms.lib.comment_client import Thread
from opaque_keys.edx.keys import CourseKey
from django_comment_common.models import all_permissions_for_user_in_course
from teams.models import CourseTeam
def has_permission(user, permission, course_id=None):
    """Return True if ``user`` holds ``permission`` in ``course_id``.

    The full permission set for the user/course pair is computed once per
    request and memoized in the request cache.
    """
    assert isinstance(course_id, (NoneType, CourseKey))
    cache = RequestCache.get_request_cache().data
    cache_key = "django_comment_client.permissions.has_permission.all_permissions.{}.{}".format(
        user.id, course_id
    )
    if cache_key not in cache:
        cache[cache_key] = all_permissions_for_user_in_course(user, course_id)
    return permission in cache[cache_key]
CONDITIONS = ['is_open', 'is_author', 'is_question_author', 'is_team_member_if_applicable']
def get_team(commentable_id):
    """ Returns the team that the commentable_id belongs to if it exists. Returns None otherwise. """
    cache = RequestCache.get_request_cache().data
    cache_key = "django_comment_client.team_commentable.{}".format(commentable_id)
    if cache_key not in cache:
        # memoize per request; None is cached too so misses are not re-queried
        try:
            cache[cache_key] = CourseTeam.objects.get(discussion_topic_id=commentable_id)
        except CourseTeam.DoesNotExist:
            cache[cache_key] = None
    return cache[cache_key]
def _check_condition(user, condition, content):
""" Check whether or not the given condition applies for the given user and content. """
def check_open(_user, content):
""" Check whether the content is open. """
try:
return content and not content['closed']
except KeyError:
return False
def check_author(user, content):
""" Check if the given user is the author of the content. """
try:
return content and content['user_id'] == str(user.id)
except KeyError:
return False
def check_question_author(user, content):
""" Check if the given user is the author of the original question for both threads and comments. """
if not content:
return False
try:
if content["type"] == "thread":
return content["thread_type"] == "question" and content["user_id"] == str(user.id)
else:
# N.B. This will trigger a comments service query
return check_question_author(user, Thread(id=content["thread_id"]).to_dict())
except KeyError:
return False
def check_team_member(user, content):
"""
If the content has a commentable_id, verifies that either it is not associated with a team,
or if it is, that the user is a member of that team.
"""
if not content:
return False
try:
commentable_id = content['commentable_id']
# memoized per request per (user, commentable) pair
request_cache_dict = RequestCache.get_request_cache().data
cache_key = "django_comment_client.check_team_member.{}.{}".format(user.id, commentable_id)
if cache_key in request_cache_dict:
return request_cache_dict[cache_key]
team = get_team(commentable_id)
if team is None:
# not a team discussion -- membership is not required
passes_condition = True
else:
passes_condition = team.users.filter(id=user.id).exists()
request_cache_dict[cache_key] = passes_condition
except KeyError:
# We do not expect KeyError in production-- it usually indicates an improper test mock.
logging.warning("Did not find key commentable_id in content.")
passes_condition = False
return passes_condition
# dispatch table: condition name (see CONDITIONS) -> checker function
handlers = {
'is_open': check_open,
'is_author': check_author,
'is_question_author': check_question_author,
'is_team_member_if_applicable': check_team_member
}
return handlers[condition](user, content)
def _check_conditions_permissions(user, permissions, course_id, content):
"""
Accepts a list of permissions and proceed if any of the permission is valid.
Note that ["can_view", "can_edit"] will proceed if the user has either
"can_view" or "can_edit" permission. To use AND operator in between, wrap them in
a list.
"""
def test(user, per, operator="or"):
# strings are leaves: either a CONDITIONS name or a permission name
if isinstance(per, basestring):
if per in CONDITIONS:
return _check_condition(user, per, content)
return has_permission(user, per, course_id=course_id)
elif isinstance(per, list) and operator in ["and", "or"]:
# nested lists are always AND-combined; only the top level is OR
results = [test(user, x, operator="and") for x in per]
if operator == "or":
return True in results
elif operator == "and":
# NOTE(review): membership tests (True in / False not in) rather
# than any()/all() -- relies on leaves returning real booleans
return False not in results
return test(user, permissions, operator="or")
# Note: 'edit_content' is being used as a generic way of telling if someone is a privileged user
# (forum Moderator/Admin/TA), because there is a desire that team membership does not impact privileged users.
# Structure: view name -> permission expression for _check_conditions_permissions();
# the outer list is OR-combined, nested lists are AND-combined, and entries are
# either permission names or condition names from CONDITIONS.
VIEW_PERMISSIONS = {
'update_thread': ['edit_content', ['update_thread', 'is_open', 'is_author']],
'create_comment': ['edit_content', ["create_comment", "is_open", "is_team_member_if_applicable"]],
'delete_thread': ['delete_thread', ['update_thread', 'is_author']],
'update_comment': ['edit_content', ['update_comment', 'is_open', 'is_author']],
'endorse_comment': ['endorse_comment', 'is_question_author'],
'openclose_thread': ['openclose_thread'],
'create_sub_comment': ['edit_content', ['create_sub_comment', 'is_open', 'is_team_member_if_applicable']],
'delete_comment': ['delete_comment', ['update_comment', 'is_open', 'is_author']],
'vote_for_comment': ['edit_content', ['vote', 'is_open', 'is_team_member_if_applicable']],
'undo_vote_for_comment': ['edit_content', ['unvote', 'is_open', 'is_team_member_if_applicable']],
'vote_for_thread': ['edit_content', ['vote', 'is_open', 'is_team_member_if_applicable']],
'flag_abuse_for_thread': ['edit_content', ['vote', 'is_team_member_if_applicable']],
'un_flag_abuse_for_thread': ['edit_content', ['vote', 'is_team_member_if_applicable']],
'flag_abuse_for_comment': ['edit_content', ['vote', 'is_team_member_if_applicable']],
'un_flag_abuse_for_comment': ['edit_content', ['vote', 'is_team_member_if_applicable']],
'undo_vote_for_thread': ['edit_content', ['unvote', 'is_open', 'is_team_member_if_applicable']],
'pin_thread': ['openclose_thread'],
'un_pin_thread': ['openclose_thread'],
'follow_thread': ['edit_content', ['follow_thread', 'is_team_member_if_applicable']],
'follow_commentable': ['edit_content', ['follow_commentable', 'is_team_member_if_applicable']],
'unfollow_thread': ['edit_content', ['unfollow_thread', 'is_team_member_if_applicable']],
'unfollow_commentable': ['edit_content', ['unfollow_commentable', 'is_team_member_if_applicable']],
'create_thread': ['edit_content', ['create_thread', 'is_team_member_if_applicable']],
}
def check_permissions_by_view(user, course_id, content, name):
    """Return True if ``user`` may perform the view action ``name`` on ``content``.

    Unknown view names are logged and denied.
    """
    assert isinstance(course_id, CourseKey)
    try:
        p = VIEW_PERMISSIONS[name]
    except KeyError:
        # Bug fix: previously execution fell through with `p` unbound, so the
        # call below raised UnboundLocalError. Deny access instead.
        logging.warning("Permission for view named %s does not exist in permissions.py", name)
        return False
    return _check_conditions_permissions(user, p, course_id, content)
| agpl-3.0 |
Comunitea/OCB | addons/account_check_writing/account.py | 379 | 2032 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv,fields
class account_journal(osv.osv):
# Extends account.journal with flags controlling check-writing behaviour.
_inherit = "account.journal"
_columns = {
# journal can be used to write checks
'allow_check_writing': fields.boolean('Allow Check writing', help='Check this if the journal is to be used for writing checks.'),
# checks are printed on pre-printed stationery rather than blank paper
'use_preprint_check': fields.boolean('Use Preprinted Check', help='Check if you use a preformated sheet for check'),
}
class res_company(osv.osv):
# Extends res.company with the position of the check on the printed page.
_inherit = "res.company"
_columns = {
'check_layout': fields.selection([
('top', 'Check on Top'),
('middle', 'Check in middle'),
('bottom', 'Check on bottom'),
],"Check Layout",
help="Check on top is compatible with Quicken, QuickBooks and Microsoft Money. Check in middle is compatible with Peachtree, ACCPAC and DacEasy. Check on bottom is compatible with Peachtree, ACCPAC and DacEasy only" ),
}
_defaults = {
# default to the Quicken/QuickBooks-compatible layout (see help above)
'check_layout' : lambda *a: 'top',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rahushen/ansible | lib/ansible/modules/network/eos/eos_static_route.py | 26 | 7482 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: eos_static_route
version_added: "2.5"
author: "Trishna Guha (@trishnaguha)"
short_description: Manage static IP routes on Arista EOS network devices
description:
- This module provides declarative management of static
IP routes on Arista EOS network devices.
notes:
- Tested against EOS 4.15
options:
address:
description:
- Network address with prefix of the static route.
required: true
aliases: ['prefix']
next_hop:
description:
- Next hop IP of the static route.
required: true
admin_distance:
description:
- Admin distance of the static route.
default: 1
aggregate:
description: List of static route definitions
state:
description:
- State of the static route configuration.
default: present
choices: ['present', 'absent']
extends_documentation_fragment: eos
"""
EXAMPLES = """
- name: configure static route
eos_static_route:
address: 10.0.2.0/24
next_hop: 10.8.38.1
admin_distance: 2
- name: delete static route
eos_static_route:
address: 10.0.2.0/24
next_hop: 10.8.38.1
state: absent
- name: configure static routes using aggregate
eos_static_route:
aggregate:
- { address: 10.0.1.0/24, next_hop: 10.8.38.1 }
- { address: 10.0.3.0/24, next_hop: 10.8.38.1 }
- name: Delete static route using aggregate
eos_static_route:
aggregate:
- { address: 10.0.1.0/24, next_hop: 10.8.38.1 }
- { address: 10.0.3.0/24, next_hop: 10.8.38.1 }
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- ip route 10.0.2.0/24 10.8.38.1 3
- no ip route 10.0.2.0/24 10.8.38.1
"""
import re
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import is_masklen, validate_ip_address
from ansible.module_utils.network.common.utils import remove_default_spec, validate_prefix
from ansible.module_utils.network.eos.eos import get_config, load_config
from ansible.module_utils.network.eos.eos import eos_argument_spec, check_args
def is_address(value):
    """Return True if ``value`` is an IP address in ``A.B.C.D/len`` form.

    Bug fix: the original indexed ``value.split('/')[1]`` unconditionally,
    raising IndexError for tokens without a ``/`` (e.g. the ``vrf`` keyword
    matched by map_config_to_obj). Such values now simply return False.
    """
    if value:
        parts = value.split('/')
        if len(parts) == 2 and is_masklen(parts[1]) and validate_ip_address(parts[0]):
            return True
    return False
def is_hop(value):
    """Return True when ``value`` is a valid next-hop IP address."""
    return bool(value and validate_ip_address(value))
def map_obj_to_commands(updates, module):
    """Diff wanted routes against the device state and emit EOS commands.

    ``updates`` is a ``(want, have)`` pair of route-dict lists; each wanted
    entry's ``state`` key is consumed (removed) before comparison so that the
    remaining keys can be matched against ``have`` directly.
    """
    want, have = updates
    commands = []
    for route in want:
        state = route.pop('state')
        if state == 'absent' and route in have:
            commands.append('no ip route %s %s' % (route['address'], route['next_hop']))
        elif state == 'present' and route not in have:
            commands.append('ip route %s %s %d'
                            % (route['address'], route['next_hop'], route['admin_distance']))
    return commands
def map_params_to_obj(module, required_together=None):
    """Normalize module parameters into a list of route dicts.

    With no ``aggregate`` parameter, a single route dict is built from the
    top-level parameters (address/next_hop whitespace-stripped). Otherwise
    each aggregate item has its None-valued keys filled in from the
    top-level defaults and is validated against ``required_together``.
    """
    aggregate = module.params.get('aggregate')
    if not aggregate:
        return [{
            'address': module.params['address'].strip(),
            'next_hop': module.params['next_hop'].strip(),
            'admin_distance': module.params['admin_distance'],
            'state': module.params['state'],
        }]
    obj = []
    for item in aggregate:
        for key in item:
            if item.get(key) is None:
                item[key] = module.params[key]
        module._check_required_together(required_together, item)
        obj.append(item.copy())
    return obj
def map_config_to_obj(module):
"""Parse 'ip route' lines of the running config into route dicts
({'address', 'next_hop', 'admin_distance'})."""
objs = []
try:
out = get_config(module, flags=['| include ip.route'])
except IndexError:
out = ''
if out:
lines = out.splitlines()
for line in lines:
obj = {}
# first token after 'ip route' is expected to be the prefix
add_match = re.search(r'ip route (\S+)', line, re.M)
if add_match:
address = add_match.group(1)
if is_address(address):
obj['address'] = address
# next token is the next-hop address
# NOTE(review): address/hop are interpolated unescaped into the
# pattern; '.' and '/' are regex metacharacters -- re.escape
# would be safer, though '.' matching itself works in practice
hop_match = re.search(r'ip route {0} (\S+)'.format(address), line, re.M)
if hop_match:
hop = hop_match.group(1)
if is_hop(hop):
obj['next_hop'] = hop
# optional trailing admin distance; EOS default is 1
dist_match = re.search(r'ip route {0} {1} (\d+)'.format(address, hop), line, re.M)
if dist_match:
distance = dist_match.group(1)
obj['admin_distance'] = int(distance)
else:
obj['admin_distance'] = 1
objs.append(obj)
return objs
def main():
""" main entry point for module execution
"""
element_spec = dict(
address=dict(type='str', aliases=['prefix']),
next_hop=dict(type='str'),
admin_distance=dict(default=1, type='int'),
state=dict(default='present', choices=['present', 'absent'])
)
aggregate_spec = deepcopy(element_spec)
# inside an aggregate every entry must carry its own address
aggregate_spec['address'] = dict(required=True)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
)
argument_spec.update(element_spec)
argument_spec.update(eos_argument_spec)
# either a single route (address) or an aggregate must be supplied
required_one_of = [['aggregate', 'address']]
required_together = [['address', 'next_hop']]
mutually_exclusive = [['aggregate', 'address']]
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=required_one_of,
required_together=required_together,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
# validate the top-level address/prefix before touching the device
address = module.params['address']
if address is not None:
prefix = address.split('/')[-1]
if address and prefix:
if '/' not in address or not validate_ip_address(address.split('/')[0]):
module.fail_json(msg='{} is not a valid IP address'.format(address))
if not validate_prefix(prefix):
module.fail_json(msg='Length of prefix should be between 0 and 32 bits')
warnings = list()
result = {'changed': False}
if warnings:
result['warnings'] = warnings
# diff desired state against device state and push the delta
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have), module)
result['commands'] = commands
if commands:
# in check mode, build but do not commit the configuration session
commit = not module.check_mode
response = load_config(module, commands, commit=commit)
if response.get('diff') and module._diff:
result['diff'] = {'prepared': response.get('diff')}
result['session_name'] = response.get('session')
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
openstack/neutron | neutron/tests/functional/services/trunk/rpc/test_server.py | 2 | 1506 | # (c) Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from neutron_lib.services.trunk import constants
from neutron.services.trunk import plugin as trunk_plugin
from neutron.tests.common import helpers
from neutron.tests.unit.plugins.ml2 import base as ml2_test_base
class TrunkSkeletonTestCase(ml2_test_base.ML2TestFramework):
    """Functional checks for the trunk RPC server-side skeleton."""

    def setUp(self):
        super(TrunkSkeletonTestCase, self).setUp()
        # Instantiating the plugin wires up the RPC backend under test.
        self.trunk_plugin = trunk_plugin.TrunkPlugin()

    def test__handle_port_binding_set_device_owner(self):
        """_handle_port_binding should mark the subport as trunk-owned."""
        helpers.register_ovs_agent(host=helpers.HOST)
        with self.port() as subport:
            port = (
                self.trunk_plugin.
                _rpc_backend._skeleton._handle_port_binding(
                    self.context, subport['port']['id'],
                    mock.ANY, helpers.HOST))
            self.assertEqual(
                constants.TRUNK_SUBPORT_OWNER, port['device_owner'])
| apache-2.0 |
cecep-edu/edx-platform | openedx/core/djangolib/testing/utils.py | 17 | 4853 | """
Utility classes for testing django applications.
:py:class:`CacheIsolationMixin`
A mixin helping to write tests which are isolated from cached data.
:py:class:`CacheIsolationTestCase`
A TestCase baseclass that has per-test isolated caches.
"""
import copy
from django import db
from django.core.cache import caches
from django.test import TestCase, override_settings
from django.conf import settings
from django.contrib import sites
from nose.plugins import Plugin
from request_cache.middleware import RequestCache
class CacheIsolationMixin(object):
    """
    This class can be used to enable specific django caches for
    the TestCase that it's mixed into.
    Usage:
    Use the ENABLED_CACHES to list the names of caches that should
    be enabled in the context of this TestCase. These caches will
    use a loc_mem_cache with the default settings.
    Set the class variable CACHES to explicitly specify the cache settings
    that should be overridden. This class will insert those values into
    django.conf.settings, and will reset all named caches before each
    test.
    If both CACHES and ENABLED_CACHES are not None, raises an error.
    """
    # Explicit CACHES settings dict to install (mutually exclusive with
    # ENABLED_CACHES).
    CACHES = None
    # Names of caches to back with isolated in-memory caches.
    ENABLED_CACHES = None
    # Stacks of active setting overrides / saved settings so that nested
    # start/end_cache_isolation calls unwind in LIFO order.
    __settings_overrides = []
    __old_settings = []
    @classmethod
    def start_cache_isolation(cls):
        """
        Start cache isolation by overriding the settings.CACHES and
        flushing the cache.
        """
        cache_settings = None
        if cls.CACHES is not None and cls.ENABLED_CACHES is not None:
            raise Exception(
                "Use either CACHES or ENABLED_CACHES, but not both"
            )
        if cls.CACHES is not None:
            cache_settings = cls.CACHES
        elif cls.ENABLED_CACHES is not None:
            # Caches not listed in ENABLED_CACHES fall through to the
            # dummy backend, making them deliberate no-ops.
            cache_settings = {
                'default': {
                    'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
                }
            }
            cache_settings.update({
                cache_name: {
                    'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
                    'LOCATION': cache_name,
                    'KEY_FUNCTION': 'util.memcache.safe_key',
                } for cache_name in cls.ENABLED_CACHES
            })
        if cache_settings is None:
            return
        # Save the previous settings so end_cache_isolation can verify
        # a clean unwind.
        cls.__old_settings.append(copy.deepcopy(settings.CACHES))
        override = override_settings(CACHES=cache_settings)
        override.__enter__()
        cls.__settings_overrides.append(override)
        assert settings.CACHES == cache_settings
        # Start with empty caches
        cls.clear_caches()
    @classmethod
    def end_cache_isolation(cls):
        """
        End cache isolation by flushing the cache and then returning
        settings.CACHES to its original state.
        """
        # Make sure that cache contents don't leak out after the isolation is ended
        cls.clear_caches()
        if cls.__settings_overrides:
            cls.__settings_overrides.pop().__exit__(None, None, None)
            assert settings.CACHES == cls.__old_settings.pop()
    @classmethod
    def clear_caches(cls):
        """
        Clear all of the caches defined in settings.CACHES.
        """
        # N.B. As of 2016-04-20, Django won't return any caches
        # from django.core.cache.caches.all() that haven't been
        # accessed using caches[name] previously, so we loop
        # over our list of overridden caches, instead.
        for cache in settings.CACHES:
            caches[cache].clear()
        # The sites framework caches in a module-level dictionary.
        # Clear that.
        sites.models.SITE_CACHE.clear()
        RequestCache.clear_request_cache()
class CacheIsolationTestCase(CacheIsolationMixin, TestCase):
    """
    A TestCase that isolates caches (as described in
    :py:class:`CacheIsolationMixin`) at class setup, and flushes the cache
    between every test.
    """
    @classmethod
    def setUpClass(cls):
        super(CacheIsolationTestCase, cls).setUpClass()
        cls.start_cache_isolation()
    @classmethod
    def tearDownClass(cls):
        # Undo the cache override before the superclass tears down,
        # mirroring the setup order.
        cls.end_cache_isolation()
        super(CacheIsolationTestCase, cls).tearDownClass()
    def setUp(self):
        super(CacheIsolationTestCase, self).setUp()
        # Flush before the test runs, and register a cleanup so the
        # caches are flushed again afterwards — no state leaks between
        # tests either way.
        self.clear_caches()
        self.addCleanup(self.clear_caches)
class NoseDatabaseIsolation(Plugin):
    """
    nosetest plugin that resets django databases before any tests begin.
    Used to make sure that tests running in multi processes aren't sharing
    a database connection.
    """
    # Name under which nose registers/enables this plugin.
    name = "database-isolation"
    def begin(self):
        """
        Before any tests start, reset all django database connections.
        """
        # Closing forces each process to open its own fresh connection
        # on next use.
        for db_ in db.connections.all():
            db_.close()
| agpl-3.0 |
shlomif/patool | patoolib/programs/compress.py | 1 | 1159 | # -*- coding: utf-8 -*-
# Copyright (C) 2010-2012 Bastian Kleineidam
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Archive commands for the uncompress.real program."""
from patoolib import util
def create_compress (archive, compression, cmd, *args, **kwargs):
    """Create a compressed archive with the compress(1) program.

    Builds a shell command of the form ``compress -c FILE... > ARCHIVE``
    and returns it together with the subprocess options patool expects.

    @param archive: path of the archive file to create
    @param compression: compression type (unused; compress has only one)
    @param cmd: name or path of the compress executable
    @param args: files to compress
    @param kwargs: command options; a truthy 'verbose' adds the -v flag
    @return: tuple of (command list, subprocess keyword arguments)
    """
    cmdlist = [util.shell_quote(cmd)]
    # Use .get() so a missing 'verbose' option means "not verbose"
    # instead of raising KeyError.
    if kwargs.get('verbose'):
        cmdlist.append('-v')
    # -c writes to stdout; the shell redirection below captures it.
    cmdlist.append('-c')
    cmdlist.extend([util.shell_quote(x) for x in args])
    cmdlist.extend(['>', util.shell_quote(archive)])
    # shell=True is required for the '>' redirection to take effect.
    return (cmdlist, {'shell': True})
| gpl-3.0 |
minhtuancn/odoo | addons/crm/__openerp__.py | 258 | 4199 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest: evaluated by the framework as a plain
# Python dict describing the CRM application.
{
    'name': 'CRM',
    'version': '1.0',
    'category': 'Customer Relationship Management',
    'sequence': 2,
    'summary': 'Leads, Opportunities, Phone Calls',
    'description': """
The generic OpenERP Customer Relationship Management
====================================================
This application enables a group of people to intelligently and efficiently manage leads, opportunities, meetings and phone calls.
It manages key tasks such as communication, identification, prioritization, assignment, resolution and notification.
OpenERP ensures that all cases are successfully tracked by users, customers and suppliers. It can automatically send reminders, escalate the request, trigger specific methods and many other actions based on your own enterprise rules.
The greatest thing about this system is that users don't need to do anything special. The CRM module has an email gateway for the synchronization interface between mails and OpenERP. That way, users can just send emails to the request tracker.
OpenERP will take care of thanking them for their message, automatically routing it to the appropriate staff and make sure all future correspondence gets to the right place.
Dashboard for CRM will include:
-------------------------------
* Planned Revenue by Stage and User (graph)
* Opportunities by Stage (graph)
""",
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com/page/crm',
    # Modules that must be installed before this one.
    'depends': [
        'base_action_rule',
        'base_setup',
        'sales_team',
        'mail',
        'email_template',
        'calendar',
        'resource',
        'board',
        'fetchmail',
    ],
    # Data/view/security files loaded at install and update time.
    'data': [
        'crm_data.xml',
        'crm_lead_data.xml',
        'crm_phonecall_data.xml',
        'security/crm_security.xml',
        'security/ir.model.access.csv',
        'wizard/crm_lead_to_opportunity_view.xml',
        'wizard/crm_phonecall_to_phonecall_view.xml',
        'wizard/crm_merge_opportunities_view.xml',
        'crm_view.xml',
        'crm_phonecall_view.xml',
        'crm_phonecall_menu.xml',
        'crm_lead_view.xml',
        'crm_lead_menu.xml',
        'calendar_event_menu.xml',
        'report/crm_lead_report_view.xml',
        'report/crm_opportunity_report_view.xml',
        'report/crm_phonecall_report_view.xml',
        'res_partner_view.xml',
        'res_config_view.xml',
        'base_partner_merge_view.xml',
        'sales_team_view.xml',
    ],
    # Demonstration data, only loaded when demo mode is enabled.
    'demo': [
        'crm_demo.xml',
        'crm_lead_demo.xml',
        'crm_phonecall_demo.xml',
        'crm_action_rule_demo.xml',
    ],
    # Legacy YAML test suites run by the framework's test runner.
    'test': [
        'test/crm_access_group_users.yml',
        'test/crm_lead_message.yml',
        'test/lead2opportunity2win.yml',
        'test/lead2opportunity_assign_salesmen.yml',
        'test/crm_lead_merge.yml',
        'test/crm_lead_cancel.yml',
        'test/segmentation.yml',
        'test/phonecalls.yml',
        'test/crm_lead_onchange.yml',
        'test/crm_lead_copy.yml',
        'test/crm_lead_unlink.yml',
        'test/crm_lead_find_stage.yml',
    ],
    'installable': True,
    # 'application': True makes this show up as a top-level app.
    'application': True,
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tntC4stl3/scrapy | scrapy/commands/crawl.py | 109 | 2520 | import os
from scrapy.commands import ScrapyCommand
from scrapy.utils.conf import arglist_to_dict
from scrapy.exceptions import UsageError
class Command(ScrapyCommand):
    """Implementation of the ``scrapy crawl`` command: run one spider."""

    # Crawling requires a Scrapy project context.
    requires_project = True

    def syntax(self):
        return "[options] <spider>"

    def short_desc(self):
        return "Run a spider"

    def add_options(self, parser):
        ScrapyCommand.add_options(self, parser)
        parser.add_option("-a", dest="spargs", action="append", default=[], metavar="NAME=VALUE",
                          help="set spider argument (may be repeated)")
        parser.add_option("-o", "--output", metavar="FILE",
                          help="dump scraped items into FILE (use - for stdout)")
        parser.add_option("-t", "--output-format", metavar="FORMAT",
                          help="format to use for dumping items with -o")

    def process_options(self, args, opts):
        """Translate -a/-o/-t options into settings before the crawl."""
        ScrapyCommand.process_options(self, args, opts)
        try:
            opts.spargs = arglist_to_dict(opts.spargs)
        except ValueError:
            raise UsageError("Invalid -a value, use -a NAME=VALUE", print_help=False)
        if opts.output:
            if opts.output == '-':
                self.settings.set('FEED_URI', 'stdout:', priority='cmdline')
            else:
                self.settings.set('FEED_URI', opts.output, priority='cmdline')
            # Formats known via custom exporters plus the built-in ones.
            valid_output_formats = (
                list(self.settings.getdict('FEED_EXPORTERS').keys()) +
                list(self.settings.getdict('FEED_EXPORTERS_BASE').keys())
            )
            if not opts.output_format:
                # Derive the format from the output file extension when
                # -t is not given.
                # NOTE(review): with "-o -" (stdout) and no -t, splitext('-')
                # yields '' and this always raises UsageError below —
                # confirm whether stdout output should require an explicit -t.
                opts.output_format = os.path.splitext(opts.output)[1].replace(".", "")
            if opts.output_format not in valid_output_formats:
                raise UsageError("Unrecognized output format '%s', set one"
                                 " using the '-t' switch or as a file extension"
                                 " from the supported list %s" % (opts.output_format,
                                                                  tuple(valid_output_formats)))
            self.settings.set('FEED_FORMAT', opts.output_format, priority='cmdline')

    def run(self, args, opts):
        """Start the crawl for exactly one named spider."""
        if len(args) < 1:
            raise UsageError()
        elif len(args) > 1:
            raise UsageError("running 'scrapy crawl' with more than one spider is no longer supported")
        spname = args[0]
        self.crawler_process.crawl(spname, **opts.spargs)
        self.crawler_process.start()
| bsd-3-clause |
Tarsbot/pika | tests/unit/callback_tests.py | 11 | 16337 | # -*- coding: utf8 -*-
"""
Tests for pika.callback
"""
import logging
try:
import mock
except ImportError:
from unittest import mock
try:
import unittest2 as unittest
except ImportError:
import unittest
from pika import amqp_object
from pika import callback
from pika import frame
from pika import spec
class CallbackTests(unittest.TestCase):
    """Unit tests for pika.callback.CallbackManager and name_or_value."""

    # Shorthand aliases for the CallbackManager dict keys used below.
    KEY = 'Test Key'
    ARGUMENTS = callback.CallbackManager.ARGUMENTS
    CALLS = callback.CallbackManager.CALLS
    CALLBACK = callback.CallbackManager.CALLBACK
    ONE_SHOT = callback.CallbackManager.ONE_SHOT
    ONLY_CALLER = callback.CallbackManager.ONLY_CALLER
    PREFIX_CLASS = spec.Basic.Consume
    PREFIX = 'Basic.Consume'
    ARGUMENTS_VALUE = {'foo': 'bar'}
    @property
    def _callback_dict(self):
        # Expected shape of a fully-populated callback stack entry.
        return {
            self.CALLBACK: self.callback_mock,
            self.ONE_SHOT: True,
            self.ONLY_CALLER: self.mock_caller,
            self.ARGUMENTS: self.ARGUMENTS_VALUE,
            self.CALLS: 1
        }
    def setUp(self):
        self.obj = callback.CallbackManager()
        self.callback_mock = mock.Mock()
        self.mock_caller = mock.Mock()
    def tearDown(self):
        del self.obj
        del self.callback_mock
        del self.mock_caller
    def test_initialization(self):
        obj = callback.CallbackManager()
        self.assertDictEqual(obj._stack, {})
    def test_name_or_value_method_object(self):
        value = spec.Basic.Consume()
        self.assertEqual(callback.name_or_value(value), self.PREFIX)
    def test_name_or_value_basic_consume_object(self):
        self.assertEqual(callback.name_or_value(spec.Basic.Consume()),
                         self.PREFIX)
    def test_name_or_value_amqpobject_class(self):
        self.assertEqual(callback.name_or_value(self.PREFIX_CLASS),
                         self.PREFIX)
    def test_name_or_value_protocol_header(self):
        self.assertEqual(callback.name_or_value(frame.ProtocolHeader()),
                         'ProtocolHeader')
    def test_name_or_value_method_frame(self):
        value = frame.Method(1, self.PREFIX_CLASS())
        self.assertEqual(callback.name_or_value(value), self.PREFIX)
    def test_name_or_value_str(self):
        value = 'Test String Value'
        expectation = value
        self.assertEqual(callback.name_or_value(value), expectation)
    def test_name_or_value_unicode(self):
        # Non-ASCII text should round-trip through name_or_value.
        value = u'Это тест значения'
        expectation = 'Это тест значения'
        self.assertEqual(callback.name_or_value(value), expectation)
    def test_empty_callbacks_on_init(self):
        self.assertFalse(self.obj._stack)
    def test_sanitize_decorator_with_args_only(self):
        self.obj.add(self.PREFIX_CLASS, self.KEY, None)
        self.assertIn(self.PREFIX, self.obj._stack.keys())
    def test_sanitize_decorator_with_kwargs(self):
        self.obj.add(prefix=self.PREFIX_CLASS, key=self.KEY, callback=None)
        self.assertIn(self.PREFIX, self.obj._stack.keys())
    def test_sanitize_decorator_with_mixed_args_and_kwargs(self):
        self.obj.add(self.PREFIX_CLASS, key=self.KEY, callback=None)
        self.assertIn(self.PREFIX, self.obj._stack.keys())
    def test_add_first_time_prefix_added(self):
        self.obj.add(self.PREFIX, self.KEY, None)
        self.assertIn(self.PREFIX, self.obj._stack)
    def test_add_first_time_key_added(self):
        self.obj.add(self.PREFIX, self.KEY, None)
        self.assertIn(self.KEY, self.obj._stack[self.PREFIX])
    def test_add_first_time_callback_added(self):
        self.obj.add(self.PREFIX, self.KEY, self.callback_mock)
        self.assertEqual(
            self.callback_mock,
            self.obj._stack[self.PREFIX][self.KEY][0][self.CALLBACK])
    def test_add_oneshot_default_is_true(self):
        self.obj.add(self.PREFIX, self.KEY, None)
        self.assertTrue(
            self.obj._stack[self.PREFIX][self.KEY][0][self.ONE_SHOT])
    def test_add_oneshot_is_false(self):
        self.obj.add(self.PREFIX, self.KEY, None, False)
        self.assertFalse(
            self.obj._stack[self.PREFIX][self.KEY][0][self.ONE_SHOT])
    def test_add_only_caller_default_is_false(self):
        self.obj.add(self.PREFIX, self.KEY, None)
        self.assertFalse(
            self.obj._stack[self.PREFIX][self.KEY][0][self.ONLY_CALLER])
    def test_add_only_caller_true(self):
        self.obj.add(self.PREFIX, self.KEY, None, only_caller=True)
        self.assertTrue(
            self.obj._stack[self.PREFIX][self.KEY][0][self.ONLY_CALLER])
    def test_add_returns_prefix_value_and_key(self):
        self.assertEqual(self.obj.add(self.PREFIX, self.KEY, None),
                         (self.PREFIX, self.KEY))
    def test_add_duplicate_callback(self):
        mock_callback = mock.Mock()
        def add_callback():
            self.obj.add(self.PREFIX, self.KEY, mock_callback, False)
        with mock.patch('pika.callback.LOGGER', spec=logging.Logger) as logger:
            logger.warning = mock.Mock()
            add_callback()
            add_callback()
            DUPLICATE_WARNING = callback.CallbackManager.DUPLICATE_WARNING
            logger.warning.assert_called_once_with(DUPLICATE_WARNING,
                                                   self.PREFIX, self.KEY)
    def test_add_duplicate_callback_returns_prefix_value_and_key(self):
        self.obj.add(self.PREFIX, self.KEY, None)
        self.assertEqual(self.obj.add(self.PREFIX, self.KEY, None),
                         (self.PREFIX, self.KEY))
    def test_clear(self):
        self.obj.add(self.PREFIX, self.KEY, None)
        self.obj.clear()
        self.assertDictEqual(self.obj._stack, dict())
    def test_cleanup_removes_prefix(self):
        OTHER_PREFIX = 'Foo'
        self.obj.add(self.PREFIX, self.KEY, None)
        self.obj.add(OTHER_PREFIX, 'Bar', None)
        self.obj.cleanup(self.PREFIX)
        self.assertNotIn(self.PREFIX, self.obj._stack)
    def test_cleanup_keeps_other_prefix(self):
        OTHER_PREFIX = 'Foo'
        self.obj.add(self.PREFIX, self.KEY, None)
        self.obj.add(OTHER_PREFIX, 'Bar', None)
        self.obj.cleanup(self.PREFIX)
        self.assertIn(OTHER_PREFIX, self.obj._stack)
    def test_cleanup_returns_true(self):
        self.obj.add(self.PREFIX, self.KEY, None)
        self.assertTrue(self.obj.cleanup(self.PREFIX))
    def test_missing_prefix(self):
        self.assertFalse(self.obj.cleanup(self.PREFIX))
    def test_pending_none(self):
        self.assertIsNone(self.obj.pending(self.PREFIX_CLASS, self.KEY))
    def test_pending_one(self):
        self.obj.add(self.PREFIX, self.KEY, None)
        self.assertEqual(self.obj.pending(self.PREFIX_CLASS, self.KEY), 1)
    def test_pending_two(self):
        self.obj.add(self.PREFIX, self.KEY, None)
        self.obj.add(self.PREFIX, self.KEY, lambda x: True)
        self.assertEqual(self.obj.pending(self.PREFIX_CLASS, self.KEY), 2)
    def test_process_callback_false(self):
        self.obj._stack = dict()
        self.assertFalse(self.obj.process('FAIL', 'False', 'Empty',
                                          self.mock_caller, []))
    def test_process_false(self):
        self.assertFalse(self.obj.process(self.PREFIX_CLASS, self.KEY, self))
    def test_process_true(self):
        self.obj.add(self.PREFIX, self.KEY, self.callback_mock)
        self.assertTrue(self.obj.process(self.PREFIX_CLASS, self.KEY, self))
    def test_process_mock_called(self):
        args = (1, None, 'Hi')
        self.obj.add(self.PREFIX, self.KEY, self.callback_mock)
        self.obj.process(self.PREFIX, self.KEY, self, args)
        self.callback_mock.assert_called_once_with(args)
    def test_process_one_shot_removed(self):
        args = (1, None, 'Hi')
        self.obj.add(self.PREFIX, self.KEY, self.callback_mock)
        self.obj.process(self.PREFIX, self.KEY, self, args)
        self.assertNotIn(self.PREFIX, self.obj._stack)
    def test_process_non_one_shot_prefix_not_removed(self):
        self.obj.add(self.PREFIX, self.KEY, self.callback_mock, one_shot=False)
        self.obj.process(self.PREFIX, self.KEY, self)
        self.assertIn(self.PREFIX, self.obj._stack)
    def test_process_non_one_shot_key_not_removed(self):
        self.obj.add(self.PREFIX, self.KEY, self.callback_mock, one_shot=False)
        self.obj.process(self.PREFIX, self.KEY, self)
        self.assertIn(self.KEY, self.obj._stack[self.PREFIX])
    def test_process_non_one_shot_callback_not_removed(self):
        self.obj.add(self.PREFIX, self.KEY, self.callback_mock, one_shot=False)
        self.obj.process(self.PREFIX, self.KEY, self)
        self.assertEqual(
            self.obj._stack[self.PREFIX][self.KEY][0][self.CALLBACK],
            self.callback_mock)
    def test_process_only_caller_fails(self):
        self.obj.add(self.PREFIX_CLASS, self.KEY, self.callback_mock,
                     only_caller=self.mock_caller)
        self.obj.process(self.PREFIX_CLASS, self.KEY, self)
        self.assertFalse(self.callback_mock.called)
    def test_process_only_caller_fails_no_removal(self):
        self.obj.add(self.PREFIX_CLASS, self.KEY, self.callback_mock,
                     only_caller=self.mock_caller)
        self.obj.process(self.PREFIX_CLASS, self.KEY, self)
        self.assertEqual(
            self.obj._stack[self.PREFIX][self.KEY][0][self.CALLBACK],
            self.callback_mock)
    def test_remove_with_no_callbacks_pending(self):
        self.obj = callback.CallbackManager()
        self.assertFalse(self.obj.remove(self.PREFIX, self.KEY,
                                         self.callback_mock))
    def test_remove_with_callback_true(self):
        self.obj.add(self.PREFIX_CLASS, self.KEY, self.callback_mock)
        self.assertTrue(self.obj.remove(self.PREFIX, self.KEY,
                                        self.callback_mock))
    def test_remove_with_callback_false(self):
        self.obj.add(self.PREFIX_CLASS, self.KEY, None)
        self.assertTrue(self.obj.remove(self.PREFIX, self.KEY,
                                        self.callback_mock))
    def test_remove_with_callback_true_empty_stack(self):
        self.obj.add(self.PREFIX_CLASS, self.KEY, self.callback_mock)
        self.obj.remove(prefix=self.PREFIX,
                        key=self.KEY,
                        callback_value=self.callback_mock)
        self.assertDictEqual(self.obj._stack, dict())
    def test_remove_with_callback_true_non_empty_stack(self):
        self.obj.add(self.PREFIX_CLASS, self.KEY, self.callback_mock)
        self.obj.add(self.PREFIX_CLASS, self.KEY, self.mock_caller)
        self.obj.remove(self.PREFIX, self.KEY, self.callback_mock)
        self.assertEqual(
            self.mock_caller,
            self.obj._stack[self.PREFIX][self.KEY][0][self.CALLBACK])
    def test_remove_prefix_key_with_other_key_prefix_remains(self):
        OTHER_KEY = 'Other Key'
        self.obj.add(self.PREFIX_CLASS, self.KEY, self.callback_mock)
        self.obj.add(self.PREFIX_CLASS, OTHER_KEY, self.mock_caller)
        self.obj.remove(self.PREFIX, self.KEY, self.callback_mock)
        self.assertIn(self.PREFIX, self.obj._stack)
    def test_remove_prefix_key_with_other_key_remains(self):
        OTHER_KEY = 'Other Key'
        self.obj.add(self.PREFIX_CLASS, self.KEY, self.callback_mock)
        self.obj.add(prefix=self.PREFIX_CLASS,
                     key=OTHER_KEY,
                     callback=self.mock_caller)
        self.obj.remove(self.PREFIX, self.KEY)
        self.assertIn(OTHER_KEY, self.obj._stack[self.PREFIX])
    def test_remove_prefix_key_with_other_key_callback_remains(self):
        OTHER_KEY = 'Other Key'
        self.obj.add(self.PREFIX_CLASS, self.KEY, self.callback_mock)
        self.obj.add(self.PREFIX_CLASS, OTHER_KEY, self.mock_caller)
        self.obj.remove(self.PREFIX, self.KEY)
        self.assertEqual(
            self.mock_caller,
            self.obj._stack[self.PREFIX][OTHER_KEY][0][self.CALLBACK])
    def test_remove_all(self):
        self.obj.add(self.PREFIX_CLASS, self.KEY, self.callback_mock)
        self.obj.remove_all(self.PREFIX, self.KEY)
        self.assertNotIn(self.PREFIX, self.obj._stack)
    def test_should_process_callback_true(self):
        self.obj.add(self.PREFIX_CLASS, self.KEY, self.callback_mock)
        value = self.obj._callback_dict(self.callback_mock, False, None, None)
        self.assertTrue(
            self.obj._should_process_callback(value, self.mock_caller, []))
    def test_should_process_callback_false_argument_fail(self):
        self.obj.clear()
        self.obj.add(self.PREFIX_CLASS, self.KEY, self.callback_mock,
                     arguments={'foo': 'baz'})
        self.assertFalse(self.obj._should_process_callback(self._callback_dict,
                                                           self.mock_caller,
                                                           [{'foo': 'baz'}]))
    # NOTE(review): this method name is defined TWICE — the definition
    # below shadows this one, so this test never runs.  Its assertion
    # (assertTrue with a mismatched only_caller) also looks inconsistent
    # with the "false" in its name; confirm intent before renaming one
    # of the two to unshadow it.
    def test_should_process_callback_false_only_caller_failure(self):
        self.obj.add(self.PREFIX_CLASS, self.KEY, self.callback_mock)
        value = self.obj._callback_dict(self.callback_mock, False, self, None)
        self.assertTrue(
            self.obj._should_process_callback(value, self.mock_caller, []))
    def test_should_process_callback_false_only_caller_failure(self):
        self.obj.add(self.PREFIX_CLASS, self.KEY, self.callback_mock)
        value = self.obj._callback_dict(self.callback_mock, False,
                                        self.mock_caller, None)
        self.assertTrue(
            self.obj._should_process_callback(value, self.mock_caller, []))
    def test_dict(self):
        self.assertDictEqual(self.obj._callback_dict(self.callback_mock, True,
                                                     self.mock_caller,
                                                     self.ARGUMENTS_VALUE),
                             self._callback_dict)
    def test_arguments_match_no_arguments(self):
        self.assertFalse(self.obj._arguments_match(self._callback_dict, []))
    def test_arguments_match_dict_argument(self):
        self.assertTrue(self.obj._arguments_match(self._callback_dict,
                                                  [self.ARGUMENTS_VALUE]))
    def test_arguments_match_dict_argument_no_attribute(self):
        self.assertFalse(self.obj._arguments_match(self._callback_dict, [{}]))
    def test_arguments_match_dict_argument_no_match(self):
        self.assertFalse(self.obj._arguments_match(self._callback_dict,
                                                   [{'foo': 'baz'}]))
    def test_arguments_match_obj_argument(self):
        class TestObj(object):
            foo = 'bar'
        test_instance = TestObj()
        self.assertTrue(self.obj._arguments_match(self._callback_dict,
                                                  [test_instance]))
    def test_arguments_match_obj_no_attribute(self):
        class TestObj(object):
            qux = 'bar'
        test_instance = TestObj()
        self.assertFalse(self.obj._arguments_match(self._callback_dict,
                                                   [test_instance]))
    def test_arguments_match_obj_argument_no_match(self):
        class TestObj(object):
            foo = 'baz'
        test_instance = TestObj()
        self.assertFalse(self.obj._arguments_match(self._callback_dict,
                                                   [test_instance]))
    def test_arguments_match_obj_argument_with_method(self):
        class TestFrame(object):
            method = None
        class MethodObj(object):
            foo = 'bar'
        test_instance = TestFrame()
        test_instance.method = MethodObj()
        self.assertTrue(self.obj._arguments_match(self._callback_dict,
                                                  [test_instance]))
    def test_arguments_match_obj_argument_with_method_no_match(self):
        class TestFrame(object):
            method = None
        class MethodObj(object):
            foo = 'baz'
        test_instance = TestFrame()
        test_instance.method = MethodObj()
        self.assertFalse(self.obj._arguments_match(self._callback_dict,
                                                   [test_instance]))
| bsd-3-clause |
Branlala/docker-sickbeardfr | sickbeard/sickbeard/providers/btn.py | 35 | 12781 | # coding=utf-8
# Author: Daniël Heimans
# URL: http://code.google.com/p/sickbeard
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import sickbeard
import generic
from sickbeard import scene_exceptions
from sickbeard import logger
from sickbeard import tvcache
from sickbeard.helpers import sanitizeSceneName
from sickbeard.common import Quality
from sickbeard.exceptions import ex, AuthException
from lib import jsonrpclib
import datetime
import time
import socket
import math
import pprint
class BTNProvider(generic.TorrentProvider):
    def __init__(self):
        # Register with the generic torrent-provider machinery under
        # the display name "BTN".
        generic.TorrentProvider.__init__(self, "BTN")
        self.supportsBacklog = True
        self.cache = BTNCache(self)
        self.url = "http://broadcasthe.net"
    def isEnabled(self):
        # Mirrors the global BTN on/off switch from the configuration.
        return sickbeard.BTN
    def imageName(self):
        # Icon filename used by the web UI for this provider.
        return 'btn.png'
def checkAuthFromData(self, data):
result = True
if 'api-error' in data:
logger.log("Error in sickbeard data retrieval: " + data['api-error'], logger.ERROR)
result = False
return result
    def _doSearch(self, search_params, show=None, season=None):
        """Query the BTN API and return the raw torrent-info dicts.

        Paginates through the API (1000 results per request) when the
        first response indicates more results are available.
        """
        params = {}
        apikey = sickbeard.BTN_API_KEY
        if search_params:
            params.update(search_params)
        search_results = self._api_call(apikey, params)
        if not search_results:
            return []
        if 'torrents' in search_results:
            found_torrents = search_results['torrents']
        else:
            found_torrents = {}
        # We got something, we know the API sends max 1000 results at a time.
        # See if there are more than 1000 results for our query, if not we
        # keep requesting until we've got everything.
        # max 150 requests per minute so limit at that
        max_pages = 150
        results_per_page = 1000.0
        if 'results' in search_results and search_results['results'] >= results_per_page:
            pages_needed = int(math.ceil(int(search_results['results']) / results_per_page))
            if pages_needed > max_pages:
                pages_needed = max_pages
            # +1 because range(1,4) = 1, 2, 3
            for page in range(1,pages_needed+1):
                search_results = self._api_call(apikey, params, results_per_page, page * results_per_page)
                # Note that these are individual requests and might time out individually. This would result in 'gaps'
                # in the results. There is no way to fix this though.
                if 'torrents' in search_results:
                    found_torrents.update(search_results['torrents'])
        results = []
        for torrentid, torrent_info in found_torrents.iteritems():
            (title, url) = self._get_title_and_url(torrent_info)
            if not title or not url:
                logger.log(u"The BTN provider did not return both a valid title and URL for search parameters: " + str(params) + " but returned " + str(torrent_info), logger.WARNING)
            # Appended unconditionally — entries lacking title/url are
            # only warned about, not filtered out.
            results.append(torrent_info)
        # Disabled this because it overspammed the debug log a bit too much
        # logger.log(u'BTN provider returning the following results for search parameters: ' + str(params), logger.DEBUG)
        # for result in results:
        # (title, result) = self._get_title_and_url(result)
        # logger.log(title, logger.DEBUG)
        return results
    def _api_call(self, apikey, params={}, results_per_page=1000, offset=0):
        """Perform one JSON-RPC getTorrentsSearch call against the BTN API.

        Returns the decoded response dict, a dict with an 'api-error'
        key on protocol errors, or an empty dict on timeouts and other
        failures.  The mutable default for ``params`` is safe here: it
        is only read, never mutated.
        """
        server = jsonrpclib.Server('http://api.btnapps.net')
        search_results ={}
        try:
            search_results = server.getTorrentsSearch(apikey, params, int(results_per_page), int(offset))
        except jsonrpclib.jsonrpc.ProtocolError, error:
            logger.log(u"JSON-RPC protocol error while accessing BTN API: " + ex(error), logger.ERROR)
            # Surface the error to callers via checkAuthFromData().
            search_results = {'api-error': ex(error)}
            return search_results
        except socket.timeout:
            logger.log(u"Timeout while accessing BTN API", logger.WARNING)
        except socket.error, error:
            # Note that sometimes timeouts are thrown as socket errors
            logger.log(u"Socket error while accessing BTN API: " + error[1], logger.ERROR)
        except Exception, error:
            errorstring = str(error)
            # Strip surrounding angle brackets from repr-style messages.
            if(errorstring.startswith('<') and errorstring.endswith('>')):
                errorstring = errorstring[1:-1]
            logger.log(u"Unknown error while accessing BTN API: " + errorstring, logger.ERROR)
        return search_results
def _get_title_and_url(self, search_result):
# The BTN API gives a lot of information in response,
# however Sick Beard is built mostly around Scene or
# release names, which is why we are using them here.
if 'ReleaseName' in search_result and search_result['ReleaseName']:
title = search_result['ReleaseName']
else:
# If we don't have a release name we need to get creative
title = u''
if 'Series' in search_result:
title += search_result['Series']
if 'GroupName' in search_result:
title += '.' + search_result['GroupName'] if title else search_result['GroupName']
if 'Resolution' in search_result:
title += '.' + search_result['Resolution'] if title else search_result['Resolution']
if 'Source' in search_result:
title += '.' + search_result['Source'] if title else search_result['Source']
if 'Codec' in search_result:
title += '.' + search_result['Codec'] if title else search_result['Codec']
if 'DownloadURL' in search_result:
url = search_result['DownloadURL']
else:
url = None
return (title, url)
def _get_season_search_strings(self, show, season=None):
if not show:
return [{}]
search_params = []
name_exceptions = scene_exceptions.get_scene_exceptions(show.tvdbid) + [show.name]
for name in name_exceptions:
current_params = {}
if show.tvdbid:
current_params['tvdb'] = show.tvdbid
elif show.tvrid:
current_params['tvrage'] = show.tvrid
else:
# Search by name if we don't have tvdb or tvrage id
current_params['series'] = sanitizeSceneName(name)
if season != None:
whole_season_params = current_params.copy()
partial_season_params = current_params.copy()
# Search for entire seasons: no need to do special things for air by date shows
whole_season_params['category'] = 'Season'
whole_season_params['name'] = 'Season ' + str(season)
search_params.append(whole_season_params)
# Search for episodes in the season
partial_season_params['category'] = 'Episode'
if show.air_by_date:
# Search for the year of the air by date show
partial_season_params['name'] = str(season.split('-')[0])
else:
# Search for any result which has Sxx in the name
partial_season_params['name'] = 'S%02d' % int(season)
search_params.append(partial_season_params)
else:
search_params.append(current_params)
return search_params
def _get_episode_search_strings(self, ep_obj):
if not ep_obj:
return [{}]
search_params = {'category':'Episode'}
if ep_obj.show.tvdbid:
search_params['tvdb'] = ep_obj.show.tvdbid
elif ep_obj.show.tvrid:
search_params['tvrage'] = ep_obj.show.rid
else:
search_params['series'] = sanitizeSceneName(ep_obj.show_name)
if ep_obj.show.air_by_date:
date_str = str(ep_obj.airdate)
# BTN uses dots in dates, we just search for the date since that
# combined with the series identifier should result in just one episode
search_params['name'] = date_str.replace('-','.')
else:
# Do a general name search for the episode, formatted like SXXEYY
search_params['name'] = "S%02dE%02d" % (ep_obj.season,ep_obj.episode)
to_return = [search_params]
# only do scene exceptions if we are searching by name
if 'series' in search_params:
# add new query string for every exception
name_exceptions = scene_exceptions.get_scene_exceptions(ep_obj.show.tvdbid)
for cur_exception in name_exceptions:
# don't add duplicates
if cur_exception == ep_obj.show.name:
continue
# copy all other parameters before setting the show name for this exception
cur_return = search_params.copy()
cur_return['series'] = sanitizeSceneName(cur_exception)
to_return.append(cur_return)
return to_return
def getQuality(self, item):
quality = None
(title,url) = self._get_title_and_url(item)
quality = Quality.nameQuality(title)
return quality
    def _doGeneralSearch(self, search_string):
        """Run a free-text search against the BTN API.

        The API's 'search' parameter matches as broadly as it can -- it can
        hit episode overviews as well as titles -- so use with caution.
        """
        return self._doSearch({'search': search_string})
class BTNCache(tvcache.TVCache):
    """RSS-style result cache for the BTN provider, fed from the JSON-RPC API."""
    def __init__(self, provider):
        tvcache.TVCache.__init__(self, provider)
        # At least 15 minutes between queries
        self.minTime = 15
    def updateCache(self):
        """Refresh the cache from the provider when the minimum interval passed.

        Returns [] when the provider returned no data, otherwise None
        (inconsistent, but preserved).  Raises AuthException when the
        response fails the auth check.
        """
        if not self.shouldUpdate():
            return
        data = self._getRSSData()
        # As long as we got something from the provider we count it as an update
        if data:
            self.setLastUpdate()
        else:
            return []
        logger.log(u"Clearing "+self.provider.name+" cache and updating with new information")
        self._clearCache()
        if not self._checkAuth(data):
            raise AuthException("Your authentication info for "+self.provider.name+" is incorrect, check your config")
        # By now we know we've got data and no auth errors, all we need to do is put it in the database
        for item in data:
            self._parseItem(item)
    def _getRSSData(self):
        """Fetch torrents uploaded since the last update, clamped to 15min..24h."""
        # Get the torrents uploaded since last check.
        seconds_since_last_update = math.ceil(time.time() - time.mktime(self._getLastUpdate().timetuple()))
        # default to 15 minutes
        if seconds_since_last_update < 15*60:
            seconds_since_last_update = 15*60
        # Set maximum to 24 hours of "RSS" data search, older things will need to be done through backlog
        if seconds_since_last_update > 24*60*60:
            logger.log(u"The last known successful \"RSS\" update on the BTN API was more than 24 hours ago (%i hours to be precise), only trying to fetch the last 24 hours!" %(int(seconds_since_last_update)//(60*60)), logger.WARNING)
            seconds_since_last_update = 24*60*60
        # BTN's 'age' filter takes a '<=N' (seconds) expression.
        age_string = "<=%i" % seconds_since_last_update
        search_params={'age': age_string}
        data = self.provider._doSearch(search_params)
        return data
    def _parseItem(self, item):
        """Store one API result in the cache if it has both a title and a URL."""
        (title, url) = self.provider._get_title_and_url(item)
        if not title or not url:
            logger.log(u"The result returned from the BTN regular search is incomplete, this result is unusable", logger.ERROR)
            return
        logger.log(u"Adding item from regular BTN search to cache: " + title, logger.DEBUG)
        self._addCacheEntry(title, url)
    def _checkAuth(self, data):
        # Delegate auth validation of the raw response to the provider.
        return self.provider.checkAuthFromData(data)
provider = BTNProvider() | mit |
arseneyr/essentia | test/src/unittest/stats/test_median.py | 1 | 1737 | #!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
class TestMedian(TestCase):
    """Unit tests for essentia's Median algorithm."""
    def testEmpty(self):
        # The median of an empty sequence is undefined and must fail.
        self.assertComputeFails(Median(), [])
    def testZero(self):
        result = Median()([0]*10)
        self.assertEqual(result, 0)
    def testOne(self):
        # A single element is its own median.
        result = Median()([100])
        self.assertEqual(result, 100)
    def testMulti(self):
        result = Median()([5, 8, 4, 9, 1])
        self.assertEqual(result, 5)
    def testNegatives(self):
        result = Median()([3, 7, -45, 2, -1, 0])
        self.assertEqual(result, 1)
    def testRational(self):
        result = Median()([3.1459, -0.4444, .00002])
        self.assertAlmostEqual(result, 0.00002)
    def testEvenSize(self):
        # Even-length input: the median is the mean of the two middle values.
        result = Median()([1, 4, 3, 10])
        self.assertAlmostEqual(result, 3.5)
suite = allTests(TestMedian)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
| agpl-3.0 |
tndatacommons/tndata_backend | tndata_backend/goals/migrations/0007_auto_20150210_2233.py | 2 | 10684 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import goals.models
class Migration(migrations.Migration):
dependencies = [
('goals', '0006_auto_20150209_1746'),
]
operations = [
migrations.CreateModel(
name='BehaviorAction',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('name', models.CharField(max_length=128, db_index=True, unique=True, help_text='Unique, informal and internal. Conversational identifier only.')),
('name_slug', models.SlugField(max_length=128, unique=True)),
('notes', models.TextField(blank=True, help_text='Misc nodes about this behavior')),
('source_notes', models.TextField(blank=True, help_text='Narrative notes about the source')),
('source_link', models.URLField(max_length=256, blank=True, null=True, help_text='A link to the source.')),
('title', models.CharField(max_length=256, db_index=True, unique=True, help_text='Unique, Formal title. Displayed as a caption in the app.')),
('description', models.TextField(blank=True, help_text='Brief description.')),
('case', models.TextField(blank=True, help_text='Brief description of why this is useful.')),
('outcome', models.TextField(blank=True, help_text='Brief description of what the user can expect to get by adopting the behavior')),
('narrative_block', models.TextField(blank=True, help_text='Persuasive narrative description, case, outcome of the behavior')),
('external_resource', models.CharField(max_length=256, blank=True, help_text='A link or reference to an outside resource necessary for adoption')),
('notification_text', models.CharField(max_length=256, blank=True, help_text='Text message delivered through notification channel')),
('icon', models.ImageField(blank=True, upload_to=goals.models._behavior_icon_path, null=True, help_text='A Small icon for the Action.')),
('image', models.ImageField(blank=True, upload_to=goals.models._behavior_img_path, null=True, help_text='Upload an image to be displayed for the Behavior Action.')),
('sequence_order', models.IntegerField(default=0, db_index=True, help_text='Order/number of action in stepwise behavior sequence')),
],
options={
'verbose_name_plural': 'Behavior Actions',
'verbose_name': 'Behavior Action',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BehaviorSequence',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('name', models.CharField(max_length=128, db_index=True, unique=True, help_text='Unique, informal and internal. Conversational identifier only.')),
('name_slug', models.SlugField(max_length=128, unique=True)),
('notes', models.TextField(blank=True, help_text='Misc nodes about this behavior')),
('source_notes', models.TextField(blank=True, help_text='Narrative notes about the source')),
('source_link', models.URLField(max_length=256, blank=True, null=True, help_text='A link to the source.')),
('title', models.CharField(max_length=256, db_index=True, unique=True, help_text='Unique, Formal title. Displayed as a caption in the app.')),
('description', models.TextField(blank=True, help_text='Brief description.')),
('case', models.TextField(blank=True, help_text='Brief description of why this is useful.')),
('outcome', models.TextField(blank=True, help_text='Brief description of what the user can expect to get by adopting the behavior')),
('narrative_block', models.TextField(blank=True, help_text='Persuasive narrative description, case, outcome of the behavior')),
('external_resource', models.CharField(max_length=256, blank=True, help_text='A link or reference to an outside resource necessary for adoption')),
('notification_text', models.CharField(max_length=256, blank=True, help_text='Text message delivered through notification channel')),
('icon', models.ImageField(blank=True, upload_to=goals.models._behavior_icon_path, null=True, help_text='A Small icon for the Action.')),
('image', models.ImageField(blank=True, upload_to=goals.models._behavior_img_path, null=True, help_text='Upload an image to be displayed for the Behavior Action.')),
('informal_list', models.TextField(blank=True, help_text='Working list of the behavior sequence. Mnemonic only.')),
('categories', models.ManyToManyField(blank=True, null=True, to='goals.Category', help_text='Select the Categories in which this should appear.')),
],
options={
'verbose_name_plural': 'Behavior Sequences',
'verbose_name': 'Behavior Sequence',
'abstract': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Goal',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('name', models.CharField(max_length=128, db_index=True, unique=True, help_text='An Internal name for this goal.')),
('name_slug', models.SlugField(max_length=128, unique=True)),
('title', models.CharField(max_length=256, db_index=True, unique=True, help_text='A public Title for this goal.')),
('description', models.TextField(blank=True, help_text='Short description of this Category.')),
('outcome', models.TextField(blank=True, help_text='Desired outcome of this Goal.')),
('categories', models.ManyToManyField(blank=True, null=True, to='goals.Category', help_text='Select the Categories in which this Goal should appear.')),
('interests', models.ManyToManyField(blank=True, null=True, to='goals.Interest', help_text='Select the Interests in which this Goal should be organized.')),
],
options={
'verbose_name_plural': 'Goals',
'verbose_name': 'Goal',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Trigger',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('name', models.CharField(max_length=128, db_index=True, unique=True, help_text='Give this trigger a helpful name. It must be unique, and will be used in drop-down lists and other places where youcan select it later.')),
('name_slug', models.SlugField(max_length=128, unique=True)),
('trigger_type', models.CharField(max_length=10, choices=[('time', 'Time'), ('place', 'Place')], blank=True, help_text='The type of Trigger used, e.g. Time, Place, etc')),
('frequency', models.CharField(max_length=10, choices=[('one-time', 'One Time'), ('daily', 'Daily'), ('weekly', 'Weekly'), ('monthly', 'Monthly'), ('yearly', 'Yearly')], blank=True, help_text='How frequently a trigger is fired')),
('time', models.TimeField(blank=True, null=True, help_text='Time the trigger/notification will fire, in 24-hour format.')),
('date', models.DateField(blank=True, null=True, help_text='The date of the trigger/notification. If the trigger is recurring, notifications will start on this date.')),
('location', models.CharField(max_length=256, blank=True, help_text="Only used when Trigger type is location. Can be 'home', 'work', or a (lat, long) pair.")),
('text', models.CharField(max_length='140', blank=True, help_text='The Trigger text shown to the user.')),
('instruction', models.TextField(blank=True, help_text='Instructions sent to the user.')),
],
options={
'verbose_name_plural': 'Triggers',
'verbose_name': 'Trigger',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='behaviorsequence',
name='default_trigger',
field=models.ForeignKey(to='goals.Trigger', blank=True, help_text='A trigger/reminder for this behavior', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='behaviorsequence',
name='goals',
field=models.ManyToManyField(blank=True, null=True, to='goals.Goal', help_text='Select the Goal(s) that this Behavior achieves.'),
preserve_default=True,
),
migrations.AddField(
model_name='behaviorsequence',
name='interests',
field=models.ManyToManyField(blank=True, null=True, to='goals.Interest', help_text='Select the Interest(s) under which this should be organized.'),
preserve_default=True,
),
migrations.AddField(
model_name='behavioraction',
name='default_trigger',
field=models.ForeignKey(to='goals.Trigger', blank=True, help_text='A trigger/reminder for this behavior', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='behavioraction',
name='sequence',
field=models.ForeignKey(to='goals.BehaviorSequence'),
preserve_default=True,
),
migrations.AddField(
model_name='interest',
name='title',
field=models.CharField(default='', help_text='Formal title, used publicly.', blank=True, max_length=128),
preserve_default=True,
),
migrations.AlterField(
model_name='interest',
name='description',
field=models.TextField(blank=True, help_text='Short description of this Interest.'),
preserve_default=True,
),
migrations.AlterField(
model_name='interest',
name='name',
field=models.CharField(max_length=128, db_index=True, unique=True, help_text='An informal/internal name. Conversational identifier only.'),
preserve_default=True,
),
]
| mit |
hpe-storage/python-lefthandclient | test/HPELeftHandMockServer_ssh.py | 2 | 5278 | # (c) Copyright 2015 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Test SSH server."""
import argparse
import logging
import os
import shlex
import socket
import sys
import threading
import paramiko
paramiko.util.log_to_file('paramiko_server.log')
class CliParseException(Exception):
pass
class CliArgumentParser(argparse.ArgumentParser):
def error(self, message):
usage = super(CliArgumentParser, self).format_help()
full_message = "%s\r\n%s" % (message, usage)
raise CliParseException(full_message)
def parse_args(self, *args):
return super(CliArgumentParser, self).parse_args(args[1:])
class Cli(object):
    """Minimal mock of the LeftHand CLI used by the fake SSH server.

    Commands are dispatched to ``do_cli_<name>`` methods; unknown commands
    fall through to :meth:`do_cli_other`, which reports a failure string.
    """
    def __init__(self):
        self.log_name = 'paramiko.LeftHandCLI'
        self.logger = paramiko.util.get_logger(self.log_name)
        # State holders for command handlers -- presumably file persona
        # groups / virtual file servers; unused by the handlers shown here.
        self.fpgs = {}
        self.vfss = {}

    def do_cli_other(self, *args):
        """Fallback handler for commands with no dedicated do_cli_* method."""
        msg = 'FAIL! Mock SSH CLI does not know how to "%s".' % ' '.join(args)
        self.logger.log(logging.ERROR, msg)
        return msg

    def do_cli_exit(self, *args):
        """Handle the 'exit' command."""
        self.logger.log(logging.INFO, "quiting... g'bye")
        return ''

    def do_cli_quit(self, *args):
        """Handle the 'quit' command."""
        self.logger.log(logging.INFO, "quiting... g'bye")
        return ''

    def process_command(self, cmd):
        """Parse *cmd*, dispatch it and return the handler's output string.

        ``None`` and empty/whitespace-only commands produce ''.  Handler
        exceptions are returned as their string form rather than raised, so
        the mock server keeps serving.
        """
        self.logger.log(logging.INFO, cmd)
        if cmd is None:
            # Fixed: a stray debug print("returnNone") used to leak to the
            # server's stdout here; keep it on the logger instead.
            self.logger.log(logging.DEBUG, "process_command called with None")
            return ''
        args = shlex.split(cmd)
        if not args:
            return ''
        method = getattr(self, 'do_cli_' + args[0], self.do_cli_other)
        try:
            return method(*args)
        except Exception as cmd_exception:
            # Surface handler errors to the SSH client instead of crashing.
            return str(cmd_exception)
class ParamikoServer(paramiko.ServerInterface):
    """Wide-open paramiko server interface for the mock appliance.

    Every authentication attempt and every channel/pty/shell request is
    accepted; this exists only so tests can drive the fake CLI over SSH.
    """
    def __init__(self):
        # Set when a shell is requested; the main loop waits on it.
        self.event = threading.Event()
    def check_channel_request(self, kind, chanid):
        # Accept any channel kind.
        return paramiko.OPEN_SUCCEEDED
    def check_auth_none(self, username):
        # No credentials required.
        return paramiko.AUTH_SUCCESSFUL
    def check_auth_password(self, username, password):
        # Any password is accepted.
        return paramiko.AUTH_SUCCESSFUL
    def check_auth_publickey(self, username, key):
        # Any public key is accepted.
        return paramiko.AUTH_SUCCESSFUL
    def get_allowed_auths(self, username):
        return 'password,publickey,none'
    def check_channel_shell_request(self, c):
        # Signal the main loop that the client requested a shell.
        self.event.set()
        return True
    def check_channel_pty_request(self, c, term, width, height, pixelwidth,
                                  pixelheight, modes):
        return True
if __name__ == "__main__":
    # Port may be given as the first CLI argument; default to 2200.
    if len(sys.argv) > 1:
        port = int(sys.argv[1])
    else:
        port = 2200
    # The server identifies itself with the current user's RSA host key.
    key_file = os.path.expanduser('~/.ssh/id_rsa')
    host_key = paramiko.RSAKey(filename=key_file)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind(('', int(port)))
    s.listen(60)
    print("Listening for SSH client connections...")
    # Only a single TCP connection is ever accepted per process run.
    connection, address = s.accept()
    transport = None
    channel = None
    try:
        transport = paramiko.Transport(connection)
        transport.load_server_moduli()
        transport.add_server_key(host_key)
        server = ParamikoServer()
        transport.start_server(server=server)
        cliProcessor = Cli()
        while True:
            channel = transport.accept(60)
            if channel is None:
                print("Failed to get SSH channel.")
                sys.exit(1)
            print("Connected")
            # Wait for the client to request a shell (set by ParamikoServer).
            server.event.wait(10)
            if not server.event.isSet():
                print("No shell set")
                sys.exit(1)
            # Read commands line-by-line until the client sends exit/quit.
            fio = channel.makefile('rU')
            commands = []
            command = None
            while not (command == 'exit' or command == 'quit'):
                command = fio.readline().strip('\r\n')
                commands.append(command)
            # Echo the raw command batch back first.
            to_send = '\r\n'.join(commands)
            channel.send(to_send)
            # Then replay each command with a fake prompt and its CLI output.
            output = ['']
            prompt = "FAKE-LeftHand-CLI cli% "
            for cmd in commands:
                output.append('%s%s' % (prompt, cmd))
                result = cliProcessor.process_command(cmd)
                if result is not None:
                    output.append(result)
            output_to_send = '\r\n'.join(output)
            channel.send(output_to_send)
            channel.close()
            print("Disconnected")
    finally:
        # Best-effort cleanup; closing an already-closed channel is harmless.
        if channel:
            channel.close()
        if transport:
            try:
                transport.close()
                print("transport closed")
            except Exception as e:
                print("transport close exception %s" % e)
                pass
| apache-2.0 |
JuanCTorres/interview-prep-solutions | codelab/listcycle.py | 1 | 1048 | """
https://codelab.interviewbit.com/problems/listcycle/
"""
class ListNode:
    """A node of a singly linked list."""
    def __init__(self, x):
        # Node payload.
        self.val = x
        # Next node in the list; None marks the tail.
        self.next = None
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    # @param A : head node of linked list
    # @return the first node in the cycle in the linked list
    def detectCycle(self, A):
        """Return the node where the cycle in list *A* begins, or None.

        Uses Floyd's tortoise-and-hare.  The previous implementation
        advanced the fast pointer from the slow one (``j = i.next.next``),
        so the two pointers kept a constant distance and never met for any
        cycle longer than two nodes (looping forever); here each pointer is
        advanced independently, which is the correct algorithm.
        """
        slow = fast = A
        while fast is not None and fast.next is not None:
            slow = slow.next
            fast = fast.next.next
            if slow is fast:
                # Meeting point found inside the cycle.  Restart one pointer
                # at the head; stepping both by one lands them together at
                # the cycle's entry node.
                slow = A
                while slow is not fast:
                    slow = slow.next
                    fast = fast.next
                return slow
        # Fast pointer reached the tail: the list is acyclic.
        return None
| mit |
iwaseyusuke/ryu | ryu/services/protocols/bgp/info_base/evpn.py | 10 | 1810 | # Copyright (C) 2016 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines data types and models required specifically for EVPN support.
"""
import logging
from ryu.lib.packet.bgp import EvpnNLRI
from ryu.lib.packet.bgp import RF_L2_EVPN
from ryu.services.protocols.bgp.info_base.vpn import VpnDest
from ryu.services.protocols.bgp.info_base.vpn import VpnPath
from ryu.services.protocols.bgp.info_base.vpn import VpnTable
LOG = logging.getLogger('bgpspeaker.info_base.evpn')
class EvpnDest(VpnDest):
    """EVPN destination: stores all known EVPN paths for one NLRI."""
    # Address family handled by this destination type.
    ROUTE_FAMILY = RF_L2_EVPN
class EvpnTable(VpnTable):
    """Global table to store EVPN routing information.

    Uses `EvpnDest` to store destination information for each known EVPN
    path.
    """
    ROUTE_FAMILY = RF_L2_EVPN
    # Destination class instantiated per NLRI by the base VpnTable.
    VPN_DEST_CLASS = EvpnDest
class EvpnPath(VpnPath):
    """Represents a way of reaching an EVPN destination."""
    ROUTE_FAMILY = RF_L2_EVPN
    VRF_PATH_CLASS = None  # set in __init__ - anti cyclic import hack
    NLRI_CLASS = EvpnNLRI
    def __init__(self, *args, **kwargs):
        super(EvpnPath, self).__init__(*args, **kwargs)
        # Imported here rather than at module scope to break the circular
        # import between this module and vrfevpn.
        from ryu.services.protocols.bgp.info_base.vrfevpn import VrfEvpnPath
        self.VRF_PATH_CLASS = VrfEvpnPath
| apache-2.0 |
dermoth/gramps | gramps/gui/widgets/reorderfam.py | 2 | 8092 | # Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2001-2007 Donald N. Allingham
# Copyright (C) 2009-2010 Gary Burton
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# Set up logging
#
#-------------------------------------------------------------------------
import logging
_LOG = logging.getLogger("gui.widgets.reorderfam")
#-------------------------------------------------------------------------
#
# GTK/Gnome modules
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# Gramps Modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from gramps.gen.db import DbTxn
from gramps.gen.const import URL_MANUAL_PAGE
from ..listmodel import ListModel
from ..display import display_help
from ..managedwindow import ManagedWindow
from ..glade import Glade
from gramps.gen.display.name import displayer as name_displayer
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
WIKI_HELP_PAGE = URL_MANUAL_PAGE + "_-_Categories"
WIKI_HELP_SEC = _('manual|Reorder_Relationships_dialog')
#-------------------------------------------------------------------------
#
# Reorder class
#
#-------------------------------------------------------------------------
class Reorder(ManagedWindow):
    """
    Interface to reorder the families a person is parent in
    """

    def __init__(self, state, uistate, track, handle):
        """Build the reorder dialog for the person referenced by *handle*."""
        xml = Glade('reorder.glade')
        top = xml.toplevel
        self.dbstate = state
        ManagedWindow.__init__(self, uistate, track, self)

        self.person = self.dbstate.db.get_person_from_handle(handle)
        # NOTE(review): assumed to be the person's own live lists, so the
        # in-place reordering below is persisted by commit_person -- confirm
        # that get_*_handle_list() does not return copies.
        self.parent_list = self.person.get_parent_family_handle_list()
        self.family_list = self.person.get_family_handle_list()

        # Reordering only makes sense with more than one entry.
        penable = len(self.parent_list) > 1
        fenable = len(self.family_list) > 1

        self.set_window(top, None, _("Reorder Relationships"))
        self.setup_configs('interface.reorder', 500, 400)

        self.ptree = xml.get_object('ptree')
        self.pmodel = ListModel(self.ptree,
                                [(_('Father'), -1, 200),
                                 (_('Mother'), -1, 200),
                                 ('', -1, 0)])
        self.ftree = xml.get_object('ftree')
        self.fmodel = ListModel(self.ftree,
                                [(_('Spouse'), -1, 200),
                                 (_('Relationship'), -1, 200),
                                 ('', -1, 0)])

        xml.get_object('ok').connect('clicked', self.ok_clicked)
        xml.get_object('cancel').connect('clicked', self.cancel_clicked)
        xml.get_object('help').connect(
            'clicked', lambda x: display_help(WIKI_HELP_PAGE, WIKI_HELP_SEC))

        fup = xml.get_object('fup')
        fup.connect('clicked', self.fup_clicked)
        fup.set_sensitive(fenable)
        fdown = xml.get_object('fdown')
        fdown.connect('clicked', self.fdown_clicked)
        fdown.set_sensitive(fenable)
        pup = xml.get_object('pup')
        pup.connect('clicked', self.pup_clicked)
        pup.set_sensitive(penable)
        pdown = xml.get_object('pdown')
        pdown.connect('clicked', self.pdown_clicked)
        pdown.set_sensitive(penable)

        self.fill_data()
        self.show()

    def fill_data(self):
        """Populate both list models."""
        self.fill_parents()
        self.fill_family()

    def _person_name(self, handle):
        """Return the display name for a person *handle*, or '' if unavailable."""
        if handle:
            person = self.dbstate.db.get_person_from_handle(handle)
            if person:
                return name_displayer.display(person)
        return ""

    def fill_parents(self):
        """List the families in which the person is a child."""
        for handle in self.parent_list:
            family = self.dbstate.db.get_family_from_handle(handle)
            fname = self._person_name(family.get_father_handle())
            mname = self._person_name(family.get_mother_handle())
            self.pmodel.add([fname, mname, handle])

    def fill_family(self):
        """List the person's own families, labelled with the other spouse."""
        for handle in self.family_list:
            family = self.dbstate.db.get_family_from_handle(handle)
            fhandle = family.get_father_handle()
            mhandle = family.get_mother_handle()
            # Show the partner: whichever parent is not this person.
            if fhandle and fhandle != self.person.handle:
                name = self._person_name(fhandle)
            else:
                name = self._person_name(mhandle)
            reltype = str(family.get_relationship())
            self.fmodel.add([name, reltype, handle])

    def cancel_clicked(self, obj):
        """Dismiss the dialog without saving."""
        self.close()

    def ok_clicked(self, obj):
        """Commit the reordered lists to the database and close."""
        name = name_displayer.display(self.person)
        msg = _("Reorder Relationships: %s") % name
        with DbTxn(msg, self.dbstate.db) as trans:
            self.dbstate.db.commit_person(self.person, trans)
        self.close()

    def _move_row(self, model, handle_list, row, new_row):
        """Move the selected row of *model* and *handle_list* to *new_row*."""
        store, the_iter = model.get_selected()
        data = model.get_data(the_iter, range(3))
        model.remove(the_iter)
        model.insert(new_row, data, None, 1)
        handle_list.insert(new_row, handle_list.pop(row))

    def pup_clicked(self, obj):
        """Moves the current parent-family selection up one row"""
        row = self.pmodel.get_selected_row()
        # Row 0 cannot move up; -1 means no selection.
        if not row or row == -1:
            return
        self._move_row(self.pmodel, self.parent_list, row, row - 1)

    def pdown_clicked(self, obj):
        """Moves the current parent-family selection down one row"""
        row = self.pmodel.get_selected_row()
        if row + 1 >= self.pmodel.count or row == -1:
            return
        self._move_row(self.pmodel, self.parent_list, row, row + 1)

    def fup_clicked(self, obj):
        """Moves the current family selection up one row"""
        row = self.fmodel.get_selected_row()
        if not row or row == -1:
            return
        self._move_row(self.fmodel, self.family_list, row, row - 1)

    def fdown_clicked(self, obj):
        """Moves the current family selection down one row"""
        row = self.fmodel.get_selected_row()
        if row + 1 >= self.fmodel.count or row == -1:
            return
        self._move_row(self.fmodel, self.family_list, row, row + 1)
| gpl-2.0 |
mlperf/training_results_v0.6 | Fujitsu/benchmarks/resnet/implementations/mxnet/python/mxnet/libinfo.py | 2 | 3536 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""Information about mxnet."""
from __future__ import absolute_import
import os
import platform
import logging
def find_lib_path():
    """Find MXNet dynamic library files.

    Candidates come from, in order: the MXNET_LIBRARY_PATH environment
    variable (absolute file paths only), LD_LIBRARY_PATH entries (POSIX),
    and well-known locations relative to this file.

    Returns
    -------
    lib_path : list(string)
        List of all found paths to the libraries.

    Raises
    ------
    RuntimeError
        If no library file exists at any candidate location.
    """
    lib_from_env = os.environ.get('MXNET_LIBRARY_PATH')
    if lib_from_env:
        if os.path.isfile(lib_from_env):
            if not os.path.isabs(lib_from_env):
                # A relative override is ignored (fall through to the search).
                logging.warning("MXNET_LIBRARY_PATH should be an absolute path, instead of: %s",
                                lib_from_env)
            else:
                if os.name == 'nt':
                    # Windows resolves dependent DLLs via PATH.
                    os.environ['PATH'] = os.environ['PATH'] + ';' + os.path.dirname(lib_from_env)
                return [lib_from_env]
        else:
            logging.warning("MXNET_LIBRARY_PATH '%s' doesn't exist", lib_from_env)

    curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    api_path = os.path.join(curr_path, '../../lib/')
    cmake_build_path = os.path.join(curr_path, '../../build/')
    dll_path = [curr_path, api_path, cmake_build_path]
    if os.name == 'nt':
        dll_path.append(os.path.join(curr_path, '../../build'))
        vs_configuration = 'Release'
        # The two architecture branches only differed in the windows/ subdir;
        # the '../../build' + configuration candidate is common to both.
        dll_path.append(os.path.join(curr_path, '../../build', vs_configuration))
        if platform.architecture()[0] == '64bit':
            dll_path.append(os.path.join(curr_path, '../../windows/x64', vs_configuration))
        else:
            dll_path.append(os.path.join(curr_path, '../../windows', vs_configuration))
    elif os.name == "posix" and os.environ.get('LD_LIBRARY_PATH', None):
        # Prepend LD_LIBRARY_PATH entries so they take precedence.
        dll_path[0:0] = [p.strip() for p in os.environ['LD_LIBRARY_PATH'].split(":")]
    if os.name == 'nt':
        os.environ['PATH'] = os.path.dirname(__file__) + ';' + os.environ['PATH']
        dll_path = [os.path.join(p, 'libmxnet.dll') for p in dll_path]
    elif platform.system() == 'Darwin':
        dll_path = [os.path.join(p, 'libmxnet.dylib') for p in dll_path] + \
                   [os.path.join(p, 'libmxnet.so') for p in dll_path]
    else:
        dll_path.append('../../../')
        dll_path = [os.path.join(p, 'libmxnet.so') for p in dll_path]
    lib_path = [p for p in dll_path if os.path.exists(p) and os.path.isfile(p)]
    if not lib_path:
        raise RuntimeError('Cannot find the MXNet library.\n' +
                           'List of candidates:\n' + '\n'.join(dll_path))
    return lib_path
# current version
__version__ = "1.3.1"
| apache-2.0 |
spencerlyon2/distcan | setup.py | 1 | 1060 | from distutils.core import setup
import os
long_desc = 'Add a fallback short description here'
if os.path.exists('README.txt'):
long_desc = open('README.txt').read()
# Write a versions.py file for class attribute
VERSION = "0.0.1"
def write_version_py(filename=None):
    """Generate distcan/version.py exposing the package version.

    Parameters
    ----------
    filename : str, optional
        Destination path.  Defaults to ``distcan/version.py`` next to
        this setup script.
    """
    # Module body written into version.py; its docstring warns against
    # hand-editing since the file is regenerated on every build.
    doc = ('"""\n'
           'This is a VERSION file and should NOT be manually altered'
           '\n"""')
    doc += '\nversion = "%s"' % VERSION
    if not filename:
        filename = os.path.join(
            os.path.dirname(__file__), "distcan", "version.py")
    # 'with' guarantees the handle is closed even if the write fails
    # (the original open/try/finally leaked nothing but was noisier).
    with open(filename, "w") as f:
        f.write(doc)
# Regenerate distcan/version.py so the installed package reports VERSION.
write_version_py()
# Setup
setup(name="distcan",
      packages=["distcan"],
      version=VERSION,
      description="Probability distributions in their canonical form",
      author="Spencer Lyon",
      author_email="spencer.lyon@stern.nyu.edu",
      url="https://github.com/spencerlyon2/distcan", # URL to the github repo
      keywords=["statistics", "distributions"],
      long_description=long_desc)
| mit |
slightstone/SickRage | lib/sqlalchemy/ext/declarative/api.py | 78 | 17872 | # ext/declarative/api.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Public API functions and helpers for declarative."""
from ...schema import Table, MetaData
from ...orm import synonym as _orm_synonym, mapper,\
comparable_property,\
interfaces, properties
from ...orm.util import polymorphic_union
from ...orm.base import _mapper_or_none
from ...util import OrderedDict
from ... import exc
import weakref
from .base import _as_declarative, \
_declarative_constructor,\
_DeferredMapperConfig, _add_attribute
from .clsregistry import _class_resolver
from . import clsregistry
def instrument_declarative(cls, registry, metadata):
    """Configure ``cls`` declaratively, using ``registry`` (any
    dictionary-like mapping) as the class registry and ``metadata``
    as its MetaData object.
    """
    already_instrumented = '_decl_class_registry' in cls.__dict__
    if already_instrumented:
        raise exc.InvalidRequestError(
            "Class %r already has been "
            "instrumented declaratively" % cls)
    cls._decl_class_registry = registry
    cls.metadata = metadata
    _as_declarative(cls, cls.__name__, cls.__dict__)
def has_inherited_table(cls):
    """Return True when any superclass of ``cls`` (excluding ``cls``
    itself) carries a mapped ``__table__``; otherwise return False.
    """
    # __mro__[1:] skips cls itself: only inherited tables count.
    return any(
        getattr(ancestor, '__table__', None) is not None
        for ancestor in cls.__mro__[1:]
    )
class DeclarativeMeta(type):
    # Metaclass used by declarative_base(): maps each user subclass at
    # class-creation time and routes later attribute assignment through
    # the declarative machinery.
    def __init__(cls, classname, bases, dict_):
        # The base class produced by declarative_base() already has
        # '_decl_class_registry' in its own __dict__, so it is skipped;
        # every user-defined subclass lacks it and gets mapped here.
        if '_decl_class_registry' not in cls.__dict__:
            _as_declarative(cls, classname, cls.__dict__)
        type.__init__(cls, classname, bases, dict_)
    def __setattr__(cls, key, value):
        # Setting an attribute on a mapped class after creation (e.g.
        # adding a Column) must also be instrumented declaratively.
        _add_attribute(cls, key, value)
def synonym_for(name, map_column=False):
    """Decorator, make a Python @property a query synonym for a column.

    A decorator version of :func:`~sqlalchemy.orm.synonym`; the decorated
    function becomes the 'descriptor', and the remaining arguments are
    passed through to synonym()::

        @synonym_for('col')
        @property
        def prop(self):
            return 'special sauce'

    The regular ``synonym()`` is also usable directly in a declarative
    setting and may be convenient for read/write properties::

        prop = synonym('col', descriptor=property(_read_prop, _write_prop))

    """
    return lambda fn: _orm_synonym(name, map_column=map_column, descriptor=fn)
def comparable_using(comparator_factory):
    """Decorator, allow a Python @property to be used in query criteria.

    A decorator front end to :func:`~sqlalchemy.orm.comparable_property`
    that forwards the comparator_factory and the decorated function::

        @comparable_using(MyComparatorType)
        @property
        def prop(self):
            return 'special sauce'

    The regular ``comparable_property()`` is also usable directly in a
    declarative setting and may be convenient for read/write properties::

        prop = comparable_property(MyComparatorType)

    """
    return lambda fn: comparable_property(comparator_factory, fn)
class declared_attr(interfaces._MappedAttribute, property):
    """Mark a class-level method as representing the definition of
    a mapped property or special declarative member name.

    @declared_attr turns the attribute into a scalar-like property
    invocable from the uninstantiated class.  Declarative treats
    attributes marked this way as returning a construct specific to
    mapping or declarative table configuration, named after the
    attribute itself.

    @declared_attr is most often applied to mixins, to define
    relationships shared by several implementors of the class::

        class ProvidesUser(object):
            "A mixin that adds a 'user' relationship to classes."

            @declared_attr
            def user(self):
                return relationship("User")

    It can also be applied to mapped classes, such as to provide a
    "polymorphic" scheme for inheritance::

        class Employee(Base):
            id = Column(Integer, primary_key=True)
            type = Column(String(50), nullable=False)

            @declared_attr
            def __tablename__(cls):
                return cls.__name__.lower()

            @declared_attr
            def __mapper_args__(cls):
                if cls.__name__ == 'Employee':
                    return {
                            "polymorphic_on":cls.type,
                            "polymorphic_identity":"Employee"
                    }
                else:
                    return {"polymorphic_identity":cls.__name__}

    .. versionchanged:: 0.8 :class:`.declared_attr` can be used with
       non-ORM or extension attributes, such as user-defined attributes
       or :func:`.association_proxy` objects, which will be assigned
       to the class at class construction time.

    """

    def __init__(self, fget, *arg, **kw):
        super(declared_attr, self).__init__(fget, *arg, **kw)
        # surface the wrapped function's docstring on the descriptor
        self.__doc__ = fget.__doc__

    def __get__(desc, self, cls):
        # Accessed on the class (not an instance): evaluate the wrapped
        # function against the class itself.  'desc' is the descriptor
        # instance (named 'self' by convention elsewhere).
        return desc.fget(cls)
def declarative_base(bind=None, metadata=None, mapper=None, cls=object,
                     name='Base', constructor=_declarative_constructor,
                     class_registry=None,
                     metaclass=DeclarativeMeta):
    """Construct a base class for declarative class definitions.

    The returned class carries a metaclass that produces appropriate
    :class:`~sqlalchemy.schema.Table` objects and makes the appropriate
    :func:`~sqlalchemy.orm.mapper` calls based on the information
    provided declaratively in the class and any subclasses of the class.

    :param bind: optional :class:`~sqlalchemy.engine.Connectable`,
      assigned as the ``bind`` attribute of the MetaData instance.
    :param metadata: optional :class:`~sqlalchemy.schema.MetaData`
      instance shared by all implicitly declared Tables; created if not
      provided, and reachable via the ``metadata`` attribute of the
      generated base class.
    :param mapper: optional callable, defaults to
      :func:`~sqlalchemy.orm.mapper`; used to map subclasses to their
      Tables.
    :param cls: a type, or tuple of types, the generated base class
      inherits from.  Defaults to :class:`object`.
    :param name: display name of the generated class (defaults to
      ``Base``); customizing it can improve clarity in tracebacks.
    :param constructor: defaults to
      :func:`~sqlalchemy.ext.declarative._declarative_constructor`, an
      __init__ assigning \**kwargs for declared fields and relationships
      to an instance.  Pass ``None`` to fall back to ``cls.__init__``
      via normal Python semantics.
    :param class_registry: optional dictionary serving as the registry
      of string class names -> mapped classes for :func:`.relationship`
      and others; may be shared between several declarative bases.
    :param metaclass: defaults to :class:`.DeclarativeMeta`; a metaclass
      or __metaclass__-compatible callable used as the meta type of the
      generated base class.

    .. seealso::

        :func:`.as_declarative`

    """
    lcl_metadata = metadata or MetaData()
    if bind:
        lcl_metadata.bind = bind

    if class_registry is None:
        class_registry = weakref.WeakValueDictionary()

    # accept either a single base type or a tuple of bases
    bases = cls if isinstance(cls, tuple) else (cls,)
    class_dict = dict(_decl_class_registry=class_registry,
                      metadata=lcl_metadata)

    if constructor:
        class_dict['__init__'] = constructor
    if mapper:
        class_dict['__mapper_cls__'] = mapper

    return metaclass(name, bases, class_dict)
def as_declarative(**kw):
    """
    Class decorator for :func:`.declarative_base`.

    A syntactical shortcut for the ``cls`` argument of
    :func:`.declarative_base`, converting the decorated class in-place
    into a "declarative" base::

        from sqlalchemy.ext.declarative import as_declarative

        @as_declarative()
        class Base(object):
            @declared_attr
            def __tablename__(cls):
                return cls.__name__.lower()
            id = Column(Integer, primary_key=True)

        class MyMappedClass(Base):
            # ...

    All keyword arguments passed to :func:`.as_declarative` are passed
    along to :func:`.declarative_base`.

    .. versionadded:: 0.8.3

    .. seealso::

        :func:`.declarative_base`

    """
    def decorate(cls):
        kw.update(cls=cls, name=cls.__name__)
        return declarative_base(**kw)
    return decorate
class ConcreteBase(object):
    """A helper class for 'concrete' declarative mappings.

    :class:`.ConcreteBase` applies the :func:`.polymorphic_union`
    function automatically against all tables mapped as subclasses of
    this class, via the ``__declare_first__()`` hook.  A mapped table is
    produced for the class itself — compare to
    :class:`.AbstractConcreteBase`, which does not.

    Example::

        from sqlalchemy.ext.declarative import ConcreteBase

        class Employee(ConcreteBase, Base):
            __tablename__ = 'employee'
            employee_id = Column(Integer, primary_key=True)
            name = Column(String(50))
            __mapper_args__ = {
                            'polymorphic_identity':'employee',
                            'concrete':True}

        class Manager(Employee):
            __tablename__ = 'manager'
            employee_id = Column(Integer, primary_key=True)
            name = Column(String(50))
            manager_data = Column(String(40))
            __mapper_args__ = {
                            'polymorphic_identity':'manager',
                            'concrete':True}

    """

    @classmethod
    def _create_polymorphic_union(cls, mappers):
        # Build the "pjoin" UNION selectable, discriminated by a 'type'
        # column holding each mapper's polymorphic identity.
        identity_to_table = OrderedDict(
            (mp.polymorphic_identity, mp.local_table)
            for mp in mappers
        )
        return polymorphic_union(identity_to_table, 'type', 'pjoin')

    @classmethod
    def __declare_first__(cls):
        m = cls.__mapper__
        if m.with_polymorphic:
            # an explicit with_polymorphic setting exists; leave it alone
            return
        pjoin = cls._create_polymorphic_union(list(m.self_and_descendants))
        m._set_with_polymorphic(("*", pjoin))
        m._set_polymorphic_on(pjoin.c.type)
class AbstractConcreteBase(ConcreteBase):
    """A helper class for 'concrete' declarative mappings.

    Like :class:`.ConcreteBase`, :class:`.AbstractConcreteBase` applies
    the :func:`.polymorphic_union` function automatically against all
    tables mapped as subclasses of this class, via the
    ``__declare_first__()`` hook; unlike :class:`.ConcreteBase`, it does
    *not* produce a mapped table for the class itself.

    Example::

        from sqlalchemy.ext.declarative import AbstractConcreteBase

        class Employee(AbstractConcreteBase, Base):
            pass

        class Manager(Employee):
            __tablename__ = 'manager'
            employee_id = Column(Integer, primary_key=True)
            name = Column(String(50))
            manager_data = Column(String(40))
            __mapper_args__ = {
                            'polymorphic_identity':'manager',
                            'concrete':True}

    """

    __abstract__ = True

    @classmethod
    def __declare_first__(cls):
        if hasattr(cls, '__mapper__'):
            return

        clsregistry.add_class(cls.__name__, cls)
        # can't rely on 'self_and_descendants' here
        # since technically an immediate subclass
        # might not be mapped, but a subclass
        # may be.
        mappers = []
        stack = list(cls.__subclasses__())
        while stack:
            klass = stack.pop()
            stack.extend(klass.__subclasses__())
            mn = _mapper_or_none(klass)
            if mn is not None:
                mappers.append(mn)
        pjoin = cls._create_polymorphic_union(mappers)
        cls.__mapper__ = m = mapper(cls, pjoin, polymorphic_on=pjoin.c.type)

        for scls in cls.__subclasses__():
            sm = _mapper_or_none(scls)
            # _mapper_or_none may return None for an unmapped immediate
            # subclass; previously that raised AttributeError on
            # 'sm.concrete' below.
            if sm is not None and sm.concrete and cls in scls.__bases__:
                sm._set_concrete_base(m)
class DeferredReflection(object):
    """A helper class for construction of mappings based on
    a deferred reflection step.

    Normally, declarative can be used with reflection by setting a
    :class:`.Table` object using autoload=True as the ``__table__``
    attribute on a declarative class.  The caveat is that the
    :class:`.Table` must be fully reflected, or at the very least have a
    primary key column, when the declarative mapping is constructed,
    meaning the :class:`.Engine` must be available at class declaration
    time.

    The :class:`.DeferredReflection` mixin defers construction of
    mappers until a later point, when :meth:`.prepare` is called with an
    engine; all pending :class:`.Table` objects are reflected first::

        from sqlalchemy.ext.declarative import declarative_base
        from sqlalchemy.ext.declarative import DeferredReflection
        Base = declarative_base()

        class MyClass(DeferredReflection, Base):
            __tablename__ = 'mytable'

        engine = create_engine("someengine://...")
        DeferredReflection.prepare(engine)

    The mixin can be applied to individual classes, used as the base for
    the declarative base itself, or used in custom abstract classes.
    Using an abstract base allows only a subset of classes to be
    prepared per step, which is necessary for applications using more
    than one engine.  With two engines, one might use two bases and
    prepare each separately, e.g.::

        class ReflectedOne(DeferredReflection, Base):
            __abstract__ = True

        class ReflectedTwo(DeferredReflection, Base):
            __abstract__ = True

        class MyClass(ReflectedOne):
            __tablename__ = 'mytable'

        class MyOtherClass(ReflectedOne):
            __tablename__ = 'myothertable'

        class YetAnotherClass(ReflectedTwo):
            __tablename__ = 'yetanothertable'

        ReflectedOne.prepare(engine_one)
        ReflectedTwo.prepare(engine_two)

    .. versionadded:: 0.8

    """
    @classmethod
    def prepare(cls, engine):
        """Reflect all :class:`.Table` objects for all current
        :class:`.DeferredReflection` subclasses"""

        for config in _DeferredMapperConfig.classes_for_base(cls):
            cls._sa_decl_prepare(config.local_table, engine)
            config.map()
            mapper = config.cls.__mapper__
            metadata = mapper.class_.metadata
            # 'secondary' tables of relationships are not reflected as
            # part of the primary mapping; handle both concrete Table
            # objects and still-unresolved string names.
            for rel in mapper._props.values():
                if not isinstance(rel, properties.RelationshipProperty):
                    continue
                if rel.secondary is None:
                    continue
                if isinstance(rel.secondary, Table):
                    cls._reflect_table(rel.secondary, engine)
                elif isinstance(rel.secondary, _class_resolver):
                    rel.secondary._resolvers += (
                        cls._sa_deferred_table_resolver(engine, metadata),
                    )

    @classmethod
    def _sa_deferred_table_resolver(cls, engine, metadata):
        # Return a callable resolving a table name to a reflected Table.
        def _resolve(key):
            reflected = Table(key, metadata)
            cls._reflect_table(reflected, engine)
            return reflected
        return _resolve

    @classmethod
    def _sa_decl_prepare(cls, local_table, engine):
        # autoload the Table already present in the metadata; this fills
        # db-loaded columns into the existing Table object.
        if local_table is not None:
            cls._reflect_table(local_table, engine)

    @classmethod
    def _reflect_table(cls, table, engine):
        Table(table.name,
              table.metadata,
              extend_existing=True,
              autoload_replace=False,
              autoload=True,
              autoload_with=engine,
              schema=table.schema)
| gpl-3.0 |
googleapis/googleapis-gen | google/cloud/metastore/v1alpha/metastore-v1alpha-py/google/cloud/metastore_v1alpha/types/__init__.py | 2 | 2294 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .metastore import (
Backup,
CreateBackupRequest,
CreateMetadataImportRequest,
CreateServiceRequest,
DatabaseDumpSpec,
DataCatalogConfig,
DeleteBackupRequest,
DeleteServiceRequest,
ExportMetadataRequest,
GetBackupRequest,
GetMetadataImportRequest,
GetServiceRequest,
HiveMetastoreConfig,
KerberosConfig,
ListBackupsRequest,
ListBackupsResponse,
ListMetadataImportsRequest,
ListMetadataImportsResponse,
ListServicesRequest,
ListServicesResponse,
LocationMetadata,
MaintenanceWindow,
MetadataExport,
MetadataImport,
MetadataIntegration,
MetadataManagementActivity,
OperationMetadata,
Restore,
RestoreServiceRequest,
Secret,
Service,
UpdateMetadataImportRequest,
UpdateServiceRequest,
)
__all__ = (
'Backup',
'CreateBackupRequest',
'CreateMetadataImportRequest',
'CreateServiceRequest',
'DatabaseDumpSpec',
'DataCatalogConfig',
'DeleteBackupRequest',
'DeleteServiceRequest',
'ExportMetadataRequest',
'GetBackupRequest',
'GetMetadataImportRequest',
'GetServiceRequest',
'HiveMetastoreConfig',
'KerberosConfig',
'ListBackupsRequest',
'ListBackupsResponse',
'ListMetadataImportsRequest',
'ListMetadataImportsResponse',
'ListServicesRequest',
'ListServicesResponse',
'LocationMetadata',
'MaintenanceWindow',
'MetadataExport',
'MetadataImport',
'MetadataIntegration',
'MetadataManagementActivity',
'OperationMetadata',
'Restore',
'RestoreServiceRequest',
'Secret',
'Service',
'UpdateMetadataImportRequest',
'UpdateServiceRequest',
)
| apache-2.0 |
oliverhr/odoo | addons/hr/__init__.py | 382 | 1092 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr
import res_config
import res_users
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
azureplus/hue | desktop/core/ext-py/Django-1.6.10/django/views/defaults.py | 105 | 3178 | import warnings
from django import http
from django.template import (Context, RequestContext,
loader, Template, TemplateDoesNotExist)
from django.views.decorators.csrf import requires_csrf_token
# This can be called when CsrfViewMiddleware.process_view has not run,
# therefore need @requires_csrf_token in case the template needs
# {% csrf_token %}.
@requires_csrf_token
def page_not_found(request, template_name='404.html'):
    """
    Default 404 handler.

    Templates: :template:`404.html`
    Context:
        request_path
            The path of the requested URL (e.g., '/app/pages/bad_page/')
    """
    # Fall back to a minimal inline template when the project does not
    # supply a 404 template of its own.
    try:
        template = loader.get_template(template_name)
    except TemplateDoesNotExist:
        template = Template(
            '<h1>Not Found</h1>'
            '<p>The requested URL {{ request_path }} was not found on this server.</p>')
        content_type = 'text/html'
    else:
        content_type = None  # Django will use DEFAULT_CONTENT_TYPE
    context = RequestContext(request, {'request_path': request.path})
    return http.HttpResponseNotFound(template.render(context),
                                     content_type=content_type)
@requires_csrf_token
def server_error(request, template_name='500.html'):
    """
    500 error handler.
    Templates: :template:`500.html`
    Context: None
    """
    try:
        template = loader.get_template(template_name)
    except TemplateDoesNotExist:
        # No project 500 template: return a minimal hard-coded page;
        # anything fancier could itself fail during error handling.
        return http.HttpResponseServerError('<h1>Server Error (500)</h1>', content_type='text/html')
    # Rendered with an empty Context (no request data) on purpose; only
    # get_template() errors are caught — render() errors must propagate.
    return http.HttpResponseServerError(template.render(Context({})))
@requires_csrf_token
def bad_request(request, template_name='400.html'):
    """
    400 error handler.
    Templates: :template:`400.html`
    Context: None
    """
    try:
        template = loader.get_template(template_name)
    except TemplateDoesNotExist:
        # No project 400 template available: emit a bare-bones response.
        return http.HttpResponseBadRequest('<h1>Bad Request (400)</h1>', content_type='text/html')
    # Empty Context: request details are deliberately not exposed here.
    return http.HttpResponseBadRequest(template.render(Context({})))
# This can be called when CsrfViewMiddleware.process_view has not run,
# therefore need @requires_csrf_token in case the template needs
# {% csrf_token %}.
@requires_csrf_token
def permission_denied(request, template_name='403.html'):
    """
    Permission denied (403) handler.
    Templates: :template:`403.html`
    Context: None
    If the template does not exist, an Http403 response containing the text
    "403 Forbidden" (as per RFC 2616) will be returned.
    """
    try:
        template = loader.get_template(template_name)
    except TemplateDoesNotExist:
        return http.HttpResponseForbidden('<h1>403 Forbidden</h1>', content_type='text/html')
    # Unlike the 400/500 handlers this renders a full RequestContext so
    # the 403 template can reference request data (e.g. the user).
    return http.HttpResponseForbidden(template.render(RequestContext(request)))
def shortcut(request, content_type_id, object_id):
    # Deprecated alias; the canonical implementation now lives in
    # django.contrib.contenttypes.views.
    warnings.warn(
        "django.views.defaults.shortcut will be removed in Django 1.8. "
        "Import it from django.contrib.contenttypes.views instead.",
        category=PendingDeprecationWarning, stacklevel=2)
    from django.contrib.contenttypes.views import shortcut as _real_shortcut
    return _real_shortcut(request, content_type_id, object_id)
| apache-2.0 |
hef/samba | buildtools/wafsamba/samba_abi.py | 24 | 8800 | # functions for handling ABI checking of libraries
import Options, Utils, os, Logs, samba_utils, sys, Task, fnmatch, re, Build
from TaskGen import feature, before, after
# these type maps cope with platform specific names for common types
# please add new type mappings into the list below
abi_type_maps = {
'_Bool' : 'bool',
'struct __va_list_tag *' : 'va_list'
}
version_key = lambda x: map(int, x.split("."))
def normalise_signature(sig):
    '''normalise a signature from gdb'''
    sig = sig.strip()
    # strip gdb's "$N = {...}" wrapper around the signature
    sig = re.sub('^\$[0-9]+\s=\s\{(.+)\}$', r'\1', sig)
    # same, when followed by one or more "0xADDR <name>" suffixes
    sig = re.sub('^\$[0-9]+\s=\s\{(.+)\}(\s0x[0-9a-f]+\s<\w+>)+$', r'\1', sig)
    # bare pointer values: keep just the address part
    sig = re.sub('^\$[0-9]+\s=\s(0x[0-9a-f]+)\s?(<\w+>)?$', r'\1', sig)
    # addresses vary between runs; replace them with a fixed token
    sig = re.sub('0x[0-9a-f]+', '0xXXXX', sig)
    # repair gdb's "<incomplete sequence \xyz>" rendering inside strings
    sig = re.sub('", <incomplete sequence (\\\\[a-z0-9]+)>', r'\1"', sig)
    # map platform-specific type spellings to canonical names
    for t in abi_type_maps:
        # we need to cope with non-word characters in mapped types
        m = t
        m = m.replace('*', '\*')
        # add word boundaries only where the pattern edge is a word char
        if m[-1].isalnum() or m[-1] == '_':
            m += '\\b'
        if m[0].isalnum() or m[0] == '_':
            m = '\\b' + m
        sig = re.sub(m, abi_type_maps[t], sig)
    return sig
def normalise_varargs(sig):
    '''cope with older versions of gdb'''
    # some gdb versions append ", ..." for varargs; drop it so that
    # signatures compare equal across gdb versions
    return re.sub(',\s\.\.\.', '', sig)
def parse_sigs(sigs, abi_match):
    '''parse ABI signatures file

    sigs      -- text of "symbol: signature" lines
    abi_match -- optional list of glob patterns; patterns starting with
                 '!' are negative.  Returns {symbol: normalised signature}.
    '''
    abi_match = samba_utils.TO_LIST(abi_match)
    ret = {}
    a = sigs.split('\n')
    for s in a:
        # skip lines that are not "name: signature" pairs
        if s.find(':') == -1:
            continue
        sa = s.split(':')
        if abi_match:
            matched = False
            negative = False
            # first pattern to match wins; '!' patterns mark exclusion
            for p in abi_match:
                if p[0] == '!' and fnmatch.fnmatch(sa[0], p[1:]):
                    negative = True
                    break
                elif fnmatch.fnmatch(sa[0], p):
                    matched = True
                    break
            # a symbol is dropped only when a negative pattern matched it
            # (symbols matching no pattern at all are still kept)
            if (not matched) and negative:
                continue
        Logs.debug("%s -> %s" % (sa[1], normalise_signature(sa[1])))
        ret[sa[0]] = normalise_signature(sa[1])
    return ret
def save_sigs(sig_file, parsed_sigs):
    '''save ABI signatures to a file, one "name: signature" per line,
    sorted by symbol name for stable diffs'''
    lines = ['%s: %s\n' % (name, parsed_sigs[name])
             for name in sorted(parsed_sigs.keys())]
    return samba_utils.save_file(sig_file, ''.join(lines), create_dir=True)
def abi_check_task(self):
    '''check if the ABI has changed'''
    abi_gen = self.ABI_GEN
    libpath = self.inputs[0].abspath(self.env)
    libname = os.path.basename(libpath)
    # run the external abi_gen script over the built library to extract
    # its current symbol signatures
    sigs = Utils.cmd_output([abi_gen, libpath])
    parsed_sigs = parse_sigs(sigs, self.ABI_MATCH)
    sig_file = self.ABI_FILE
    old_sigs = samba_utils.load_file(sig_file)
    # no baseline yet, or explicit --abi-update: (re)generate and stop
    if old_sigs is None or Options.options.ABI_UPDATE:
        if not save_sigs(sig_file, parsed_sigs):
            raise Utils.WafError('Failed to save ABI file "%s"' % sig_file)
        Logs.warn('Generated ABI signatures %s' % sig_file)
        return
    parsed_old_sigs = parse_sigs(old_sigs, self.ABI_MATCH)
    # check all old sigs
    got_error = False
    # removed or changed symbols require a major version bump
    for s in parsed_old_sigs:
        if not s in parsed_sigs:
            Logs.error('%s: symbol %s has been removed - please update major version\n\tsignature: %s' % (
                libname, s, parsed_old_sigs[s]))
            got_error = True
        elif normalise_varargs(parsed_old_sigs[s]) != normalise_varargs(parsed_sigs[s]):
            Logs.error('%s: symbol %s has changed - please update major version\n\told_signature: %s\n\tnew_signature: %s' % (
                libname, s, parsed_old_sigs[s], parsed_sigs[s]))
            got_error = True
    # new symbols only require a minor bump (or should be private)
    for s in parsed_sigs:
        if not s in parsed_old_sigs:
            Logs.error('%s: symbol %s has been added - please mark it _PRIVATE_ or update minor version\n\tsignature: %s' % (
                libname, s, parsed_sigs[s]))
            got_error = True
    if got_error:
        raise Utils.WafError('ABI for %s has changed - please fix library version then build with --abi-update\nSee http://wiki.samba.org/index.php/Waf#ABI_Checking for more information\nIf you have not changed any ABI, and your platform always gives this error, please configure with --abi-check-disable to skip this check' % libname)
# register abi_check_task as a waf task type, run against built binaries
t = Task.task_type_from_func('abi_check', abi_check_task, color='BLUE', ext_in='.bin')
t.quiet = True
# allow "waf --abi-check" to force re-checking the ABI
if '--abi-check' in sys.argv:
    Task.always_run(t)
@after('apply_link')
@feature('abi_check')
def abi_check(self):
    '''check that ABI matches saved signatures'''
    env = self.bld.env
    # feature is a no-op unless ABI checking is enabled and the target
    # declares where its baseline .sigs files live
    if not env.ABI_CHECK or self.abi_directory is None:
        return
    # if the platform doesn't support -fvisibility=hidden then the ABI
    # checks become fairly meaningless
    if not env.HAVE_VISIBILITY_ATTR:
        return
    topsrc = self.bld.srcnode.abspath()
    abi_gen = os.path.join(topsrc, 'buildtools/scripts/abi_gen.sh')
    # baseline file is per-library, per-version: <dir>/<name>-<vnum>.sigs
    abi_file = "%s/%s-%s.sigs" % (self.abi_directory, self.name, self.vnum)
    # schedule an abi_check task against the freshly linked binary
    tsk = self.create_task('abi_check', self.link_task.outputs[0])
    tsk.ABI_FILE = abi_file
    tsk.ABI_MATCH = self.abi_match
    tsk.ABI_GEN = abi_gen
def abi_process_file(fname, version, symmap):
    '''process one ABI file, adding new symbols to the symmap

    fname   -- path of a file with "symbol: signature" lines
    version -- library version the file belongs to
    symmap  -- dict of symbol name -> first version it appeared in;
               updated in place, existing entries are preserved
    '''
    # 'with' guarantees the file is closed even if a read raises
    # (the original left the handle open on error).
    with open(fname, mode='r') as f:
        for line in f:
            symname = line.split(":")[0]
            # keep only the first (oldest) version seen for each symbol
            if symname not in symmap:
                symmap[symname] = version
def abi_write_vscript(f, libname, current_version, versions, symmap, abi_match):
    """Write a vscript file for a library in --version-script format.

    :param f: File-like object to write to
    :param libname: Name of the library, uppercased
    :param current_version: Current version
    :param versions: Versions to consider
    :param symmap: Dictionary mapping symbols -> version
    :param abi_match: List of symbols considered to be public in the current
        version
    """
    # invert symbol -> version into version -> [symbols]
    invmap = {}
    for s in symmap:
        invmap.setdefault(symmap[s], []).append(s)

    # numeric sort so e.g. "0.10" sorts after "0.9" (key inlined from
    # the module-level version_key, made py3-safe by list()-ing map)
    versions = sorted(versions, key=lambda v: [int(x) for x in v.split(".")])

    # one version node per historical release, each chained to the
    # previous one via the trailing "} PREV;" inheritance syntax
    last_key = ""
    for k in versions:
        symver = "%s_%s" % (libname, k)
        if symver == current_version:
            break
        f.write("%s {\n" % symver)
        if k in invmap:
            f.write("\tglobal:\n")
            for s in invmap[k]:
                f.write("\t\t%s;\n" % s)
        f.write("}%s;\n\n" % last_key)
        last_key = " %s" % symver

    # current version node: globals taken from abi_match, rest local.
    # Use list comprehensions rather than filter(): py3's filter()
    # returns a one-shot iterator, which broke the len() test and the
    # double iteration below.
    f.write("%s {\n" % current_version)
    local_abi = [x for x in abi_match if x[0] == '!']
    global_abi = [x for x in abi_match if x[0] != '!']
    f.write("\tglobal:\n")
    if global_abi:
        for x in global_abi:
            f.write("\t\t%s;\n" % x)
    else:
        f.write("\t\t*;\n")
    if abi_match != ["*"]:
        f.write("\tlocal:\n")
        for x in local_abi:
            f.write("\t\t%s;\n" % x[1:])
        if global_abi:
            f.write("\t\t*;\n")
    f.write("};\n")
def abi_build_vscript(task):
    '''generate a vscript file for our public libraries

    waf rule: inputs are the per-version .sigs files, output is the
    linker version script.
    '''
    tgt = task.outputs[0].bldpath(task.env)

    symmap = {}
    versions = []
    # accumulate symbol -> first-version across all .sigs inputs; the
    # version is encoded in the filename as <LIBNAME>-<version>.sigs
    for f in task.inputs:
        fname = f.abspath(task.env)
        basename = os.path.basename(fname)
        version = basename[len(task.env.LIBNAME)+1:-len(".sigs")]
        versions.append(version)
        abi_process_file(fname, version, symmap)

    # 'with' replaces the original try/finally and guarantees the output
    # handle is closed even if vscript generation fails
    with open(tgt, mode='w') as out:
        abi_write_vscript(out, task.env.LIBNAME, task.env.VERSION, versions,
                          symmap, task.env.ABI_MATCH)
def ABI_VSCRIPT(bld, libname, abi_directory, version, vscript, abi_match=None):
    '''generate a vscript file for our public libraries'''
    if abi_directory:
        source = bld.path.ant_glob('%s/%s-[0-9]*.sigs' % (abi_directory, libname))
        def abi_file_key(path):
            # sort baseline files by the numeric version embedded in
            # "<libname>-<version>.sigs"
            return version_key(path[:-len(".sigs")].rsplit("-")[-1])
        # ant_glob returns a space-separated string in this waf version
        source = sorted(source.split(), key=abi_file_key)
    else:
        source = ''
    libname = os.path.basename(libname)
    version = os.path.basename(version)
    # version-script node names must be valid identifiers: map -/+ to _
    libname = libname.replace("-", "_").replace("+","_").upper()
    version = version.replace("-", "_").replace("+","_").upper()
    t = bld.SAMBA_GENERATOR(vscript,
                            rule=abi_build_vscript,
                            source=source,
                            group='vscripts',
                            target=vscript)
    if abi_match is None:
        abi_match = ["*"]
    else:
        abi_match = samba_utils.TO_LIST(abi_match)
    t.env.ABI_MATCH = abi_match
    t.env.VERSION = version
    t.env.LIBNAME = libname
    # rebuild the vscript whenever any of these env values change
    t.vars = ['LIBNAME', 'VERSION', 'ABI_MATCH']
Build.BuildContext.ABI_VSCRIPT = ABI_VSCRIPT
| gpl-3.0 |
atwoz/vimeko | update_plugins.py | 2 | 3193 | import zipfile
import shutil
import tempfile
import requests
from os import path
#--- Globals ----------------------------------------------
PLUGINS = """
ack.vim https://github.com/mileszs/ack.vim
ag.vim https://github.com/rking/ag.vim
bufexplorer https://github.com/corntrace/bufexplorer
ctrlp.vim https://github.com/kien/ctrlp.vim
mayansmoke https://github.com/vim-scripts/mayansmoke
nerdtree https://github.com/scrooloose/nerdtree
nginx.vim https://github.com/vim-scripts/nginx.vim
open_file_under_cursor.vim https://github.com/amix/open_file_under_cursor.vim
snipmate-snippets https://github.com/scrooloose/snipmate-snippets
tlib https://github.com/vim-scripts/tlib
vim-addon-mw-utils https://github.com/MarcWeber/vim-addon-mw-utils
vim-bundle-mako https://github.com/sophacles/vim-bundle-mako
vim-coffee-script https://github.com/kchmck/vim-coffee-script
vim-colors-solarized https://github.com/altercation/vim-colors-solarized
vim-indent-object https://github.com/michaeljsmith/vim-indent-object
vim-less https://github.com/groenewege/vim-less
vim-markdown https://github.com/tpope/vim-markdown
vim-pyte https://github.com/therubymug/vim-pyte
vim-snipmate https://github.com/garbas/vim-snipmate
vim-snippets https://github.com/honza/vim-snippets
vim-surround https://github.com/tpope/vim-surround
vim-expand-region https://github.com/terryma/vim-expand-region
vim-multiple-cursors https://github.com/terryma/vim-multiple-cursors
vim-fugitive https://github.com/tpope/vim-fugitive
vim-airline https://github.com/bling/vim-airline
goyo.vim https://github.com/junegunn/goyo.vim
vim-zenroom2 https://github.com/amix/vim-zenroom2
syntastic https://github.com/scrooloose/syntastic
vim-repeat https://github.com/tpope/vim-repeat
vim-commentary https://github.com/tpope/vim-commentary
vim-go https://github.com/fatih/vim-go
vim-gitgutter https://github.com/airblade/vim-gitgutter
gruvbox https://github.com/morhetz/gruvbox
""".strip()
# GitHub's "download repository as zip" URL pattern for the master branch
GITHUB_ZIP = '%s/archive/master.zip'
# plugins are vendored under sources_non_forked next to this script
SOURCE_DIR = path.join(path.dirname(__file__), 'sources_non_forked')
def download_extract_replace(plugin_name, zip_path, temp_dir, source_dir):
    """Download a plugin's zip archive, unpack it in temp_dir, and swap
    it in under source_dir/plugin_name, replacing any previous copy.
    """
    temp_zip_path = path.join(temp_dir, plugin_name)

    # Download and extract the archive in the temp dir; 'with' closes
    # both the zip file on disk and the ZipFile handle (the original
    # left both open).
    req = requests.get(zip_path)
    with open(temp_zip_path, 'wb') as zip_out:
        zip_out.write(req.content)
    with zipfile.ZipFile(temp_zip_path) as zip_f:
        zip_f.extractall(temp_dir)

    # GitHub master archives unpack into "<name>-master".  (The original
    # double path.join(temp_dir, path.join(temp_dir, ...)) collapsed to
    # this, since mkdtemp() yields an absolute path.)
    plugin_temp_path = path.join(temp_dir, '%s-master' % plugin_name)

    # Remove the current plugin and replace it with the extracted one;
    # ignore_errors covers the first-install case where nothing exists.
    plugin_dest_path = path.join(source_dir, plugin_name)
    shutil.rmtree(plugin_dest_path, ignore_errors=True)
    shutil.move(plugin_temp_path, plugin_dest_path)

    print('Updated {0}'.format(plugin_name))
if __name__ == '__main__':
    # Work in a throwaway directory; the finally clause guarantees it is
    # removed even when a download or extraction fails part-way through.
    scratch_dir = tempfile.mkdtemp()
    try:
        for plugin_line in PLUGINS.splitlines():
            plugin_name, repo_url = plugin_line.split(' ')
            archive_url = GITHUB_ZIP % repo_url
            download_extract_replace(plugin_name, archive_url,
                                     scratch_dir, SOURCE_DIR)
    finally:
        shutil.rmtree(scratch_dir)
| mit |
2014c2g6/c2g6 | w2/static/Brython2.0.0-20140209-164925/Lib/atexit.py | 743 | 1049 | """allow programmer to define multiple exit functions to be executedupon normal program termination.
Two public functions, register and unregister, are defined.
"""
class __loader__(object):
    """Placeholder loader object; the Brython runtime supplies the real
    import machinery."""
    pass
def _clear(*args,**kw):
    """_clear() -> None

    Clear the list of previously registered exit functions."""
    # Stub: the Brython runtime provides the native implementation.
    pass
def _run_exitfuncs(*args,**kw):
    """_run_exitfuncs() -> None

    Run all registered exit functions."""
    # Stub: the Brython runtime provides the native implementation.
    pass
def register(*args,**kw):
    """register(func, *args, **kwargs) -> func

    Register a function to be executed upon normal program termination

    func - function to be called at exit
    args - optional arguments to pass to func
    kwargs - optional keyword arguments to pass to func

    func is returned to facilitate usage as a decorator."""
    # Stub: the Brython runtime provides the native implementation.
    pass
def unregister(*args,**kw):
    """unregister(func) -> None

    Unregister a exit function which was previously registered using
    atexit.register

    func - function to be unregistered"""
    # Stub: the Brython runtime provides the native implementation.
    pass
| gpl-2.0 |
twitter/zktraffic | zktraffic/zab/quorum_packet.py | 1 | 12328 | # ==================================================================================================
# Copyright 2015 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from datetime import datetime
from six import string_types
from zktraffic.base.network import BadPacket
from zktraffic.base.util import read_long, read_number
from zktraffic.base.zookeeper import ZK_REQUEST_TYPES
class PacketType(object):
  """Symbolic constants for ZAB quorum packet types.

  NAMES is indexed by the numeric type value; index 0 is a placeholder
  because valid types start at REQUEST (1).
  """
  REQUEST = 1
  PROPOSAL = 2
  ACK = 3
  COMMIT = 4
  PING = 5
  REVALIDATE = 6
  SYNC = 7
  INFORM = 8
  COMMITANDACTIVATE = 9
  NEWLEADER = 10
  FOLLOWERINFO = 11
  UPTODATE = 12
  DIFF = 13
  TRUNC = 14
  SNAP = 15
  OBSERVERINFO = 16
  LEADERINFO = 17
  ACKEPOCH = 18
  INFORMANDACTIVATE = 19

  VALID = range(REQUEST, INFORMANDACTIVATE + 1)

  NAMES = [
    "zero", "request", "proposal", "ack", "commit", "ping",
    "revalidate", "sync", "inform", "commitandactivate", "newleader",
    "followerinfo", "uptodate", "diff", "trunc", "snap",
    "observerinfo", "leaderinfo", "ackepoch", "informandactivate",
  ]

  @classmethod
  def invalid(cls, ptype):
    """True when ptype is outside the known range of packet types."""
    return ptype not in cls.VALID

  @classmethod
  def to_str(cls, ptype):
    """Lower-case name for ptype, or the empty string if it is invalid."""
    if cls.invalid(ptype):
      return ""
    return cls.NAMES[ptype]
class QuorumPacketBase(type):
  """Metaclass keeping a registry of packet classes keyed by PTYPE.

  Classes that leave PTYPE as None (abstract bases) are not registered;
  defining two classes with the same PTYPE is a programming error.
  """
  TYPES = {}
  PTYPE = None

  def __new__(cls, clsname, bases, namespace):
    new_class = super(QuorumPacketBase, cls).__new__(
      cls, clsname, bases, namespace)
    if new_class.PTYPE in cls.TYPES:
      raise ValueError("Duplicate ptype name: %s" % new_class.PTYPE)
    if new_class.PTYPE is not None:
      cls.TYPES[new_class.PTYPE] = new_class
    return new_class

  @classmethod
  def get(cls, key, default=None):
    """Return the packet class registered for key, or default."""
    return cls.TYPES.get(key, default)
class QuorumPacket(QuorumPacketBase("QuorumPacketBase", (object,), {})):
  """Base class for deserialized ZAB quorum packets.

  Subclasses set PTYPE and are auto-registered by the metaclass, so
  from_payload() can dispatch a raw payload to the right parser.
  """
  __slots__ = ("timestamp", "src", "dst", "type", "zxid", "length")

  # Minimum payload: 4-byte type + 8-byte zxid.
  MIN_SIZE = 12

  def __init__(self, timestamp, src, dst, ptype, zxid, length):
    self.timestamp = timestamp
    self.src = src
    self.dst = dst
    self.type = ptype
    self.zxid = zxid
    self.length = length

  @property
  def timestr(self):
    # Capture time formatted for display (hour:minute:second:microsecond).
    return datetime.fromtimestamp(self.timestamp).strftime("%H:%M:%S:%f")

  @property
  def type_literal(self):
    return PacketType.to_str(self.type)

  @property
  def zxid_literal(self):
    # -1 ("no zxid") is shown as-is; real zxids are rendered in hex.
    return self.zxid if self.zxid == -1 else "0x%x" % self.zxid

  @classmethod
  def with_params(cls, timestamp, src, dst, ptype, zxid, data, offset):
    # Default parser: no fields beyond the common header.
    return cls(timestamp, src, dst, ptype, zxid, len(data))

  @classmethod
  def from_payload(cls, data, src, dst, timestamp):
    """Parse a raw payload into the matching packet instance.

    Raises BadPacket when the payload is truncated or its type is unknown.
    """
    if len(data) < cls.MIN_SIZE:
      raise BadPacket("Too small")
    ptype, offset = read_number(data, 0)
    if PacketType.invalid(ptype):
      raise BadPacket("Invalid type")
    zxid, offset = read_long(data, offset)
    # Fall back to this generic class when no subclass claims the type.
    handler = QuorumPacketBase.get(ptype, cls)
    return handler.with_params(timestamp, src, dst, ptype, zxid, data, offset)

  def __str__(self):
    """Multi-line rendering of the packet's public int/str attributes.

    Attributes with a "<name>_literal" companion property are shown via
    that literal form instead of the raw value.
    """
    def attributes():
      def valid(key, value):
        # Only ints and strings; skip constants, privates, the literal
        # helpers themselves, and "type" (shown via type_literal).
        if not isinstance(value, int) and not isinstance(value, string_types):
          return False
        if key.isupper() or key.startswith("_") or "_literal" in key or key == "type":
          return False
        return True
      for key in dir(self):
        value = getattr(self, key)
        if valid(key, value):
          alt_key = "%s_literal" % key
          if hasattr(self, alt_key):
            value = getattr(self, alt_key)
          yield key, value
    parts = ["%s(" % self.__class__.__name__]
    for name, value in attributes():
      parts.append(" %s=%s," % (name, value))
    parts.append(")")
    return "\n".join(parts) + "\n"
class Request(QuorumPacket):
  """REQUEST: a client operation forwarded to the leader."""
  PTYPE = PacketType.REQUEST
  __slots__ = ("session_id", "cxid", "req_type")

  def __init__(self, timestamp, src, dst, ptype, zxid, length, session_id, cxid, req_type):
    super(Request, self).__init__(timestamp, src, dst, ptype, zxid, length)
    self.session_id = session_id
    self.cxid = cxid
    self.req_type = req_type

  @property
  def req_type_literal(self):
    # Unknown request types fall back to their numeric value as a string.
    return ZK_REQUEST_TYPES[self.req_type] if self.req_type in ZK_REQUEST_TYPES else str(self.req_type)

  @property
  def session_id_literal(self):
    return "0x%x" % self.session_id

  @classmethod
  def with_params(cls, timestamp, src, dst, ptype, zxid, data, offset):
    # Payload layout: length prefix, session id, client xid, request type.
    data_len, offset = read_number(data, offset)
    session_id, offset = read_long(data, offset)
    cxid, offset = read_number(data, offset)
    req_type, offset = read_number(data, offset)
    # TODO: dissect the remaining data, see server_message.py and client_message.py
    # Note: zxid=-1 because requests don't have a zxid
    return cls(timestamp, src, dst, ptype, -1, len(data), session_id, cxid, req_type)
class Proposal(QuorumPacket):
  """PROPOSAL: leader-to-follower transaction proposal."""
  PTYPE = PacketType.PROPOSAL
  __slots__ = ("session_id", "cxid", "txn_zxid", "txn_time", "txn_type")

  def __init__(self, timestamp, src, dst, ptype, zxid, length,
               session_id, cxid, txn_zxid, txn_time, txn_type):
    super(Proposal, self).__init__(timestamp, src, dst, ptype, zxid, length)
    self.session_id = session_id
    self.cxid = cxid
    self.txn_zxid = txn_zxid
    self.txn_time = txn_time
    self.txn_type = txn_type

  @property
  def session_id_literal(self):
    return "0x%x" % self.session_id

  @property
  def txn_type_literal(self):
    # Unknown transaction types fall back to their numeric value.
    return ZK_REQUEST_TYPES[self.txn_type] if self.txn_type in ZK_REQUEST_TYPES else str(self.txn_type)

  @classmethod
  def with_params(cls, timestamp, src, dst, ptype, zxid, data, offset):
    # Payload layout: length prefix, session id, client xid, then the
    # transaction header (zxid, time, type).
    data_len, offset = read_number(data, offset)
    session_id, offset = read_long(data, offset)
    cxid, offset = read_number(data, offset)
    txn_zxid, offset = read_long(data, offset)
    txn_time, offset = read_long(data, offset)
    txn_type, offset = read_number(data, offset)
    # TODO: dissect the remaining data
    # see org.apache.zookeeper.server.util.SerializeUtils.deserializeTxn()
    return cls(timestamp, src, dst, ptype, zxid, len(data),
               session_id, cxid, txn_zxid, txn_time, txn_type)
class Ack(QuorumPacket):
  # Follower acknowledgement of a proposal; no extra payload parsed.
  PTYPE = PacketType.ACK
class Commit(QuorumPacket):
  # Leader's instruction to commit a previously proposed transaction.
  PTYPE = PacketType.COMMIT
class Ping(QuorumPacket):
  # Leader/follower heartbeat.
  PTYPE = PacketType.PING

# TODO: dissect the data (in almost all cases, data is null)
class Revalidate(QuorumPacket):
  """REVALIDATE: session revalidation carrying a session id and timeout."""
  PTYPE = PacketType.REVALIDATE
  __slots__ = ("session_id", "timeout")

  def __init__(self, timestamp, src, dst, ptype, zxid, length, session_id, timeout):
    super(Revalidate, self).__init__(timestamp, src, dst, ptype, zxid, length)
    self.session_id = session_id
    self.timeout = timeout

  @property
  def session_id_literal(self):
    return "0x%x" % self.session_id

  @classmethod
  def with_params(cls, timestamp, src, dst, ptype, zxid, data, offset):
    # Payload layout: length prefix, session id, timeout.
    data_len, offset = read_number(data, offset)
    session_id, offset = read_long(data, offset)
    timeout, offset = read_number(data, offset)
    return cls(timestamp, src, dst, ptype, zxid, len(data), session_id, timeout)
class Sync(QuorumPacket):
  # Sync request/response between follower and leader.
  PTYPE = PacketType.SYNC
class Inform(Proposal):
  # Observer notification of a committed proposal; same wire format as
  # PROPOSAL, hence the subclassing.
  PTYPE = PacketType.INFORM
class CommitAndActivate(QuorumPacket):
  """COMMITANDACTIVATE: commit that also activates a new quorum config.

  Carries the server id of the suggested leader for the new configuration.
  """
  PTYPE = PacketType.COMMITANDACTIVATE
  # Trailing comma makes this a real tuple.  The previous bare
  # parenthesized string also worked (a string is a valid single slot
  # name), but the tuple form matches the sibling classes and avoids a
  # trap when a second slot is added.
  __slots__ = ("suggested_leader_id",)

  def __init__(self, timestamp, src, dst, ptype, zxid, length, suggested_leader_id):
    super(CommitAndActivate, self).__init__(timestamp, src, dst, ptype, zxid, length)
    self.suggested_leader_id = suggested_leader_id

  @classmethod
  def with_params(cls, timestamp, src, dst, ptype, zxid, data, offset):
    # Payload layout: length prefix, then the suggested leader id.
    data_len, offset = read_number(data, offset)
    suggested_leader_id, offset = read_long(data, offset)
    return cls(timestamp, src, dst, ptype, zxid, len(data), suggested_leader_id)
class NewLeader(QuorumPacket):
  # Sent by a newly elected leader at the start of synchronization.
  PTYPE = PacketType.NEWLEADER

# TODO: dissect the data (in almost all cases, data is null)
class FollowerInfo(QuorumPacket):
  """FOLLOWERINFO: follower's self-identification sent to the leader."""
  PTYPE = PacketType.FOLLOWERINFO
  __slots__ = ("sid", "protocol_version", "config_version")

  def __init__(self, timestamp, src, dst, ptype, zxid, length,
               sid, protocol_version, config_version):
    super(FollowerInfo, self).__init__(timestamp, src, dst, ptype, zxid, length)
    self.sid = sid
    self.protocol_version = protocol_version
    self.config_version = config_version

  @classmethod
  def with_params(cls, timestamp, src, dst, ptype, zxid, data, offset):
    # Payload layout: length prefix, server id, protocol version,
    # config version.
    data_len, offset = read_number(data, offset)
    sid, offset = read_long(data, offset)
    protocol_version, offset = read_number(data, offset)
    config_version, offset = read_long(data, offset)
    return cls(timestamp, src, dst, ptype, zxid, len(data),
               sid, protocol_version, config_version)
class UpToDate(QuorumPacket):
  # Leader's signal that the follower has fully synchronized.
  PTYPE = PacketType.UPTODATE
class Diff(QuorumPacket):
  # Sync strategy marker: follower will receive a diff of transactions.
  PTYPE = PacketType.DIFF
class Trunc(QuorumPacket):
  # Sync strategy marker: follower must truncate its log to the zxid.
  PTYPE = PacketType.TRUNC
class Snap(QuorumPacket):
  # Sync strategy marker: follower will receive a full snapshot.
  PTYPE = PacketType.SNAP
class ObserverInfo(FollowerInfo):
  # Observer's self-identification; same wire format as FOLLOWERINFO.
  PTYPE = PacketType.OBSERVERINFO
class LeaderInfo(QuorumPacket):
  """LEADERINFO: leader's reply carrying its protocol version."""
  PTYPE = PacketType.LEADERINFO
  # Trailing comma makes this a real tuple, matching the tuple form used
  # by the sibling packet classes (a bare string also works for a single
  # slot, but is an inconsistency trap).
  __slots__ = ("protocol_version",)

  def __init__(self, timestamp, src, dst, ptype, zxid, length, protocol_version):
    super(LeaderInfo, self).__init__(timestamp, src, dst, ptype, zxid, length)
    self.protocol_version = protocol_version

  @classmethod
  def with_params(cls, timestamp, src, dst, ptype, zxid, data, offset):
    # Payload layout: length prefix, then the protocol version.
    data_len, offset = read_number(data, offset)
    protocol_version, offset = read_number(data, offset)
    return cls(timestamp, src, dst, ptype, zxid, len(data), protocol_version)
class AckEpoch(QuorumPacket):
  """ACKEPOCH: follower's acknowledgement of the leader's epoch."""
  PTYPE = PacketType.ACKEPOCH
  # Trailing comma makes this a real tuple, matching the tuple form used
  # by the sibling packet classes.
  __slots__ = ("epoch",)

  def __init__(self, timestamp, src, dst, ptype, zxid, length, epoch):
    super(AckEpoch, self).__init__(timestamp, src, dst, ptype, zxid, length)
    self.epoch = epoch

  @classmethod
  def with_params(cls, timestamp, src, dst, ptype, zxid, data, offset):
    # Payload layout: length prefix, then the acknowledged epoch.
    data_len, offset = read_number(data, offset)
    epoch, offset = read_number(data, offset)
    return cls(timestamp, src, dst, ptype, zxid, len(data), epoch)
class InformAndActivate(Proposal):
  """INFORMANDACTIVATE: observer notification of a commit that also
  activates a new quorum configuration (adds the suggested leader id)."""
  # NOTE(review): the first five names repeat Proposal's __slots__; only
  # suggested_leader_id is new here.
  __slots__ = ("session_id", "cxid", "txn_zxid", "txn_time", "txn_type", "suggested_leader_id")
  PTYPE = PacketType.INFORMANDACTIVATE

  def __init__(self, timestamp, src, dst, ptype, zxid, length,
               suggested_leader_id,
               session_id, cxid, txn_zxid, txn_time, txn_type):
    # Deliberately skips Proposal.__init__ (different argument list) and
    # initializes the common header via QuorumPacket directly.
    super(Proposal, self).__init__(timestamp, src, dst, ptype, zxid, length)
    self.suggested_leader_id = suggested_leader_id
    self.session_id = session_id
    self.cxid = cxid
    self.txn_zxid = txn_zxid
    self.txn_time = txn_time
    self.txn_type = txn_type

  @property
  def session_id_literal(self):
    return "0x%x" % self.session_id

  @classmethod
  def with_params(cls, timestamp, src, dst, ptype, zxid, data, offset):
    # Payload layout: length prefix, suggested leader id, then the usual
    # proposal fields (session id, cxid, txn header).
    data_len, offset = read_number(data, offset)
    suggested_leader_id, offset = read_long(data, offset)
    session_id, offset = read_long(data, offset)
    cxid, offset = read_number(data, offset)
    txn_zxid, offset = read_long(data, offset)
    txn_time, offset = read_long(data, offset)
    txn_type, offset = read_number(data, offset)
    return cls(timestamp, src, dst, ptype, zxid, len(data),
               suggested_leader_id,
               session_id, cxid, txn_zxid, txn_time, txn_type)
| apache-2.0 |
anuragamodi/profitpy | profit/models/ticker.py | 18 | 2372 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2007 Troy Melhase <troy@gci.net>
# Distributed under the terms of the GNU General Public License v2
from PyQt4.QtCore import Qt, QModelIndex, QVariant, QString
from profit.lib import valueAlign
from profit.models import BasicItem, BasicItemModel
## This model isn't used yet. It should morph into a replacement
## for the plot controls model in profit.lib.widgets.plot.
class TickerModel(BasicItemModel):
    """ Model for a single ticker.

    Ticker objects built from session messages are stored in an internal
    dict keyed by ticker id and exposed through the mapping-style helpers
    below.
    """
    def __init__(self, session=None, parent=None):
        BasicItemModel.__init__(self, TickerRootItem(), parent)
        self.session = session
        # Keep the storage private: the previous code assigned to
        # ``self.data``, which shadowed the ``data`` method required by the
        # Qt model API and made views unable to query this model.
        self._data = {}
        if session is not None:
            session.registerMeta(self)

    def __contains__(self, item):
        return item in self._data

    def __getitem__(self, name):
        return self._data[name]

    def __setitem__(self, name, value):
        self._data[name] = value

    def keys(self):
        return self._data.keys()

    def items(self):
        return self._data.items()

    def data(self, index, role=Qt.DecorationRole):
        """ Framework hook to retrieve information from this model.

        @param index QModelIndex instance
        @param role=Qt.DecorationRole
        @return QVariant of some kind
        """
        if not index.isValid():
            return QVariant()
        item = index.internalPointer()
        column = index.column()
        # Placeholder: no columns are rendered yet (model is unused).
        result = QVariant()
        return result

    def on_session_TickPrice_TickSize(self, message):
        # NOTE(review): ``Signals`` is not imported in this module; it must
        # be imported (presumably from profit.lib) before this handler runs.
        tickerId = message.tickerId
        try:
            tickerdata = self[tickerId]
        except (KeyError, ):
            tickerdata = self[tickerId] = \
                self.session.strategy.makeTicker(tickerId)
            self.emit(Signals.createdTicker, tickerId, tickerdata)
        try:
            value = message.price
        except (AttributeError, ):
            value = message.size
        field = message.field
        try:
            seq = tickerdata.series[field]
        except (KeyError, ):
            seq = tickerdata.series[field] = \
                self.session.strategy.makeTickerSeries(tickerId, field)
            self.emit(Signals.createdSeries, tickerId, field)
        seq.append(value)
class TickerItem(BasicItem):
    # Tree item for ticker rows; all behavior inherited from BasicItem.
    pass
class TickerRootItem(TickerItem):
    # Invisible root item used to anchor the TickerModel tree.
    pass
| gpl-2.0 |
wylliam-silva/leiteilustrador | node_modules/node-gyp/gyp/pylib/gyp/win_tool.py | 1417 | 12751 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions for Windows builds.
These functions are executed via gyp-win-tool when using the ninja generator.
"""
import os
import re
import shutil
import subprocess
import stat
import string
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# A regex matching an argument corresponding to the output filename passed to
# link.exe.
_LINK_EXE_OUT_ARG = re.compile('/OUT:(?P<out>.+)$', re.IGNORECASE)
def main(args):
  """Dispatch *args* to the named WinTool command; exit with its code."""
  exit_code = WinTool().Dispatch(args)
  if exit_code is not None:
    sys.exit(exit_code)
class WinTool(object):
  """This class performs all the Windows tooling steps. The methods can either
  be executed directly, or dispatched from an argument list.

  NOTE(review): this module uses Python 2 syntax (print statements,
  dict.iteritems) and is invoked via gyp-win-tool under the ninja generator.
  """

  def _UseSeparateMspdbsrv(self, env, args):
    """Allows to use a unique instance of mspdbsrv.exe per linker instead of a
    shared one."""
    if len(args) < 1:
      raise Exception("Not enough arguments")

    if args[0] != 'link.exe':
      return

    # Use the output filename passed to the linker to generate an endpoint name
    # for mspdbsrv.exe.
    endpoint_name = None
    for arg in args:
      m = _LINK_EXE_OUT_ARG.match(arg)
      if m:
        endpoint_name = re.sub(r'\W+', '',
                               '%s_%d' % (m.group('out'), os.getpid()))
        break

    if endpoint_name is None:
      return

    # Adds the appropriate environment variable. This will be read by link.exe
    # to know which instance of mspdbsrv.exe it should connect to (if it's
    # not set then the default endpoint is used).
    env['_MSPDBSRV_ENDPOINT_'] = endpoint_name

  def Dispatch(self, args):
    """Dispatches a string command to a method."""
    if len(args) < 1:
      raise Exception("Not enough arguments")

    # "recursive-mirror" dispatches to ExecRecursiveMirror, etc.
    method = "Exec%s" % self._CommandifyName(args[0])
    return getattr(self, method)(*args[1:])

  def _CommandifyName(self, name_string):
    """Transforms a tool name like recursive-mirror to RecursiveMirror."""
    return name_string.title().replace('-', '')

  def _GetEnv(self, arch):
    """Gets the saved environment from a file for a given architecture."""
    # The environment is saved as an "environment block" (see CreateProcess
    # and msvs_emulation for details). We convert to a dict here.
    # Drop last 2 NULs, one for list terminator, one for trailing vs. separator.
    pairs = open(arch).read()[:-2].split('\0')
    kvs = [item.split('=', 1) for item in pairs]
    return dict(kvs)

  def ExecStamp(self, path):
    """Simple stamp command."""
    open(path, 'w').close()

  def ExecRecursiveMirror(self, source, dest):
    """Emulation of rm -rf out && cp -af in out."""
    if os.path.exists(dest):
      if os.path.isdir(dest):
        def _on_error(fn, path, excinfo):
          # The operation failed, possibly because the file is set to
          # read-only. If that's why, make it writable and try the op again.
          if not os.access(path, os.W_OK):
            os.chmod(path, stat.S_IWRITE)
          fn(path)
        shutil.rmtree(dest, onerror=_on_error)
      else:
        if not os.access(dest, os.W_OK):
          # Attempt to make the file writable before deleting it.
          os.chmod(dest, stat.S_IWRITE)
        os.unlink(dest)

    if os.path.isdir(source):
      shutil.copytree(source, dest)
    else:
      shutil.copy2(source, dest)

  def ExecLinkWrapper(self, arch, use_separate_mspdbsrv, *args):
    """Filter diagnostic output from link that looks like:
    '   Creating library ui.dll.lib and object ui.dll.exp'
    This happens when there are exports from the dll or exe.
    """
    env = self._GetEnv(arch)
    if use_separate_mspdbsrv == 'True':
      self._UseSeparateMspdbsrv(env, args)
    link = subprocess.Popen([args[0].replace('/', '\\')] + list(args[1:]),
                            shell=True,
                            env=env,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    out, _ = link.communicate()
    for line in out.splitlines():
      if (not line.startswith('   Creating library ') and
          not line.startswith('Generating code') and
          not line.startswith('Finished generating code')):
        print line
    return link.returncode

  def ExecLinkWithManifests(self, arch, embed_manifest, out, ldcmd, resname,
                            mt, rc, intermediate_manifest, *manifests):
    """A wrapper for handling creating a manifest resource and then executing
    a link command."""
    # The 'normal' way to do manifests is to have link generate a manifest
    # based on gathering dependencies from the object files, then merge that
    # manifest with other manifests supplied as sources, convert the merged
    # manifest to a resource, and then *relink*, including the compiled
    # version of the manifest resource. This breaks incremental linking, and
    # is generally overly complicated. Instead, we merge all the manifests
    # provided (along with one that includes what would normally be in the
    # linker-generated one, see msvs_emulation.py), and include that into the
    # first and only link. We still tell link to generate a manifest, but we
    # only use that to assert that our simpler process did not miss anything.
    variables = {
      'python': sys.executable,
      'arch': arch,
      'out': out,
      'ldcmd': ldcmd,
      'resname': resname,
      'mt': mt,
      'rc': rc,
      'intermediate_manifest': intermediate_manifest,
      'manifests': ' '.join(manifests),
    }
    add_to_ld = ''
    if manifests:
      subprocess.check_call(
          '%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
          '-manifest %(manifests)s -out:%(out)s.manifest' % variables)
      if embed_manifest == 'True':
        subprocess.check_call(
            '%(python)s gyp-win-tool manifest-to-rc %(arch)s %(out)s.manifest'
            ' %(out)s.manifest.rc %(resname)s' % variables)
        subprocess.check_call(
            '%(python)s gyp-win-tool rc-wrapper %(arch)s %(rc)s '
            '%(out)s.manifest.rc' % variables)
        add_to_ld = ' %(out)s.manifest.res' % variables
    subprocess.check_call(ldcmd + add_to_ld)

    # Run mt.exe on the theoretically complete manifest we generated, merging
    # it with the one the linker generated to confirm that the linker
    # generated one does not add anything. This is strictly unnecessary for
    # correctness, it's only to verify that e.g. /MANIFESTDEPENDENCY was not
    # used in a #pragma comment.
    if manifests:
      # Merge the intermediate one with ours to .assert.manifest, then check
      # that .assert.manifest is identical to ours.
      subprocess.check_call(
          '%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
          '-manifest %(out)s.manifest %(intermediate_manifest)s '
          '-out:%(out)s.assert.manifest' % variables)
      assert_manifest = '%(out)s.assert.manifest' % variables
      our_manifest = '%(out)s.manifest' % variables
      # Load and normalize the manifests. mt.exe sometimes removes whitespace,
      # and sometimes doesn't unfortunately.
      with open(our_manifest, 'rb') as our_f:
        with open(assert_manifest, 'rb') as assert_f:
          our_data = our_f.read().translate(None, string.whitespace)
          assert_data = assert_f.read().translate(None, string.whitespace)
      if our_data != assert_data:
        os.unlink(out)
        def dump(filename):
          sys.stderr.write('%s\n-----\n' % filename)
          with open(filename, 'rb') as f:
            sys.stderr.write(f.read() + '\n-----\n')
        dump(intermediate_manifest)
        dump(our_manifest)
        dump(assert_manifest)
        sys.stderr.write(
            'Linker generated manifest "%s" added to final manifest "%s" '
            '(result in "%s"). '
            'Were /MANIFEST switches used in #pragma statements? ' % (
              intermediate_manifest, our_manifest, assert_manifest))
        return 1

  def ExecManifestWrapper(self, arch, *args):
    """Run manifest tool with environment set. Strip out undesirable warning
    (some XML blocks are recognized by the OS loader, but not the manifest
    tool)."""
    env = self._GetEnv(arch)
    popen = subprocess.Popen(args, shell=True, env=env,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = popen.communicate()
    for line in out.splitlines():
      if line and 'manifest authoring warning 81010002' not in line:
        print line
    return popen.returncode

  def ExecManifestToRc(self, arch, *args):
    """Creates a resource file pointing a SxS assembly manifest.
    |args| is tuple containing path to resource file, path to manifest file
    and resource name which can be "1" (for executables) or "2" (for DLLs)."""
    manifest_path, resource_path, resource_name = args
    with open(resource_path, 'wb') as output:
      output.write('#include <windows.h>\n%s RT_MANIFEST "%s"' % (
        resource_name,
        os.path.abspath(manifest_path).replace('\\', '/')))

  def ExecMidlWrapper(self, arch, outdir, tlb, h, dlldata, iid, proxy, idl,
                      *flags):
    """Filter noisy filenames output from MIDL compile step that isn't
    quietable via command line flags.
    """
    args = ['midl', '/nologo'] + list(flags) + [
        '/out', outdir,
        '/tlb', tlb,
        '/h', h,
        '/dlldata', dlldata,
        '/iid', iid,
        '/proxy', proxy,
        idl]
    env = self._GetEnv(arch)
    popen = subprocess.Popen(args, shell=True, env=env,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = popen.communicate()
    # Filter junk out of stdout, and write filtered versions. Output we want
    # to filter is pairs of lines that look like this:
    # Processing C:\Program Files (x86)\Microsoft SDKs\...\include\objidl.idl
    # objidl.idl
    lines = out.splitlines()
    prefixes = ('Processing ', '64 bit Processing ')
    processing = set(os.path.basename(x)
                     for x in lines if x.startswith(prefixes))
    for line in lines:
      if not line.startswith(prefixes) and line not in processing:
        print line
    return popen.returncode

  def ExecAsmWrapper(self, arch, *args):
    """Filter logo banner from invocations of asm.exe."""
    env = self._GetEnv(arch)
    popen = subprocess.Popen(args, shell=True, env=env,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = popen.communicate()
    for line in out.splitlines():
      if (not line.startswith('Copyright (C) Microsoft Corporation') and
          not line.startswith('Microsoft (R) Macro Assembler') and
          not line.startswith(' Assembling: ') and
          line):
        print line
    return popen.returncode

  def ExecRcWrapper(self, arch, *args):
    """Filter logo banner from invocations of rc.exe. Older versions of RC
    don't support the /nologo flag."""
    env = self._GetEnv(arch)
    popen = subprocess.Popen(args, shell=True, env=env,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, _ = popen.communicate()
    for line in out.splitlines():
      if (not line.startswith('Microsoft (R) Windows (R) Resource Compiler') and
          not line.startswith('Copyright (C) Microsoft Corporation') and
          line):
        print line
    return popen.returncode

  def ExecActionWrapper(self, arch, rspfile, *dir):
    """Runs an action command line from a response file using the environment
    for |arch|. If |dir| is supplied, use that as the working directory."""
    env = self._GetEnv(arch)
    # TODO(scottmg): This is a temporary hack to get some specific variables
    # through to actions that are set after gyp-time. http://crbug.com/333738.
    for k, v in os.environ.iteritems():
      if k not in env:
        env[k] = v
    args = open(rspfile).read()
    dir = dir[0] if dir else None
    return subprocess.call(args, shell=True, env=env, cwd=dir)

  def ExecClCompile(self, project_dir, selected_files):
    """Executed by msvs-ninja projects when the 'ClCompile' target is used to
    build selected C/C++ files."""
    project_dir = os.path.relpath(project_dir, BASE_DIR)
    selected_files = selected_files.split(';')
    # "path^^" is ninja's syntax for "the outputs of the rule that builds
    # this source file".
    ninja_targets = [os.path.join(project_dir, filename) + '^^'
                     for filename in selected_files]
    cmd = ['ninja.exe']
    cmd.extend(ninja_targets)
    return subprocess.call(cmd, shell=True, cwd=BASE_DIR)
# Entry point: dispatch the command-line arguments (minus argv[0]) to main().
if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
| mit |
isyippee/nova | nova/keymgr/not_implemented_key_mgr.py | 112 | 1417 | # Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Key manager implementation that raises NotImplementedError
"""
from nova.keymgr import key_mgr
class NotImplementedKeyManager(key_mgr.KeyManager):
    """Key Manager Interface that raises NotImplementedError for all operations
    """

    def _unsupported(self):
        # Shared failure path: every operation of this backend is a no-op
        # that refuses the request.
        raise NotImplementedError()

    def create_key(self, ctxt, algorithm='AES', length=256, expiration=None,
                   **kwargs):
        """Key creation is not supported by this backend."""
        self._unsupported()

    def store_key(self, ctxt, key, expiration=None, **kwargs):
        """Key storage is not supported by this backend."""
        self._unsupported()

    def copy_key(self, ctxt, key_id, **kwargs):
        """Key copying is not supported by this backend."""
        self._unsupported()

    def get_key(self, ctxt, key_id, **kwargs):
        """Key retrieval is not supported by this backend."""
        self._unsupported()

    def delete_key(self, ctxt, key_id, **kwargs):
        """Key deletion is not supported by this backend."""
        self._unsupported()
| apache-2.0 |
wmde/jenkins-job-builder | jenkins_jobs/modules/hipchat_notif.py | 12 | 4499 | # Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Enable hipchat notification of build execution.
Example::
- job:
name: test_job
hipchat:
enabled: true
room: Testjob Build Notifications
start-notify: true
In the jenkins UI specification, the hipchat plugin must be explicitly
selected as a publisher. This is not required (or supported) here - use the
``enabled`` parameter to enable/disable the publisher action.
If you set ``enabled: false``, no hipchat parameters are written to XML.
"""
# Enabling hipchat notifications on a job requires specifying the hipchat
# config in job properties, and adding the hipchat notifier to the job's
# publishers list.
# The publisher configuration contains extra details not specified per job:
# - the hipchat authorisation token.
# - the jenkins server url.
# - a default room name/id.
# This complicates matters somewhat since the sensible place to store these
# details is in the global config file.
# The global config object is therefore passed down to the registry object,
# and this object is passed to the HipChat() class initialiser.
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
import jenkins_jobs.errors
import logging
import ConfigParser
import sys
logger = logging.getLogger(__name__)
class HipChat(jenkins_jobs.modules.base.Base):
    """Module that wires hipchat notifications into a job's XML.

    Per-job settings come from the YAML; global details (auth token,
    jenkins url) come from the jenkins-jobs configuration file.
    """
    sequence = 80

    def __init__(self, registry):
        self.authToken = None
        self.jenkinsUrl = None
        self.registry = registry

    def _load_global_data(self):
        """Load data from the global config object.

        This is done lazily to avoid looking up the '[hipchat]' section
        unless actually required.
        """
        if(not self.authToken):
            try:
                self.authToken = self.registry.global_config.get(
                    'hipchat', 'authtoken')
                # Require that the authtoken is non-null
                if self.authToken == '':
                    raise jenkins_jobs.errors.JenkinsJobsException(
                        "Hipchat authtoken must not be a blank string")
            # NOTE(review): Python 2-only except syntax; this module predates
            # the project's Python 3 support.
            except (ConfigParser.NoSectionError,
                    jenkins_jobs.errors.JenkinsJobsException), e:
                logger.fatal("The configuration file needs a hipchat section" +
                             " containing authtoken:\n{0}".format(e))
                sys.exit(1)
            self.jenkinsUrl = self.registry.global_config.get('jenkins', 'url')

    def gen_xml(self, parser, xml_parent, data):
        """Emit the hipchat job property and publisher elements for a job.

        Does nothing when the job has no hipchat config or it is disabled;
        raises YAMLFormatError when the mandatory 'room' key is missing.
        """
        hipchat = data.get('hipchat')
        if not hipchat or not hipchat.get('enabled', True):
            return
        if('room' not in hipchat):
            raise jenkins_jobs.errors.YAMLFormatError(
                "Missing hipchat 'room' specifier")
        self._load_global_data()
        properties = xml_parent.find('properties')
        if properties is None:
            properties = XML.SubElement(xml_parent, 'properties')
        pdefhip = XML.SubElement(properties,
                                 'jenkins.plugins.hipchat.'
                                 'HipChatNotifier_-HipChatJobProperty')
        XML.SubElement(pdefhip, 'room').text = hipchat['room']
        XML.SubElement(pdefhip, 'startNotification').text = str(
            hipchat.get('start-notify', False)).lower()
        publishers = xml_parent.find('publishers')
        if publishers is None:
            publishers = XML.SubElement(xml_parent, 'publishers')
        hippub = XML.SubElement(publishers,
                                'jenkins.plugins.hipchat.HipChatNotifier')
        XML.SubElement(hippub, 'jenkinsUrl').text = self.jenkinsUrl
        XML.SubElement(hippub, 'authToken').text = self.authToken
        # The room specified here is the default room. The default is
        # redundant in this case since a room must be specified. Leave empty.
        XML.SubElement(hippub, 'room').text = ''
| apache-2.0 |
mlperf/training_results_v0.6 | Fujitsu/benchmarks/resnet/implementations/mxnet/tests/python/predict/mxnet_predict_example.py | 54 | 2451 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys, os
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.append("../../../amalgamation/python/")
from mxnet_predict import Predictor, load_ndarray_file
import logging
import numpy as np
from skimage import io, transform
# Load the pre-trained model
prefix = "resnet/resnet-18"
# NOTE(review): num_round is defined but never used below -- the epoch is
# hard-coded in param_file; confirm before removing.
num_round = 0
symbol_file = "%s-symbol.json" % prefix
param_file = "%s-0000.params" % prefix
# Build a predictor with a fixed single-image input shape (NCHW).
predictor = Predictor(open(symbol_file, "r").read(),
                      open(param_file, "rb").read(),
                      {'data':(1, 3, 224, 224)})
# Human-readable class labels, one per line, indexed by class id.
synset = [l.strip() for l in open('resnet/synset.txt').readlines()]
def PreprocessImage(path, show_img=False):
    """Load an image and preprocess it into a (3, 224, 224) float array.

    The image is center-cropped to its largest square, resized to 224x224,
    rescaled to the 0-255 range, and transposed from HWC to CHW layout.

    Args:
        path: Path of the image file to load.
        show_img: Unused; accepted for backward compatibility with callers
            that pass it (no display is performed).

    Returns:
        numpy.ndarray of shape (3, 224, 224) ready to feed the predictor.
    """
    img = io.imread(path)
    print("Original Image Shape: ", img.shape)
    # Center-crop to the largest square that fits inside the image.
    short_edge = min(img.shape[:2])
    yy = int((img.shape[0] - short_edge) / 2)
    xx = int((img.shape[1] - short_edge) / 2)
    crop_img = img[yy : yy + short_edge, xx : xx + short_edge]
    # Resize the square crop to the network's 224x224 input resolution.
    resized_img = transform.resize(crop_img, (224, 224))
    # skimage.transform.resize returns floats in [0, 1]; rescale to 0-255.
    sample = np.asarray(resized_img) * 255
    # Swap axes to turn (224, 224, 3) HWC into (3, 224, 224) CHW.
    sample = np.swapaxes(sample, 0, 2)
    sample = np.swapaxes(sample, 1, 2)
    # NOTE(review): despite the original "sub mean" comment, no mean
    # subtraction is performed here — confirm the model was trained without it.
    return sample
# Get preprocessed batch (single image batch).
# NOTE(review): show_img=True is passed but PreprocessImage never displays
# the image; the flag is effectively ignored.
batch = PreprocessImage('./download.jpg', True)
predictor.forward(data=batch)
# First output head, first (and only) batch element: class probabilities.
prob = predictor.get_output(0)[0]
# Class indices sorted by descending probability.
pred = np.argsort(prob)[::-1]
# Get top1 label
top1 = synset[pred[0]]
print("Top1: ", top1)
# Get top5 label
top5 = [synset[pred[i]] for i in range(5)]
print("Top5: ", top5)
| apache-2.0 |
embeddedarm/android_external_chromium_org | chrome/common/extensions/docs/server2/sidenav_data_source_test.py | 23 | 4689 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import unittest
from extensions_paths import JSON_TEMPLATES
from mock_file_system import MockFileSystem
from server_instance import ServerInstance
from servlet import Request
from sidenav_data_source import SidenavDataSource, _AddLevels, _AddSelected
from test_file_system import TestFileSystem
from test_util import CaptureLogging
class SamplesDataSourceTest(unittest.TestCase):
  """Unit tests for SidenavDataSource and its _AddLevels/_AddSelected helpers."""

  def testAddLevels(self):
    """_AddLevels annotates every nesting depth with a 'level' key, starting at 1."""
    sidenav_json = [{
      'title': 'H2',
      'items': [{
        'title': 'H3',
        'items': [{ 'title': 'X1' }]
      }]
    }]
    expected = [{
      'level': 1,
      'title': 'H2',
      'items': [{
        'level': 2,
        'title': 'H3',
        'items': [{ 'level': 3, 'title': 'X1' }]
      }]
    }]
    _AddLevels(sidenav_json, 1)
    self.assertEqual(expected, sidenav_json)

  def testAddSelected(self):
    """_AddSelected marks the matching entry 'selected' and its parent 'child_selected'."""
    sidenav_json = [
      { 'href': '/AH2.html' },
      {
        'href': '/H2.html',
        'items': [{
          'href': '/H3.html'
        }]
      }
    ]
    expected = [
      { 'href': '/AH2.html' },
      {
        'child_selected': True,
        'href': '/H2.html',
        'items': [{
          'href': '/H3.html',
          'selected': True
        }]
      }
    ]
    _AddSelected(sidenav_json, '/H3.html')
    self.assertEqual(expected, sidenav_json)

  def testWithDifferentBasePath(self):
    """Unqualified and '/'-prefixed hrefs get the server base path prepended;
    fully-qualified URLs are left alone. Two entries are expected to be
    logged as unqualified-path warnings."""
    file_system = TestFileSystem({
      'apps_sidenav.json': json.dumps([
        { 'href': '/H1.html' },
        { 'href': '/H2.html' },
        { 'href': '/base/path/H2.html' },
        { 'href': 'https://qualified/X1.html' },
        {
          'href': 'H3.html',
          'items': [{
            'href': 'H4.html'
          }]
        },
      ])
    }, relative_to=JSON_TEMPLATES)
    expected = [
      {'href': '/base/path/H1.html', 'level': 2},
      {'href': '/base/path/H2.html', 'level': 2, 'selected': True},
      # Already-prefixed paths get the base path prepended again.
      {'href': '/base/path/base/path/H2.html', 'level': 2},
      {'href': 'https://qualified/X1.html', 'level': 2},
      {'items': [
        {'href': '/base/path/H4.html', 'level': 3}
      ],
      'href': '/base/path/H3.html', 'level': 2}
    ]
    server_instance = ServerInstance.ForTest(file_system,
                                             base_path='/base/path/')
    sidenav_data_source = SidenavDataSource(server_instance,
                                            Request.ForTest('/H2.html'))
    log_output = CaptureLogging(
        lambda: self.assertEqual(expected, sidenav_data_source.get('apps')))
    self.assertEqual(2, len(log_output))

  def testSidenavDataSource(self):
    """End-to-end: levels, selection, and path qualification in one pass,
    while verifying exactly one file read (SingleFile annotation)."""
    file_system = MockFileSystem(TestFileSystem({
      'apps_sidenav.json': json.dumps([{
        'title': 'H1',
        'href': 'H1.html',
        'items': [{
          'title': 'H2',
          'href': '/H2.html'
        }]
      }])
    }, relative_to=JSON_TEMPLATES))
    expected = [{
      'level': 2,
      'child_selected': True,
      'title': 'H1',
      'href': '/H1.html',
      'items': [{
        'level': 3,
        'selected': True,
        'title': 'H2',
        'href': '/H2.html'
      }]
    }]
    sidenav_data_source = SidenavDataSource(
        ServerInstance.ForTest(file_system), Request.ForTest('/H2.html'))
    self.assertTrue(*file_system.CheckAndReset())
    log_output = CaptureLogging(
        lambda: self.assertEqual(expected, sidenav_data_source.get('apps')))
    self.assertEqual(1, len(log_output))
    self.assertTrue(
        log_output[0].msg.startswith('Paths in sidenav must be qualified.'))
    # Test that only a single file is read when creating the sidenav, so that
    # we can be confident in the compiled_file_system.SingleFile annotation.
    self.assertTrue(*file_system.CheckAndReset(
        read_count=1, stat_count=1, read_resolve_count=1))

  def testCron(self):
    """Cron() must populate the cache without relying on a request object."""
    file_system = TestFileSystem({
      'apps_sidenav.json': '[{ "title": "H1" }]' ,
      'extensions_sidenav.json': '[{ "title": "H2" }]'
    }, relative_to=JSON_TEMPLATES)
    # Ensure Cron doesn't rely on request.
    sidenav_data_source = SidenavDataSource(
        ServerInstance.ForTest(file_system), request=None)
    sidenav_data_source.Cron().Get()
    # If Cron fails, apps_sidenav.json will not be cached, and the _cache_data
    # access will fail.
    # TODO(jshumway): Make a non hack version of this check.
    sidenav_data_source._cache._file_object_store.Get(
        '%s/apps_sidenav.json' % JSON_TEMPLATES).Get()._cache_data
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| bsd-3-clause |
leighpauls/k2cro4 | third_party/python_26/Lib/site-packages/pythonwin/pywin/framework/intpyapp.py | 17 | 15409 | # intpyapp.py - Interactive Python application class
#
import win32con
import win32api
import win32ui
import __main__
import sys
import string
import app
import traceback
from pywin.mfc import window, afxres, dialog
import commctrl
import dbgcommands
# Last filename entered in the "File/Locate" dialog; module-level so the
# dialog remembers it between invocations.
lastLocateFileName = ".py" # used in the "File/Locate" dialog...

# todo - _SetupSharedMenu should be moved to a framework class.
def _SetupSharedMenu_(self):
    """Populate a DocTemplate's shared menu with the Tools and Help entries."""
    sharedMenu = self.GetSharedMenu()
    from pywin.framework import toolmenu
    toolmenu.SetToolsMenu(sharedMenu)
    from pywin.framework import help
    help.SetHelpMenuOtherHelp(sharedMenu)

from pywin.mfc import docview
# Monkey-patch the hook onto every DocTemplate so all templates get the menus.
docview.DocTemplate._SetupSharedMenu_ = _SetupSharedMenu_
class MainFrame(app.MainFrame):
    """Pythonwin's MDI main frame: toolbars, docking, close/shutdown handling."""

    def OnCreate(self, createStruct):
        """Create the standard toolbar and debugger control bars; -1 aborts creation."""
        self.closing = 0
        if app.MainFrame.OnCreate(self, createStruct)==-1:
            return -1
        style = win32con.WS_CHILD | afxres.CBRS_SIZE_DYNAMIC | afxres.CBRS_TOP | afxres.CBRS_TOOLTIPS | afxres.CBRS_FLYBY

        self.EnableDocking(afxres.CBRS_ALIGN_ANY)
        tb = win32ui.CreateToolBar (self, style | win32con.WS_VISIBLE)
        tb.ModifyStyle(0, commctrl.TBSTYLE_FLAT)
        tb.LoadToolBar(win32ui.IDR_MAINFRAME)
        tb.EnableDocking(afxres.CBRS_ALIGN_ANY)
        tb.SetWindowText("Standard")
        self.DockControlBar(tb)
        # Any other packages which use toolbars
        from pywin.debugger.debugger import PrepareControlBars
        PrepareControlBars(self)
        # Note "interact" also uses dockable windows, but they already happen

        # And a "Tools" menu on the main frame.
        menu = self.GetMenu()
        import toolmenu
        toolmenu.SetToolsMenu(menu, 2)
        # And fix the "Help" menu on the main frame
        from pywin.framework import help
        help.SetHelpMenuOtherHelp(menu)

    def OnClose(self):
        """Shut down cleanly: close a pumping debugger first, save bar state, release help."""
        try:
            import pywin.debugger
            if pywin.debugger.currentDebugger is not None and pywin.debugger.currentDebugger.pumping:
                try:
                    pywin.debugger.currentDebugger.close(1)
                except:
                    import traceback
                    traceback.print_exc()
                # Debugger was pumping: abort this close and let it finish first.
                return
        except win32ui.error:
            pass
        self.closing = 1
        self.SaveBarState("ToolbarDefault")
        self.SetActiveView(None) # Otherwise MFC's OnClose may _not_ prompt for save.

        from pywin.framework import help
        help.FinalizeHelp()

        self.DestroyControlBar(afxres.AFX_IDW_TOOLBAR)
        self.DestroyControlBar(win32ui.ID_VIEW_TOOLBAR_DBG)

        return self._obj_.OnClose()

    def DestroyControlBar(self, id):
        """Destroy the control bar with the given id, ignoring 'not found' errors."""
        try:
            bar = self.GetControlBar(id)
        except win32ui.error:
            return
        bar.DestroyWindow()

    def OnCommand(self, wparam, lparam):
        # By default, the current MDI child frame will process WM_COMMAND
        # messages before any docked control bars - even if the control bar
        # has focus.  This is a problem for the interactive window when docked.
        # Therefore, we detect the situation of a view having the main frame
        # as its parent, and assume it must be a docked view (which it will in an MDI app)
        try:
            v = self.GetActiveView() # Raise an exception if none - good - then we want default handling
            # Main frame _does_ have a current view (ie, a docking view) - see if it wants it.
            if v.OnCommand(wparam, lparam):
                return 1
        except (win32ui.error, AttributeError):
            pass
        return self._obj_.OnCommand(wparam, lparam)
class InteractivePythonApp(app.CApp):
    """The Pythonwin application object: command hooks, DDE single-instance
    handling, startup/shutdown, and the handlers behind the main menus."""
    # This works if necessary - just we dont need to override the Run method.
    # def Run(self):
    #     return self._obj_.Run()

    def HookCommands(self):
        """Register all menu/toolbar command handlers with the framework."""
        app.CApp.HookCommands(self)
        dbgcommands.DebuggerCommandHandler().HookCommands()
        self.HookCommand(self.OnViewBrowse,win32ui.ID_VIEW_BROWSE)
        self.HookCommand(self.OnFileImport,win32ui.ID_FILE_IMPORT)
        self.HookCommand(self.OnFileCheck,win32ui.ID_FILE_CHECK)
        self.HookCommandUpdate(self.OnUpdateFileCheck, win32ui.ID_FILE_CHECK)
        self.HookCommand(self.OnFileRun,win32ui.ID_FILE_RUN)
        self.HookCommand(self.OnFileLocate,win32ui.ID_FILE_LOCATE)
        self.HookCommand(self.OnInteractiveWindow, win32ui.ID_VIEW_INTERACTIVE)
        self.HookCommandUpdate(self.OnUpdateInteractiveWindow, win32ui.ID_VIEW_INTERACTIVE)
        self.HookCommand(self.OnViewOptions, win32ui.ID_VIEW_OPTIONS)
        self.HookCommand(self.OnHelpIndex, afxres.ID_HELP_INDEX)
        self.HookCommand(self.OnFileSaveAll, win32ui.ID_FILE_SAVE_ALL)
        self.HookCommand(self.OnViewToolbarDbg, win32ui.ID_VIEW_TOOLBAR_DBG)
        self.HookCommandUpdate(self.OnUpdateViewToolbarDbg, win32ui.ID_VIEW_TOOLBAR_DBG)

    def CreateMainFrame(self):
        """Factory for the application's main frame window."""
        return MainFrame()

    def MakeExistingDDEConnection(self):
        # Use DDE to connect to an existing instance
        # Return None if no existing instance
        try:
            import intpydde
        except ImportError:
            # No dde support!
            return None
        conv = intpydde.CreateConversation(self.ddeServer)
        try:
            conv.ConnectTo("Pythonwin", "System")
            return conv
        except intpydde.error:
            return None

    def InitDDE(self):
        # Do all the magic DDE handling.
        # Returns TRUE if we have pumped the arguments to our
        # remote DDE app, and we should terminate.
        try:
            import intpydde
        except ImportError:
            self.ddeServer = None
            intpydde = None
        if intpydde is not None:
            self.ddeServer = intpydde.DDEServer(self)
            self.ddeServer.Create("Pythonwin", intpydde.CBF_FAIL_SELFCONNECTIONS )
            try:
                # If there is an existing instance, pump the arguments to it.
                connection = self.MakeExistingDDEConnection()
                if connection is not None:
                    if self.ProcessArgs(sys.argv, connection) is None:
                        return 1
            except:
                win32ui.MessageBox("There was an error in the DDE conversation with Pythonwin")
                traceback.print_exc()

    def InitInstance(self):
        """Application startup: DDE hand-off, frame creation, module loading,
        toolbar state restoration, then command-line processing."""
        # Allow "/nodde" and "/newinstance to optimize this!
        if "/nodde" not in sys.argv and "/newinstance" not in sys.argv:
            if self.InitDDE():
                return 1 # A remote DDE client is doing it for us!
        else:
            self.ddeServer = None

        win32ui.SetRegistryKey("Python %s" % (sys.winver,)) # MFC automatically puts the main frame caption on!
        app.CApp.InitInstance(self)

        # Create the taskbar icon
        win32ui.CreateDebuggerThread()

        # Allow Pythonwin to host OCX controls.
        win32ui.EnableControlContainer()

        # Display the interactive window if the user wants it.
        import interact
        interact.CreateInteractiveWindowUserPreference()

        # Load the modules we use internally.
        self.LoadSystemModules()

        # Load additional module the user may want.
        self.LoadUserModules()

        # Load the ToolBar state near the end of the init process, as
        # there may be Toolbar IDs created by the user or other modules.
        # By now all these modules should be loaded, so all the toolbar IDs loaded.
        try:
            self.frame.LoadBarState("ToolbarDefault")
        except win32ui.error:
            # MFC sucks.  It does essentially "GetDlgItem(x)->Something", so if the
            # toolbar with ID x does not exist, MFC crashes!  Pythonwin has a trap for this
            # but I need to investigate more how to prevent it (AFAIK, ensuring all the
            # toolbars are created by now _should_ stop it!)
            pass

        # Finally process the command line arguments.
        self.ProcessArgs(sys.argv)

    def ExitInstance(self):
        """Application shutdown: stop debugger thread, interactive window, DDE server."""
        win32ui.DestroyDebuggerThread()
        try:
            import interact
            interact.DestroyInteractiveWindow()
        except:
            pass
        if self.ddeServer is not None:
            self.ddeServer.Shutdown()
            self.ddeServer = None
        return app.CApp.ExitInstance(self)

    def Activate(self):
        # Bring to the foreground.  Mainly used when another app starts up, it asks
        # this one to activate itself, then it terminates.
        frame = win32ui.GetMainFrame()
        frame.SetForegroundWindow()
        if frame.GetWindowPlacement()[1]==win32con.SW_SHOWMINIMIZED:
            frame.ShowWindow(win32con.SW_RESTORE)

    def ProcessArgs(self, args, dde = None):
        """Dispatch command-line arguments, either locally or (when dde is
        given) by pumping them to an already-running instance.
        Returns 1 for '/new' (a fresh instance should proceed), else None."""
        # If we are going to talk to a remote app via DDE, then
        # activate it!
        if dde is not None: dde.Exec("self.Activate()")
        if len(args) and args[0] in ['/nodde','/newinstance']: del args[0] # already handled.
        if len(args)<1 or not args[0]: # argv[0]=='' when started without args, just like Python.exe!
            return
        try:
            if args[0] and args[0][0]!='/':
                # Bare filename: use the configured default action (usually /edit).
                argStart = 0
                argType = string.lower(win32ui.GetProfileVal("Python","Default Arg Type","/edit"))
            else:
                argStart = 1
                argType = args[0]
            if argStart >= len(args):
                raise TypeError, "The command line requires an additional arg."
            if argType=="/edit":
                # Load up the default application.
                if dde:
                    fname = win32api.GetFullPathName(args[argStart])
                    dde.Exec("win32ui.GetApp().OpenDocumentFile(%s)" % (`fname`))
                else:
                    win32ui.GetApp().OpenDocumentFile(args[argStart])
            elif argType=="/rundlg":
                if dde:
                    dde.Exec("import scriptutils;scriptutils.RunScript('%s', '%s', 1)" % (args[argStart], string.join(args[argStart+1:])))
                else:
                    import scriptutils
                    scriptutils.RunScript(args[argStart], string.join(args[argStart+1:]))
            elif argType=="/run":
                if dde:
                    dde.Exec("import scriptutils;scriptutils.RunScript('%s', '%s', 0)" % (args[argStart], string.join(args[argStart+1:])))
                else:
                    import scriptutils
                    scriptutils.RunScript(args[argStart], string.join(args[argStart+1:]), 0)
            elif argType=="/app":
                raise RuntimeError, "/app only supported for new instances of Pythonwin.exe"
            elif argType=='/new': # Allow a new instance of Pythonwin
                return 1
            elif argType=='/dde': # Send arbitary command
                if dde is not None:
                    dde.Exec(args[argStart])
                else:
                    win32ui.MessageBox("The /dde command can only be used\r\nwhen Pythonwin is already running")
            else:
                raise TypeError, "Command line arguments not recognised"
        except:
            typ, val, tb = sys.exc_info()
            print "There was an error processing the command line args"
            traceback.print_exception(typ, val, tb, None, sys.stdout)
            win32ui.OutputDebug("There was a problem with the command line args - %s: %s" % (`typ`,`val`))
            tb = None # Prevent a cycle

    def LoadSystemModules(self):
        """Load the modules Pythonwin itself always requires."""
        self.DoLoadModules("editor,stdin")

    def LoadUserModules(self, moduleNames = None):
        # Load the users modules.
        if moduleNames is None:
            default = "sgrepmdi,mdi_pychecker"
            moduleNames=win32ui.GetProfileVal('Python','Startup Modules',default)
        self.DoLoadModules(moduleNames)

    def DoLoadModules(self, moduleNames): # ", sep string of module names.
        """Import each comma-separated module name, reporting (not raising) failures."""
        if not moduleNames: return
        modules = string.splitfields(moduleNames,",")
        for module in modules:
            try:
                exec "import "+module
            except: # Catch em all, else the app itself dies! 'ImportError:
                traceback.print_exc()
                msg = 'Startup import of user module "%s" failed' % module
                print msg
                win32ui.MessageBox(msg)

    #
    # DDE Callback
    #
    def OnDDECommand(self, command):
        """Execute a command string received over DDE in this app's context."""
        # print "DDE Executing", `command`
        try:
            exec command + "\n"
        except:
            print "ERROR executing DDE command: ", command
            traceback.print_exc()
            raise

    #
    # General handlers
    #
    def OnViewBrowse( self, id, code ):
        " Called when ViewBrowse message is received "
        from pywin.mfc import dialog
        from pywin.tools import browser
        obName = dialog.GetSimpleInput('Object', '__builtins__', 'Browse Python Object')
        if obName is None:
            return
        try:
            browser.Browse(eval(obName, __main__.__dict__, __main__.__dict__))
        except NameError:
            win32ui.MessageBox('This is no object with this name')
        except AttributeError:
            win32ui.MessageBox('The object has no attribute of that name')
        except:
            traceback.print_exc()
            win32ui.MessageBox('This object can not be browsed')

    def OnFileImport( self, id, code ):
        " Called when a FileImport message is received. Import the current or specified file"
        import scriptutils
        scriptutils.ImportFile()

    def OnFileCheck( self, id, code ):
        " Called when a FileCheck message is received. Check the current file."
        import scriptutils
        scriptutils.CheckFile()

    def OnUpdateFileCheck(self, cmdui):
        # Only enable "Check" when there is an active file to check.
        import scriptutils
        cmdui.Enable( scriptutils.GetActiveFileName(0) is not None )

    def OnFileRun( self, id, code ):
        " Called when a FileRun message is received. "
        import scriptutils
        # Holding Shift skips the dialog and runs directly.
        showDlg = win32api.GetKeyState(win32con.VK_SHIFT) >= 0
        scriptutils.RunScript(None, None, showDlg)

    def OnFileLocate( self, id, code ):
        """Prompt for a dotted module path and open the located source file."""
        from pywin.mfc import dialog
        import scriptutils
        import os
        global lastLocateFileName # save the new version away for next time...
        # Loop until a good name, or cancel
        while 1:
            name = dialog.GetSimpleInput('File name', lastLocateFileName, 'Locate Python File')
            if name is None: # Cancelled.
                break
            lastLocateFileName = name
            # if ".py" supplied, rip it off!
            if string.lower(lastLocateFileName[-3:])=='.py':
                lastLocateFileName = lastLocateFileName[:-3]
            # Convert dotted module names into a relative path.
            lastLocateFileName = string.translate(lastLocateFileName, string.maketrans(".","\\"))
            newName = scriptutils.LocatePythonFile(lastLocateFileName)
            if newName is None:
                win32ui.MessageBox("The file '%s' can not be located" % lastLocateFileName)
            else:
                win32ui.GetApp().OpenDocumentFile(newName)
                break

    # Display all the "options" proprety pages we can find
    def OnViewOptions(self, id, code):
        win32ui.InitRichEdit()
        sheet = dialog.PropertySheet("Pythonwin Options")
        # Add property pages we know about that need manual work.
        from pywin.dialogs import ideoptions
        sheet.AddPage( ideoptions.OptionsPropPage() )

        import toolmenu
        sheet.AddPage( toolmenu.ToolMenuPropPage() )

        # Get other dynamic pages from templates.
        pages = []
        for template in self.GetDocTemplateList():
            try:
                # Dont actually call the function with the exception handler.
                getter = template.GetPythonPropertyPages
            except AttributeError:
                # Template does not provide property pages!
                continue
            pages = pages + getter()

        # Debugger template goes at the end
        try:
            from pywin.debugger import configui
        except ImportError:
            configui = None
        if configui is not None: pages.append(configui.DebuggerOptionsPropPage())
        # Now simply add the pages, and display the dialog.
        for page in pages:
            sheet.AddPage(page)

        if sheet.DoModal()==win32con.IDOK:
            win32ui.SetStatusText("Applying configuration changes...", 1)
            win32ui.DoWaitCursor(1)
            # Tell every Window in our app that win.ini has changed!
            win32ui.GetMainFrame().SendMessageToDescendants(win32con.WM_WININICHANGE, 0, 0)
            win32ui.DoWaitCursor(0)

    def OnInteractiveWindow(self, id, code):
        # toggle the existing state.
        import interact
        interact.ToggleInteractiveWindow()

    def OnUpdateInteractiveWindow(self, cmdui):
        try:
            interact=sys.modules['pywin.framework.interact']
            state = interact.IsInteractiveWindowVisible()
        except KeyError: # Interactive module hasnt ever been imported.
            state = 0
        cmdui.Enable()
        cmdui.SetCheck(state)

    def OnFileSaveAll(self, id, code):
        # Only attempt to save editor documents.
        from pywin.framework.editor import editorTemplate
        docs = filter(lambda doc: doc.IsModified() and doc.GetPathName(), editorTemplate.GetDocumentList())
        map(lambda doc: doc.OnSaveDocument(doc.GetPathName()), docs)
        win32ui.SetStatusText("%d documents saved" % len(docs), 1)

    def OnViewToolbarDbg(self, id, code):
        if code==0:
            return not win32ui.GetMainFrame().OnBarCheck(id)

    def OnUpdateViewToolbarDbg(self, cmdui):
        win32ui.GetMainFrame().OnUpdateControlBarMenu(cmdui)
        cmdui.Enable(1)

    def OnHelpIndex( self, id, code ):
        import help
        help.SelectAndRunHelpFile()

# As per the comments in app.py, this use is depreciated.
# app.AppBuilder = InteractivePythonApp

# Now all we do is create the application
thisApp = InteractivePythonApp()
| bsd-3-clause |
heafod/SaltAdmin | view/main.py | 7 | 2274 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
from config.setting import *
def make_html(s):
    """HTML-escape *s* and convert CRLF line breaks to <br /> tags."""
    return web.websafe(s).replace('\r\n', '<br />')
# Expose make_html to every web.py template.
web.template.Template.globals['make_html'] = make_html
# Fetch the current session's identity fields (SID, ShowName, ...).
def getLogin():
    """Return a dict of the logged-in session's fields, or False when logged out."""
    session = web.config._session
    if session.get('isLogin') is False:
        return False
    keys = ('SID', 'Username', 'Token', 'ShowName',
            'LastDate', 'LastIP', 'LastLocation', 'Lstat')
    return {key: getattr(session, key) for key in keys}
# Login-check decorator: gate a handler behind an authenticated session.
def Check_Login(func):
    """Run *func* only for logged-in sessions; otherwise remember the
    requested URL in a cookie and redirect to the login page."""
    def inner(*args):
        logged_in = web.config._session.get('isLogin') is True
        if not logged_in:
            web.setcookie('HTTP_REFERER', web.ctx.fullpath, 86400)
            return web.seeother("/login")
        return func(*args)
    return inner
# Shared helper: format a duration (seconds) for display.
def format_timestamp(s):
    """Format a duration in seconds as a Chinese "D天H小时M分S秒" string.

    Accepts ints, floats or numeric strings; fractional seconds are
    truncated. Uses integer divmod so results stay exact on both
    Python 2 and Python 3 (plain '/' yields floats on Python 3 and the
    original code silently relied on '%d' truncating them).
    """
    S = int(float(s))
    D = 0
    H = 0
    M = 0
    # Mirror the original guard structure: values <= 59s / 59m / 23h
    # are left in the smaller unit unchanged.
    if S > 59:
        M, S = divmod(S, 60)
    if M > 59:
        H, M = divmod(M, 60)
    if H > 23:
        D, H = divmod(H, 24)
    return "%d天%d小时%d分%d秒" % (D, H, M, S)
# Define 404
def notfound():
    """404 handler: render the app shell for logged-in users, otherwise
    remember the requested URL and render the login page."""
    #return web.notfound(render.info("404","404 - Not Found"))
    #return web.notfound("404 - Not Found")
    #return web.notfound("<center>Developing ~~~ <br />Coming Soon ~~! <br /><I>Design By Luxiaok</I></center>")
    if getLogin():
        # NOTE(review): getLogin() is called three times here; the first call's
        # result could be reused.
        SID = getLogin()['SID']
        ShowName = getLogin()['ShowName']
        return web.notfound(render.test(ShowName=ShowName,uid=SID))
    else:
        web.setcookie('HTTP_REFERER', web.ctx.fullpath, 86400)
        #return web.seeother("/login")
        return web.notfound(render.login())
# Install as web.py's global 404 handler.
web.webapi.notfound = notfound
# Define 500
def internalerror():
    """500 handler: plain-text internal error response."""
    return web.internalerror("500 - Internal Error")
# Install as web.py's global 500 handler.
web.webapi.internalerror = internalerror
#if __name__ == "__main__":
# app = web.application(urls, globals())
# app.run()
| gpl-2.0 |
biothings/biothings_explorer | biothings_explorer/api_call_dispatcher.py | 1 | 11261 | # -*- coding: utf-8 -*-
"""
Make API calls.
.. moduleauthor:: Jiwen Xin <kevinxin@scripps.edu>
"""
from collections import defaultdict
from copy import deepcopy
from itertools import groupby
from operator import itemgetter
from .registry import Registry
from .apicall import BioThingsCaller
from .api_output_parser import OutputParser
from .api_preprocess import APIPreprocess
class Dispatcher:
    """Dispatch API calls.

    Groups query-graph edges into deduplicated API operations, fires them
    through BioThingsCaller, then normalizes every response into a common
    {input_value: {predicate: [records]}} shape.
    """

    def __init__(self, registry=None):
        """Load BTE registry and API caller."""
        if not registry:
            self.registry = Registry().registry
        else:
            self.registry = registry.registry
        self.caller = BioThingsCaller()

    @staticmethod
    def get_unique_edge_id(edge):
        """Build a deterministic string key identifying an edge's API call:
        value + server/method/path plus sorted request-body and parameter items."""
        operation = edge["operation"]
        _id = "-".join(
            [
                str(edge["value"]),
                operation["server"],
                operation["method"],
                operation["path"],
            ]
        )
        request_body = operation.get("requestBody")
        if request_body and request_body.get("body"):
            # Sort keys so logically identical calls hash to the same id.
            for k in sorted(request_body.get("body").keys()):
                _id += "-" + k + "-" + str(request_body.get("body")[k])
        parameters = operation.get("parameters")
        if parameters:
            for k in sorted(parameters.keys()):
                _id += "-" + k + "-" + str(parameters[k])
        return _id

    @staticmethod
    def get_all_edges(query_id2inputs_mapping):
        """Return one representative edge (the first) per internal query id."""
        res = []
        for v in query_id2inputs_mapping.values():
            if isinstance(v, list) and len(v) > 0:
                res.append(v[0])
        return res

    @staticmethod
    def group_edges(edges):
        """Merge edges sharing an operation_id, unioning their input values."""
        grouper = itemgetter("operation_id")
        new_edges = []
        # group all edges based on their API and input_field value
        for _, grp in groupby(sorted(edges, key=grouper), grouper):
            grp = list(grp)
            new_edge = deepcopy(grp[0])
            values = set()
            for edge in grp:
                if isinstance(edge["value"], list):
                    values |= set(edge["value"])
                else:
                    values.add(edge["value"])
            new_edge["value"] = values
            new_edges.append(new_edge)
        return new_edges

    @staticmethod
    def subset_mapping_file(edges, mapping_file):
        """Only maintain a subset of mapping file based on edge label."""
        mapping_keys = [_item.get("mapping_key") for _item in edges]
        if mapping_keys:
            # '@type'/'@context' are always kept alongside the requested keys.
            mapping_keys += ["@type", "@context"]
            return {k: v for (k, v) in mapping_file.items() if k in mapping_keys}
        return mapping_file

    def construct_api_calls(self, edges):
        """Construct API calls for apicall module using edge groups.

        Returns a dict mapping an internal query id ("API <n>.<m>") to the
        list of edges served by that single API call. Batch-capable
        operations join all values into one call; others fan out one call
        per value.
        """
        unique_edge_ids = set()
        edge_id2query_id_mapping = {}
        query_id2inputs_mapping = defaultdict(list)
        edges = self.group_edges(edges)
        for edge in edges:
            api = edge["api"]
            if edge["operation"].get("supportBatch"):
                # Batch API: collapse all input values into one separated string.
                edge["value"] = edge["operation"]["inputSeparator"].join(edge["value"])
                edge_id = self.get_unique_edge_id(edge)
                if edge_id in unique_edge_ids:
                    internal_query_id = edge_id2query_id_mapping[edge_id]
                else:
                    internal_query_id = (
                        "API "
                        + self.api_dict[api]["num"]
                        + "."
                        + str(self.api_dict[api]["alphas"].pop(0))
                    )
                    edge["internal_query_id"] = internal_query_id
                    edge_id2query_id_mapping[edge_id] = internal_query_id
                    unique_edge_ids.add(edge_id)
                edge["internal_query_id"] = internal_query_id
                query_id2inputs_mapping[internal_query_id].append(edge)
                # internal_query_id += 1
            else:
                # Non-batch API: one call (and one edge copy) per input value.
                for val in edge["value"]:
                    new_edge = deepcopy(edge)
                    new_edge["value"] = val
                    edge_id = self.get_unique_edge_id(new_edge)
                    if edge_id in unique_edge_ids:
                        internal_query_id = edge_id2query_id_mapping[edge_id]
                    else:
                        internal_query_id = (
                            "API "
                            + self.api_dict[api]["num"]
                            + "."
                            + str(self.api_dict[api]["alphas"].pop(0))
                        )
                        edge_id2query_id_mapping[edge_id] = internal_query_id
                        unique_edge_ids.add(edge_id)
                    new_edge["internal_query_id"] = internal_query_id
                    query_id2inputs_mapping[internal_query_id].append(new_edge)
                    # internal_query_id += 1
        return query_id2inputs_mapping

    @staticmethod
    def add_metadata_to_output(operation, res, output_id):
        """Stamp each output record with its source API metadata ($api,
        $source, @type) and return a list of one record per output id value."""
        if isinstance(res, dict):
            if output_id not in res:
                return []
            if not isinstance(res[output_id], list):
                res[output_id] = [res[output_id]]
            new_res = []
            # Fan a multi-valued output id out into one record per value.
            for val in res[output_id]:
                tmp = deepcopy(res)
                tmp[output_id] = val
                tmp.update(
                    {
                        "$api": operation["api_name"],
                        "$source": operation.get("source"),
                        "@type": operation["output_type"],
                    }
                )
                new_res.append(tmp)
            return new_res
        # Scalar result: wrap it into the standard record shape.
        res = {
            "$api": operation["api_name"],
            "$source": operation.get("source"),
            "@type": operation["output_type"],
            operation["output_id"]: [res],
        }
        return [res]

    @staticmethod
    def count_hits(res):
        """Count all records across the nested {input: {predicate: [...]}} result."""
        cnt = 0
        if not res:
            return cnt
        for pred_infos in res.values():
            if pred_infos:
                for info in pred_infos.values():
                    cnt += len(info)
        return cnt

    def dispatch(self, edges, verbose=False, loop=None):
        """Send request to and parse response from API.

        Returns a (results, log) tuple where results maps each input value
        to {predicate: [metadata-stamped records]}.
        """
        results = {}
        self.unique_apis = {_edge["api"] for _edge in edges if _edge}
        self.api_dict = {}
        # Number each API ("API 1", "API 2", ...) and give it a pool of
        # sub-ids for its individual calls.
        for i, _api in enumerate(list(self.unique_apis)):
            self.api_dict[_api] = {"alphas": list(range(1, 10000)), "num": str(i + 1)}
        query_id2inputs_mapping = self.construct_api_calls(edges)
        # print(apis, inputs, modes, vals, grped_edges)
        responses, self.log = self.caller.call_apis(
            self.get_all_edges(query_id2inputs_mapping), verbose=verbose, loop=loop
        )
        if verbose:
            print("\n\n==== Step #3: Output normalization ====\n")
        self.log.append("\n\n==== Step #3: Output normalization ====\n")
        for response in responses:
            if response["result"] == {}:
                continue
            output_types = []
            query_id = response["internal_query_id"]
            total_hits = 0
            for edge in query_id2inputs_mapping[query_id]:
                operation = edge["operation"]
                api_name = edge["api"]
                # SEMMED / CORD APIs need the expected output type for parsing.
                if api_name[:4] in ["semm", "cord"]:
                    output_types = [edge["output_type"]]
                _res = APIPreprocess(
                    response["result"], operation["api_type"], api_name, output_types
                ).restructure()
                mapping = operation["response_mapping"]
                _res = OutputParser(
                    _res,
                    mapping,
                    operation["supportBatch"],
                    api_name,
                    operation["api_type"],
                ).parse()
                if not operation["supportBatch"]:
                    # preprocess biolink results
                    # if val is not present in results dict and _res is not empty
                    if not _res:
                        continue
                    val = edge["value"]
                    if val not in results:
                        results[val] = {}
                    # loop through API call response
                    for k, v in _res.items():
                        # if key is not present in final res, create a list
                        if k not in results[val]:
                            results[val][k] = []
                        if isinstance(v, list):
                            for _v in v:
                                _v = self.add_metadata_to_output(
                                    operation, _v, operation["output_id"]
                                )
                                total_hits += len(_v)
                                results[val][k] += _v
                        else:
                            v = self.add_metadata_to_output(
                                operation, v, operation["output_id"]
                            )
                            total_hits += len(v)
                            results[val][k] += v
                else:
                    # Batch response: outer keys are the individual input values.
                    if not _res:
                        continue
                    for m, n in _res.items():
                        if m not in results:
                            results[m] = {}
                        for k, v in n.items():
                            if k not in results[m]:
                                results[m][k] = []
                            if isinstance(v, list):
                                for _v in v:
                                    _v = self.add_metadata_to_output(
                                        operation, _v, operation["output_id"]
                                    )
                                    total_hits += len(_v)
                                    results[m][k] += _v
                            elif isinstance(v, dict):
                                v = self.add_metadata_to_output(
                                    operation, v, operation["output_id"]
                                )
                                total_hits += len(v)
                                results[m][k] += v
                            else:
                                # 'query' echoes the request input; skip it.
                                if k == "query":
                                    continue
                                v = self.add_metadata_to_output(
                                    operation, v, operation["output_id"]
                                )
                                total_hits += len(v)
                                results[m][k] += v
            # Per-query-id hit summary. NOTE(review): indentation was lost in
            # this source; this logging is placed once per response (api_name
            # is then the last processed edge's API) — confirm against upstream.
            if verbose:
                if total_hits > 0:
                    print("{} {}: {} hits".format(query_id, api_name, total_hits))
                else:
                    print("{} {}: No hits".format(query_id, api_name))
            if total_hits > 0:
                self.log.append("{} {}: {} hits".format(query_id, api_name, total_hits))
            else:
                self.log.append("{} {}: No hits".format(query_id, api_name))
        return (dict(results), self.log)
| apache-2.0 |
halberom/ansible | lib/ansible/modules/cloud/amazon/ec2_ami.py | 25 | 21901 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['stableinterface'],
'supported_by': 'committer',
'version': '1.0'}
DOCUMENTATION = '''
---
module: ec2_ami
version_added: "1.3"
short_description: create or destroy an image in ec2
description:
- Creates or deletes ec2 images.
options:
instance_id:
description:
- Instance ID to create the AMI from.
required: false
default: null
name:
description:
- The name of the new AMI.
required: false
default: null
architecture:
version_added: "2.3"
description:
- The target architecture of the image to register
required: false
default: null
kernel_id:
version_added: "2.3"
description:
- The target kernel id of the image to register
required: false
default: null
virtualization_type:
version_added: "2.3"
description:
- The virtualization type of the image to register
required: false
default: null
root_device_name:
version_added: "2.3"
description:
- The root device name of the image to register
required: false
default: null
wait:
description:
- Wait for the AMI to be in state 'available' before returning.
required: false
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- How long before wait gives up, in seconds.
default: 300
state:
description:
- Create or deregister/delete AMI.
required: false
default: 'present'
choices: [ "absent", "present" ]
description:
description:
- Human-readable string describing the contents and purpose of the AMI.
required: false
default: null
no_reboot:
description:
- Flag indicating that the bundling process should not attempt to shutdown the instance before bundling. If this flag is True, the responsibility of maintaining file system integrity is left to the owner of the instance.
required: false
default: no
choices: [ "yes", "no" ]
image_id:
description:
- Image ID to be deregistered.
required: false
default: null
device_mapping:
version_added: "2.0"
description:
- List of device hashes/dictionaries with custom configurations (same block-device-mapping parameters)
- "Valid properties include: device_name, volume_type, size (in GB), delete_on_termination (boolean), no_device (boolean), snapshot_id, iops (for io1 volume_type)"
required: false
default: null
delete_snapshot:
description:
- Delete snapshots when deregistering the AMI.
required: false
default: "no"
choices: [ "yes", "no" ]
tags:
description:
- A dictionary of tags to add to the new image; '{"key":"value"}' and '{"key":"value","key":"value"}'
required: false
default: null
version_added: "2.0"
launch_permissions:
description:
- Users and groups that should be able to launch the AMI. Expects
dictionary with a key of user_ids and/or group_names. user_ids should
be a list of account ids. group_name should be a list of groups, "all"
is the only acceptable value currently.
required: false
default: null
version_added: "2.0"
author:
- "Evan Duffield (@scicoin-project) <eduffield@iacquire.com>"
- "Constantin Bugneac (@Constantin07) <constantin.bugneac@endava.com>"
- "Ross Williams (@gunzy83) <gunzy83au@gmail.com>"
extends_documentation_fragment:
- aws
- ec2
'''
# Thank you to iAcquire for sponsoring development of this module.
EXAMPLES = '''
# Basic AMI Creation
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
instance_id: i-xxxxxx
wait: yes
name: newtest
tags:
Name: newtest
Service: TestService
register: image
# Basic AMI Creation, without waiting
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
instance_id: i-xxxxxx
wait: no
name: newtest
register: image
# AMI Registration from EBS Snapshot
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
name: newtest
state: present
architecture: x86_64
virtualization_type: hvm
root_device_name: /dev/xvda
device_mapping:
- device_name: /dev/xvda
size: 8
snapshot_id: snap-xxxxxxxx
delete_on_termination: true
volume_type: gp2
register: image
# AMI Creation, with a custom root-device size and another EBS attached
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
instance_id: i-xxxxxx
name: newtest
device_mapping:
- device_name: /dev/sda1
size: XXX
delete_on_termination: true
volume_type: gp2
- device_name: /dev/sdb
size: YYY
delete_on_termination: false
volume_type: gp2
register: image
# AMI Creation, excluding a volume attached at /dev/sdb
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
instance_id: i-xxxxxx
name: newtest
device_mapping:
- device_name: /dev/sda1
size: XXX
delete_on_termination: true
volume_type: gp2
- device_name: /dev/sdb
no_device: yes
register: image
# Deregister/Delete AMI (keep associated snapshots)
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
image_id: "{{ instance.image_id }}"
delete_snapshot: False
state: absent
# Deregister AMI (delete associated snapshots too)
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
image_id: "{{ instance.image_id }}"
delete_snapshot: True
state: absent
# Update AMI Launch Permissions, making it public
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
image_id: "{{ instance.image_id }}"
state: present
launch_permissions:
group_names: ['all']
# Allow AMI to be launched by another account
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
image_id: "{{ instance.image_id }}"
state: present
launch_permissions:
user_ids: ['123456789012']
'''
RETURN = '''
architecture:
description: architecture of image
returned: when AMI is created or already exists
type: string
sample: "x86_64"
block_device_mapping:
description: block device mapping associated with image
returned: when AMI is created or already exists
type: a dictionary of block devices
sample: {
"/dev/sda1": {
"delete_on_termination": true,
"encrypted": false,
"size": 10,
"snapshot_id": "snap-1a03b80e7",
"volume_type": "standard"
}
}
creationDate:
description: creation date of image
returned: when AMI is created or already exists
type: string
sample: "2015-10-15T22:43:44.000Z"
description:
description: description of image
returned: when AMI is created or already exists
type: string
sample: "nat-server"
hypervisor:
description: type of hypervisor
returned: when AMI is created or already exists
type: string
sample: "xen"
image_id:
description: id of the image
returned: when AMI is created or already exists
type: string
sample: "ami-1234abcd"
is_public:
description: whether image is public
returned: when AMI is created or already exists
type: bool
sample: false
location:
description: location of image
returned: when AMI is created or already exists
type: string
sample: "315210894379/nat-server"
name:
description: ami name of image
returned: when AMI is created or already exists
type: string
sample: "nat-server"
ownerId:
description: owner of image
returned: when AMI is created or already exists
type: string
sample: "435210894375"
platform:
description: platform of image
returned: when AMI is created or already exists
type: string
sample: null
root_device_name:
description: root device name of image
returned: when AMI is created or already exists
type: string
sample: "/dev/sda1"
root_device_type:
description: root device type of image
returned: when AMI is created or already exists
type: string
sample: "ebs"
state:
description: state of image
returned: when AMI is created or already exists
type: string
sample: "available"
tags:
description: a dictionary of tags assigned to image
returned: when AMI is created or already exists
type: dictionary of tags
sample: {
"Env": "devel",
"Name": "nat-server"
}
virtualization_type:
description: image virtualization type
returned: when AMI is created or already exists
type: string
sample: "hvm"
snapshots_deleted:
description: a list of snapshot ids deleted after deregistering image
returned: after AMI is deregistered, if 'delete_snapshot' is set to 'yes'
type: list
sample: [
"snap-fbcccb8f",
"snap-cfe7cdb4"
]
'''
import sys
import time

# boto is an optional dependency; record availability so main() can emit a
# clean module failure instead of an ImportError traceback.
try:
    import boto
    import boto.ec2
    from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False
def get_block_device_mapping(image):
    """
    Return the AMI's block device mapping as a plain dict.

    :param image: boto Image object (or None); only its
        ``block_device_mapping`` attribute is read.
    :return: dict mapping device name (e.g. "/dev/sda1") to a dict with
        keys 'size', 'snapshot_id', 'volume_type', 'encrypted' and
        'delete_on_termination'. Empty dict when *image* is None or has
        no block device mapping.
    """
    bdm_dict = {}
    if image is not None and hasattr(image, 'block_device_mapping'):
        # Iterate items() directly instead of keys() + repeated indexing;
        # the redundant getattr() of the original was also dropped.
        for device_name, device in image.block_device_mapping.items():
            bdm_dict[device_name] = {
                'size': device.size,
                'snapshot_id': device.snapshot_id,
                'volume_type': device.volume_type,
                'encrypted': device.encrypted,
                'delete_on_termination': device.delete_on_termination,
            }
    return bdm_dict
def get_ami_info(image):
    """
    Build the result payload describing an AMI from a boto Image object.

    The keys mirror the module's RETURN documentation; the block device
    mapping is flattened via get_block_device_mapping().
    """
    return {
        'image_id': image.id,
        'state': image.state,
        'architecture': image.architecture,
        'block_device_mapping': get_block_device_mapping(image),
        'creationDate': image.creationDate,
        'description': image.description,
        'hypervisor': image.hypervisor,
        'is_public': image.is_public,
        'location': image.location,
        'ownerId': image.ownerId,
        'root_device_name': image.root_device_name,
        'root_device_type': image.root_device_type,
        'tags': image.tags,
        'virtualization_type': image.virtualization_type,
    }
def create_image(module, ec2):
    """
    Creates new AMI

    module : AnsibleModule object
    ec2: authenticated ec2 connection object

    Two creation paths exist: from a running instance (ec2.create_image)
    or by registering an image from EBS snapshots (ec2.register_image).
    On success this calls module.exit_json() and never returns.
    """
    instance_id = module.params.get('instance_id')
    name = module.params.get('name')
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))
    description = module.params.get('description')
    architecture = module.params.get('architecture')
    kernel_id = module.params.get('kernel_id')
    root_device_name = module.params.get('root_device_name')
    virtualization_type = module.params.get('virtualization_type')
    no_reboot = module.params.get('no_reboot')
    device_mapping = module.params.get('device_mapping')
    tags = module.params.get('tags')
    launch_permissions = module.params.get('launch_permissions')

    try:
        params = {'name': name,
                  'description': description}

        # AMI names are unique per account; if one already exists with this
        # name, only its launch permissions are reconciled (update_image
        # exits the module when launch_permissions were supplied).
        images = ec2.get_all_images(filters={'name': name})

        if images and images[0]:
            # ensure that launch_permissions are up to date
            update_image(module, ec2, images[0].id)

        # Translate the device_mapping list into boto's BlockDeviceMapping.
        bdm = None
        if device_mapping:
            bdm = BlockDeviceMapping()
            for device in device_mapping:
                if 'device_name' not in device:
                    module.fail_json(msg = 'Device name must be set for volume')
                # device_name keys the mapping; remaining keys feed BlockDeviceType.
                # NOTE(review): this mutates the caller-supplied dict in place.
                device_name = device['device_name']
                del device['device_name']
                bd = BlockDeviceType(**device)
                bdm[device_name] = bd

        if instance_id:
            # Create from a running/stopped instance.
            params['instance_id'] = instance_id
            params['no_reboot'] = no_reboot
            if bdm:
                params['block_device_mapping'] = bdm
            image_id = ec2.create_image(**params)
        else:
            # Register from snapshots; note boto's differing kwarg name
            # ('block_device_map' vs 'block_device_mapping' above).
            params['architecture'] = architecture
            params['virtualization_type'] = virtualization_type
            if kernel_id:
                params['kernel_id'] = kernel_id
            if root_device_name:
                params['root_device_name'] = root_device_name
            if bdm:
                params['block_device_map'] = bdm
            image_id = ec2.register_image(**params)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

    # Wait until the image is recognized. EC2 API has eventual consistency,
    # such that a successful CreateImage API call doesn't guarantee the success
    # of subsequent DescribeImages API call using the new image id returned.
    # NOTE(review): if the very first get_image() raises, 'img' is unbound
    # when checked after the loop — confirm against upstream behavior.
    for i in range(wait_timeout):
        try:
            img = ec2.get_image(image_id)
            if img.state == 'available':
                break
            elif img.state == 'failed':
                module.fail_json(msg="AMI creation failed, please see the AWS console for more details")
        except boto.exception.EC2ResponseError as e:
            # Only give up on the last attempt, and only for errors other
            # than the expected not-yet-visible responses.
            if ('InvalidAMIID.NotFound' not in e.error_code and 'InvalidAMIID.Unavailable' not in e.error_code) and wait and i == wait_timeout - 1:
                module.fail_json(msg="Error while trying to find the new image. Using wait=yes and/or a longer wait_timeout may help. %s: %s" % (e.error_code, e.error_message))
        finally:
            time.sleep(1)

    if img.state != 'available':
        module.fail_json(msg="Error while trying to find the new image. Using wait=yes and/or a longer wait_timeout may help.")

    if tags:
        try:
            ec2.create_tags(image_id, tags)
        except boto.exception.EC2ResponseError as e:
            module.fail_json(msg = "Image tagging failed => %s: %s" % (e.error_code, e.error_message))
    if launch_permissions:
        try:
            img = ec2.get_image(image_id)
            img.set_launch_permissions(**launch_permissions)
        except boto.exception.BotoServerError as e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message), image_id=image_id)

    module.exit_json(msg="AMI creation operation complete", changed=True, **get_ami_info(img))
def deregister_image(module, ec2):
    """
    Deregisters AMI

    Optionally deletes the snapshots backing the image afterwards
    (delete_snapshot param) and can wait until the image is gone.
    Exits the module via exit_json/fail_json; never returns.
    """
    image_id = module.params.get('image_id')
    delete_snapshot = module.params.get('delete_snapshot')
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))

    img = ec2.get_image(image_id)
    if img is None:
        module.fail_json(msg = "Image %s does not exist" % image_id, changed=False)

    # Get all associated snapshot ids before deregistering image otherwise this information becomes unavailable
    snapshots = []
    if hasattr(img, 'block_device_mapping'):
        for key in img.block_device_mapping:
            snapshots.append(img.block_device_mapping[key].snapshot_id)

    # When trying to re-delete already deleted image it doesn't raise an exception
    # It just returns an object without image attributes
    if hasattr(img, 'id'):
        try:
            params = {'image_id': image_id,
                      'delete_snapshot': delete_snapshot}
            res = ec2.deregister_image(**params)
        except boto.exception.BotoServerError as e:
            module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
    else:
        module.exit_json(msg = "Image %s has already been deleted" % image_id, changed=False)

    # wait here until the image is gone; poll every 3 seconds until the
    # deadline (wait_timeout is converted to an absolute timestamp).
    img = ec2.get_image(image_id)
    wait_timeout = time.time() + wait_timeout
    while wait and wait_timeout > time.time() and img is not None:
        img = ec2.get_image(image_id)
        time.sleep(3)
    if wait and wait_timeout <= time.time():
        # waiting took too long
        module.fail_json(msg = "timed out waiting for image to be deregistered/deleted")

    # Boto library has hardcoded the deletion of the snapshot for the root volume mounted as '/dev/sda1' only
    # Make it possible to delete all snapshots which belong to image, including root block device mapped as '/dev/xvda'
    if delete_snapshot:
        try:
            for snapshot_id in snapshots:
                ec2.delete_snapshot(snapshot_id)
        except boto.exception.BotoServerError as e:
            # NOTE(review): errors other than InvalidSnapshot.NotFound are
            # also swallowed here (no re-raise/fail_json) — confirm intent.
            if e.error_code == 'InvalidSnapshot.NotFound':
                # Don't error out if root volume snapshot was already deleted as part of deregister_image
                pass
        module.exit_json(msg="AMI deregister/delete operation complete", changed=True, snapshots_deleted=snapshots)
    else:
        module.exit_json(msg="AMI deregister/delete operation complete", changed=True)
def update_image(module, ec2, image_id):
    """
    Updates AMI launch permissions.

    Compares the image's current launch permissions against the
    'launch_permissions' module parameter and either sets the requested
    permissions or removes the existing ones. Exits the module via
    exit_json/fail_json; never returns.
    """
    # FIX: default to an empty dict (was an empty list). launch_permissions
    # is used as a mapping below ('user_ids' membership, **-expansion, and
    # comparison against the dict returned by get_launch_permissions()), so
    # a list was the wrong neutral value and made the != comparison always
    # true when no permissions were requested.
    launch_permissions = module.params.get('launch_permissions') or {}
    if 'user_ids' in launch_permissions:
        # AWS expects account ids as strings.
        launch_permissions['user_ids'] = [str(user_id) for user_id in launch_permissions['user_ids']]

    img = ec2.get_image(image_id)
    if img is None:
        module.fail_json(msg = "Image %s does not exist" % image_id, changed=False)

    try:
        set_permissions = img.get_launch_permissions()
        if set_permissions != launch_permissions:
            if ('user_ids' in launch_permissions and launch_permissions['user_ids']) or ('group_names' in launch_permissions and launch_permissions['group_names']):
                # Requested permissions are non-empty: apply them.
                res = img.set_launch_permissions(**launch_permissions)
            elif ('user_ids' in set_permissions and set_permissions['user_ids']) or ('group_names' in set_permissions and set_permissions['group_names']):
                # Nothing requested but permissions exist: clear them.
                res = img.remove_launch_permissions(**set_permissions)
            else:
                module.exit_json(msg="AMI not updated", launch_permissions=set_permissions, changed=False)
            module.exit_json(msg="AMI launch permissions updated", launch_permissions=launch_permissions, set_perms=set_permissions, changed=True)
        else:
            module.exit_json(msg="AMI not updated", launch_permissions=set_permissions, changed=False)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
def main():
    """Module entry point: parse params and dispatch on 'state'."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        instance_id = dict(),
        image_id = dict(),
        architecture = dict(default="x86_64"),
        kernel_id = dict(),
        virtualization_type = dict(default="hvm"),
        root_device_name = dict(),
        delete_snapshot = dict(default=False, type='bool'),
        name = dict(),
        wait = dict(type='bool', default=False),
        wait_timeout = dict(default=900),
        description = dict(default=""),
        no_reboot = dict(default=False, type='bool'),
        state = dict(default='present'),
        device_mapping = dict(type='list'),
        tags = dict(type='dict'),
        launch_permissions = dict(type='dict')
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    try:
        ec2 = ec2_connect(module)
    except Exception as e:
        module.fail_json(msg="Error while connecting to aws: %s" % str(e))

    if module.params.get('state') == 'absent':
        # Deregistration requires an explicit image_id.
        if not module.params.get('image_id'):
            module.fail_json(msg='image_id needs to be an ami image to registered/delete')
        deregister_image(module, ec2)
    elif module.params.get('state') == 'present':
        if module.params.get('image_id') and module.params.get('launch_permissions'):
            # Update image's launch permissions
            update_image(module, ec2, module.params.get('image_id'))

        # Changed is always set to true when provisioning new AMI
        if not module.params.get('instance_id') and not module.params.get('device_mapping'):
            module.fail_json(msg='instance_id or device_mapping (register from ebs snapshot) parameter is required for new image')
        if not module.params.get('name'):
            module.fail_json(msg='name parameter is required for new image')
        create_image(module, ec2)

# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

if __name__ == '__main__':
    main()
| gpl-3.0 |
elmerdpadilla/iv | addons/purchase/company.py | 383 | 1576 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv,fields
class company(osv.osv):
    """Extend res.company with the purchase lead time setting."""
    _inherit = 'res.company'
    _columns = {
        'po_lead': fields.float(
            'Purchase Lead Time', required=True,
            # FIX: the original backslash-continued fragments concatenated
            # without spaces ("systemgenerates", "products,they"), producing
            # a garbled tooltip; spaces restored at each join point.
            help="Margin of error for supplier lead times. When the system "
                 "generates Purchase Orders for procuring products, "
                 "they will be scheduled that many days earlier "
                 "to cope with unexpected supplier delays."),
    }
    _defaults = {
        # Callable so the default is evaluated per record creation.
        'po_lead': lambda *a: 1.0,
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
intellimath/pyon | test/test_perfomance.py | 1 | 1913 | import pyon
import pickle
import time
import ast
import json
class C(object):
    """Benchmark fixture whose pickling goes through __reduce__."""

    def __init__(self, count):
        self.count = count

    def __reduce__(self):
        # Constructor argument is 'count'; every other instance attribute
        # travels as the state dict.
        state = {key: value for key, value in self.__dict__.items()
                 if key != 'count'}
        return C, (self.count,), state
# Fixture 1: 10000 objects, each with a nested child, exercising the
# __reduce__-based serialization path.
lst = []
for i in range(10000):
    c = C(i)
    c.name = "aaa" + str(i)
    c.age = 100*i % 37
    c.child = C(i)
    c.child.name = "bbb" + str(i)
    c.child.age = 101*i % 37
    lst.append(c)

# Fixture 2: the same data as plain dicts, for the json comparison.
json_lst = []
for i in range(10000):
    c = {'name':"aaa" + str(i), 'count':i, 'age': 100*i % 37, 'child': {'name':"bbb" + str(i), 'count':i, 'age':101*i % 37}}
    json_lst.append(c)

# --- pyon round-trip timing on the object fixture ---
print("--- PyON ---")
t0 = time.time()
text = pyon.dumps(lst, fast=True)
#print(text)
t1 = time.time()
#print('Text length = ', len(text))
print('pyon dumps:', t1-t0)

t2 = time.time()
obj = pyon.loads(text)
t3 = time.time()
print('pyon loads:', t3-t2)

# Parse once up front so the second loads measurement excludes ast.parse.
tree = ast.parse(text)
t2 = time.time()
obj = pyon.loads(tree)
t3 = time.time()
print('pyon loads without ast part:', t3-t2)
#print('List length = ',len(obj))

# --- pickle baseline on the same object fixture ---
print("--- pickle ---")
t0 = time.time()
text = pickle.dumps(lst)
t1 = time.time()
print('Text length = ', len(text))
print('pickle dumps:', t1-t0)
t2 = time.time()
obj = pickle.loads(text)
t3 = time.time()
print('pickle loads:', t3-t2)
print('List length = ',len(obj))

# --- json vs pyon on the dict fixture ---
print("--- json ---")
t0 = time.time()
text = json.dumps(json_lst)
t1 = time.time()
print('json dumps:', t1-t0)
t0 = time.time()
ob = json.loads(text)
t1 = time.time()
print('json loads:', t1-t0)

t0 = time.time()
text = pyon.dumps(json_lst, fast=True)
t1 = time.time()
print('pyon dumps:', t1-t0)
t0 = time.time()
ob = pyon.loads(text)
t1 = time.time()
print('pyon loads:', t1-t0)
tree = ast.parse(text)
t0 = time.time()
ob = pyon.loads(tree)
t1 = time.time()
print('pyon loads without ast part:', t1-t0)
shuggiefisher/django-guardian | guardian/tests/mixins_test.py | 12 | 4973 | from django.contrib.auth.models import User, AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ImproperlyConfigured
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.test import TestCase
from django.test.client import RequestFactory
from django.views.generic import View
from mock import Mock
from guardian.mixins import LoginRequiredMixin
from guardian.mixins import PermissionRequiredMixin
class DatabaseRemovedError(Exception):
    """Signals that RemoveDatabaseView's body was actually executed."""
class RemoveDatabaseView(View):
    """
    View whose body must never run in the permission tests; reaching get()
    raises DatabaseRemovedError so a test can detect that dispatch happened.
    """
    def get(self, request, *args, **kwargs):
        raise DatabaseRemovedError("You've just allowed db to be removed!")
class TestView(PermissionRequiredMixin, RemoveDatabaseView):
    """Destructive view guarded by an object-level permission check."""
    # Permission the mixin must verify before the view body runs.
    permission_required = 'contenttypes.change_contenttype'
    object = None # should be set at each tests explicitly
class TestViewMixins(TestCase):
    """Tests for guardian's PermissionRequiredMixin and LoginRequiredMixin."""

    def setUp(self):
        # A throwaway ContentType row serves as the object the permission
        # checks target.
        self.ctype = ContentType.objects.create(name='foo', model='bar',
            app_label='fake-for-guardian-tests')
        self.factory = RequestFactory()
        self.user = User.objects.create_user('joe', 'joe@doe.com', 'doe')
        self.client.login(username='joe', password='doe')

    def test_permission_is_checked_before_view_is_computed(self):
        """
        This test would fail if permission is checked **after** view is
        actually resolved: the view body raises DatabaseRemovedError, so a
        302 redirect proves the check ran first.
        """
        request = self.factory.get('/')
        request.user = self.user
        # View.object is set
        view = TestView.as_view(object=self.ctype)
        response = view(request)
        self.assertEqual(response.status_code, 302)
        # View.get_object returns object
        TestView.get_object = lambda instance: self.ctype
        view = TestView.as_view()
        response = view(request)
        self.assertEqual(response.status_code, 302)
        del TestView.get_object

    def test_permission_is_checked_before_view_is_computed_perm_denied_raised(self):
        """
        Same as above, but with raise_exception=True the mixin raises
        PermissionDenied instead of redirecting.
        """
        request = self.factory.get('/')
        request.user = self.user
        view = TestView.as_view(raise_exception=True, object=self.ctype)
        with self.assertRaises(PermissionDenied):
            view(request)

    def test_permission_required_view_configured_wrongly(self):
        """
        A view with permission_required=None is a misconfiguration and must
        raise ImproperlyConfigured, even for a user holding the permission.
        """
        request = self.factory.get('/')
        request.user = self.user
        request.user.add_obj_perm('change_contenttype', self.ctype)
        view = TestView.as_view(permission_required=None, object=self.ctype)
        with self.assertRaises(ImproperlyConfigured):
            view(request)

    def test_permission_required(self):
        """
        With the object permission granted, dispatch reaches the view body —
        observable here as DatabaseRemovedError.
        """
        request = self.factory.get('/')
        request.user = self.user
        request.user.add_obj_perm('change_contenttype', self.ctype)
        view = TestView.as_view(object=self.ctype)
        with self.assertRaises(DatabaseRemovedError):
            view(request)

    def test_permission_required_as_list(self):
        """
        When permission_required is a list, all listed permissions must be
        held; a missing one redirects and triggers on_permission_check_fail.
        """
        global TestView

        class SecretView(TestView):
            on_permission_check_fail = Mock()

        request = self.factory.get('/')
        request.user = self.user
        request.user.add_obj_perm('change_contenttype', self.ctype)
        SecretView.permission_required = ['contenttypes.change_contenttype',
            'contenttypes.add_contenttype']
        view = SecretView.as_view(object=self.ctype)
        # Only one of the two permissions is held: redirect + failure hook.
        response = view(request)
        self.assertEqual(response.status_code, 302)
        SecretView.on_permission_check_fail.assert_called_once_with(request,
            response, obj=self.ctype)

        # With both permissions the view body finally executes.
        request.user.add_obj_perm('add_contenttype', self.ctype)
        with self.assertRaises(DatabaseRemovedError):
            view(request)

    def test_login_required_mixin(self):
        """Anonymous users are redirected to login_url; logged-in users pass."""
        class SecretView(LoginRequiredMixin, View):
            redirect_field_name = 'foobar'
            login_url = '/let-me-in/'

            def get(self, request):
                return HttpResponse('secret-view')

        request = self.factory.get('/some-secret-page/')
        request.user = AnonymousUser()

        view = SecretView.as_view()
        response = view(request)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response['Location'],
            '/let-me-in/?foobar=/some-secret-page/')

        request.user = self.user
        response = view(request)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, 'secret-view')
| bsd-2-clause |
benpatterson/edx-platform | cms/djangoapps/contentstore/course_info_model.py | 112 | 6790 | """
Views for viewing, adding, updating and deleting course updates.
Current db representation:
{
"_id" : locationjson,
"definition" : {
"data" : "<ol>[<li><h2>date</h2>content</li>]</ol>"},
"items" : [{"id": ID, "date": DATE, "content": CONTENT}]
"metadata" : ignored
}
}
"""
import re
import logging
from django.http import HttpResponseBadRequest
from django.utils.translation import ugettext as _
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.django import modulestore
from xmodule.html_module import CourseInfoModule
from openedx.core.lib.xblock_utils import get_course_update_items
from cms.djangoapps.contentstore.push_notification import enqueue_push_course_update
# # This should be in a class which inherits from XmlDescriptor
log = logging.getLogger(__name__)
def get_course_updates(location, provided_id, user_id):
    """
    Retrieve the relevant course_info updates and unpack into the model which the client expects:
    [{id : index, date : string, content : html string}]

    Creates the course_info block on first access if it does not exist yet.
    Soft-deleted updates are filtered out by _get_visible_update.
    """
    try:
        course_updates = modulestore().get_item(location)
    except ItemNotFoundError:
        # First access for this course: create an empty updates block.
        course_updates = modulestore().create_item(user_id, location.course_key, location.block_type, location.block_id)

    course_update_items = get_course_update_items(course_updates, _get_index(provided_id))
    return _get_visible_update(course_update_items)
def update_course_updates(location, update, passed_id=None, user=None):
    """
    Either add or update the given course update.
    Add:
        If the passed_id is absent or None, the course update is added.
        If push_notification_selected is set in the update, a celery task for the push notification is created.
    Update:
        It will update it if it has a passed_id which has a valid value.
        Until updates have distinct values, the passed_id is the location url + an index into the html structure.

    Returns the stored update dict (without the internal 'status' key), or
    HttpResponseBadRequest for an invalid id.
    """
    try:
        course_updates = modulestore().get_item(location)
    except ItemNotFoundError:
        course_updates = modulestore().create_item(user.id, location.course_key, location.block_type, location.block_id)

    course_update_items = list(reversed(get_course_update_items(course_updates)))

    if passed_id is not None:
        passed_index = _get_index(passed_id)
        # oldest update at start of list
        if 0 < passed_index <= len(course_update_items):
            course_update_dict = course_update_items[passed_index - 1]
            course_update_dict["date"] = update["date"]
            course_update_dict["content"] = update["content"]
            course_update_items[passed_index - 1] = course_update_dict
        else:
            return HttpResponseBadRequest(_("Invalid course update id."))
    else:
        # New update: ids are 1-based and assigned sequentially.
        course_update_dict = {
            "id": len(course_update_items) + 1,
            "date": update["date"],
            "content": update["content"],
            "status": CourseInfoModule.STATUS_VISIBLE
        }
        course_update_items.append(course_update_dict)
        # Fires the (optional) push notification for new updates.
        enqueue_push_course_update(update, location.course_key)

    # update db record
    save_course_update_items(location, course_updates, course_update_items, user)
    # remove status key (internal bookkeeping, not part of the client model)
    if "status" in course_update_dict:
        del course_update_dict["status"]
    return course_update_dict
def _make_update_dict(update):
"""
Return course update item as a dictionary with required keys ('id', "date" and "content").
"""
return {
"id": update["id"],
"date": update["date"],
"content": update["content"],
}
def _get_visible_update(course_update_items):
    """
    Filter out course update items marked as deleted (soft delete).

    Accepts either a single item dict (returns it, or a 404 error payload if
    it was deleted) or a list of items (returns the visible ones).
    """
    if isinstance(course_update_items, dict):
        # Single course update item requested directly.
        if course_update_items.get("status") == CourseInfoModule.STATUS_DELETED:
            # Requested course update item has been soft-deleted.
            return {"error": _("Course update not found."), "status": 404}
        return _make_update_dict(course_update_items)

    return [
        _make_update_dict(update)
        for update in course_update_items
        if update.get("status") != CourseInfoModule.STATUS_DELETED
    ]
# pylint: disable=unused-argument
def delete_course_update(location, update, passed_id, user):
    """
    Don't delete course update item from db.
    Delete the given course_info update by settings "status" flag to 'deleted'.
    Returns the resulting course_updates.

    The 'update' parameter is unused; only passed_id selects the item.
    """
    if not passed_id:
        return HttpResponseBadRequest()

    try:
        course_updates = modulestore().get_item(location)
    except ItemNotFoundError:
        return HttpResponseBadRequest()

    course_update_items = list(reversed(get_course_update_items(course_updates)))
    passed_index = _get_index(passed_id)

    # delete update item from given index (1-based, oldest first)
    if 0 < passed_index <= len(course_update_items):
        course_update_item = course_update_items[passed_index - 1]
        # soft delete course update item
        course_update_item["status"] = CourseInfoModule.STATUS_DELETED
        course_update_items[passed_index - 1] = course_update_item

        # update db record
        save_course_update_items(location, course_updates, course_update_items, user)
        return _get_visible_update(course_update_items)
    else:
        return HttpResponseBadRequest(_("Invalid course update id."))
def _get_index(passed_id=None):
"""
From the url w/ index appended, get the index.
"""
if passed_id:
index_matcher = re.search(r'.*?/?(\d+)$', passed_id)
if index_matcher:
return int(index_matcher.group(1))
# return 0 if no index found
return 0
def _get_html(course_updates_items):
    """
    Render course update items as a single HTML <section>, newest first,
    skipping items soft-deleted via the "status" flag.
    """
    articles = [
        u"<article><h2>{date}</h2>{content}</article>".format(**update)
        for update in reversed(course_updates_items)
        if update.get("status") != CourseInfoModule.STATUS_DELETED
    ]
    return u"<section>{list_items}</section>".format(list_items="".join(articles))
def save_course_update_items(location, course_updates, course_update_items, user=None):
    """
    Save list of course_updates data dictionaries in new field ("course_updates.items")
    and html related to course update in 'data' ("course_updates.data") field.

    Returns the persisted course_updates block.
    """
    course_updates.items = course_update_items
    # Keep the legacy HTML representation in sync with the items list.
    course_updates.data = _get_html(course_update_items)

    # update db record
    modulestore().update_item(course_updates, user.id)

    return course_updates
| agpl-3.0 |
armadill-odyssey/aima-python | submissions/VanderKallen/myBayes.py | 15 | 1344 | import traceback
from submissions.VanderKallen import slavery
class DataFrame:
    """Simple data container (appears to mirror sklearn's dataset bunch
    layout: data / feature_names / target / target_names — TODO confirm)."""
    # NOTE(review): these are mutable *class* attributes, shared until
    # shadowed; the module assigns fresh lists on each object before use,
    # so the sharing is benign here.
    data = []
    feature_names = []
    target = []
    target_names = []
# Containers for the two feature layouts built below.
slaveTot = DataFrame()
# FIX: the original bound the class itself (slaveAS = DataFrame, missing
# parentheses), so subsequent attribute assignments mutated the shared
# DataFrame class. Instantiate it like slaveTot for an isolated object.
slaveAS = DataFrame()
transactions = slavery.get_transaction()
slaveTot.data = []
slaveAS.data = []
def genderNumber(gender):
    """Encode gender as a numeric feature: 1 for male ('M'), 0 otherwise."""
    return 1 if gender == 'M' else 0
# Build the two feature matrices from the raw transaction records.
for seller in transactions:
    # choose the input values
    slaveTot.data.append([
        seller['Transaction']['Number of Child Slaves'],
        seller['Transaction']['Number of Adult Slaves'],
    ])
    slaveAS.data.append([
        seller['Slave']['Age'],
        genderNumber(seller['Slave']['Gender']),
    ])

slaveAS.feature_names = [
    'Age',
    'Gender',
]
slaveTot.feature_names = [
    'Children',
    'Adults',
]
slaveTot.target = []
slaveAS.target = []
def priceTarget(price):
    """Binary class label: 1 when the sale price is below $410, else 0."""
    return 1 if price < 410 else 0
# Both datasets share the same binary price target.
for deal in transactions:
    # choose the target
    tt = priceTarget(deal['Transaction']['Sale Details']['Price'])
    slaveTot.target.append(tt)
    slaveAS.target.append(tt)

slaveTot.target_names = [
    'Price <= $410',
    'Price > $410',
]
slaveAS.target_names = [
    'Price <= $410',
    'Price > $410',
]
# Public registry consumed by the course framework.
Examples = {
    'Sales by Children and Adults': slaveTot,
    'Sales by Age and Sex': slaveAS
}
BruceDLong/CodeDog | Scons/scons-local-4.1.0.post1/SCons/Tool/applelink.py | 4 | 7117 | """SCons.Tool.applelink
Tool-specific initialization for Apple's gnu-like linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# MIT License
#
# Copyright The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Even though the Mac is based on the GNU toolchain, it doesn't understand
# the -rpath option, so we use the "link" tool instead of "gnulink".
from SCons.Util import CLVar
from . import link
# User programmatically describes how SHLIBVERSION maps to values for compat/current.
_APPLELIB_MAX_VERSION_VALUES = (65535, 255, 255)
class AppleLinkInvalidCurrentVersionException(Exception):
pass
class AppleLinkInvalidCompatibilityVersionException(Exception):
pass
def _applelib_check_valid_version(version_string):
"""
Check that the version # is valid.
X[.Y[.Z]]
where X 0-65535
where Y either not specified or 0-255
where Z either not specified or 0-255
:param version_string:
:return:
"""
parts = version_string.split('.')
if len(parts) > 3:
return False, "Version string has too many periods [%s]" % version_string
if len(parts) <= 0:
return False, "Version string unspecified [%s]" % version_string
for (i, p) in enumerate(parts):
try:
p_i = int(p)
except ValueError:
return False, "Version component %s (from %s) is not a number" % (p, version_string)
if p_i < 0 or p_i > _APPLELIB_MAX_VERSION_VALUES[i]:
return False, "Version component %s (from %s) is not valid value should be between 0 and %d" % (
p, version_string, _APPLELIB_MAX_VERSION_VALUES[i])
return True, ""
def _applelib_currentVersionFromSoVersion(source, target, env, for_signature):
    """
    Generator function producing the -Wl,-current_version flag when needed.

    The version comes from APPLELINK_CURRENT_VERSION when set, otherwise from
    SHLIBVERSION.  Setting APPLELINK_NO_CURRENT_VERSION to a true value (or
    providing no version at all) suppresses the flag.

    :param source: builder sources (unused)
    :param target: builder targets (unused)
    :param env: SCons construction environment
    :param for_signature: signature-generation flag (unused)
    :return: the flag string specifying the shared library's current_version,
        or "" when no flag should be emitted
    :raises AppleLinkInvalidCurrentVersionException: when the version string
        fails validation
    """
    if env.get('APPLELINK_NO_CURRENT_VERSION', False):
        return ""
    version_string = env.get('APPLELINK_CURRENT_VERSION') or env.get('SHLIBVERSION')
    if not version_string:
        return ""
    # Apple's linker accepts at most three dotted version components.
    version_string = ".".join(version_string.split('.')[:3])
    is_valid, reason = _applelib_check_valid_version(version_string)
    if not is_valid:
        raise AppleLinkInvalidCurrentVersionException(reason)
    return "-Wl,-current_version,%s" % version_string
def _applelib_compatVersionFromSoVersion(source, target, env, for_signature):
    """
    Generator function producing the -Wl,-compatibility_version flag when needed.

    The version comes from APPLELINK_COMPATIBILITY_VERSION when set; otherwise
    it is derived from the first two components of SHLIBVERSION with '.0'
    appended.  Setting APPLELINK_NO_COMPATIBILITY_VERSION to a true value (or
    providing no version at all) suppresses the flag.

    :param source: builder sources (unused)
    :param target: builder targets (unused)
    :param env: SCons construction environment
    :param for_signature: signature-generation flag (unused)
    :return: the flag string specifying the shared library's
        compatibility_version, or "" when no flag should be emitted
    :raises AppleLinkInvalidCompatibilityVersionException: when the version
        string fails validation
    """
    if env.get('APPLELINK_NO_COMPATIBILITY_VERSION', False):
        return ""
    version_string = env.get('APPLELINK_COMPATIBILITY_VERSION')
    if not version_string and env.get('SHLIBVERSION', False):
        # Default compatibility version is X.Y.0 derived from SHLIBVERSION.
        version_string = ".".join(env['SHLIBVERSION'].split('.')[:2] + ['0'])
    if not version_string:
        return ""
    is_valid, reason = _applelib_check_valid_version(version_string)
    if not is_valid:
        raise AppleLinkInvalidCompatibilityVersionException(reason)
    return "-Wl,-compatibility_version,%s" % version_string
def generate(env):
    """Add Builders and construction variables for applelink to an
    Environment.

    Builds on the generic 'link' tool, then overrides the pieces specific to
    Apple's linker: framework flags, -dynamiclib shared-library linking,
    current/compatibility version flags, and loadable-module (bundle)
    settings.
    """
    # Start from the generic GNU-style link tool and specialize below.
    link.generate(env)
    # Framework search-path and linkage expansions used by the *COM lines.
    env['FRAMEWORKPATHPREFIX'] = '-F'
    env['_FRAMEWORKPATH'] = '${_concat(FRAMEWORKPATHPREFIX, FRAMEWORKPATH, "", __env__, RDirs)}'
    env['_FRAMEWORKS'] = '${_concat("-framework ", FRAMEWORKS, "", __env__)}'
    env['LINKCOM'] = env['LINKCOM'] + ' $_FRAMEWORKPATH $_FRAMEWORKS $FRAMEWORKSFLAGS'
    # Apple's linker builds shared libraries with -dynamiclib, not -shared.
    env['SHLINKFLAGS'] = CLVar('$LINKFLAGS -dynamiclib')
    env['SHLINKCOM'] = env['SHLINKCOM'] + ' $_FRAMEWORKPATH $_FRAMEWORKS $FRAMEWORKSFLAGS'
    # Generator functions that emit -current_version / -compatibility_version.
    env['_APPLELINK_CURRENT_VERSION'] = _applelib_currentVersionFromSoVersion
    env['_APPLELINK_COMPATIBILITY_VERSION'] = _applelib_compatVersionFromSoVersion
    env['_SHLIBVERSIONFLAGS'] = '$_APPLELINK_CURRENT_VERSION $_APPLELINK_COMPATIBILITY_VERSION '
    env['_LDMODULEVERSIONFLAGS'] = '$_APPLELINK_CURRENT_VERSION $_APPLELINK_COMPATIBILITY_VERSION '
    # override the default for loadable modules, which are different
    # on OS X than dynamic shared libs. echoing what XCode does for
    # pre/suffixes:
    env['LDMODULEPREFIX'] = ''
    env['LDMODULESUFFIX'] = ''
    env['LDMODULEFLAGS'] = CLVar('$LINKFLAGS -bundle')
    env['LDMODULECOM'] = '$LDMODULE -o ${TARGET} $LDMODULEFLAGS' \
                         ' $SOURCES $_LIBDIRFLAGS $_LIBFLAGS $_FRAMEWORKPATH $_FRAMEWORKS $FRAMEWORKSFLAGS'
    # New stuff
    #
    env['_SHLIBSUFFIX'] = '${_SHLIBVERSION}${SHLIBSUFFIX}'
    env['__SHLIBVERSIONFLAGS'] = '${__lib_either_version_flag(__env__,' \
                                 '"SHLIBVERSION","_APPLELINK_CURRENT_VERSION", "_SHLIBVERSIONFLAGS")}'
    env['__LDMODULEVERSIONFLAGS'] = '${__lib_either_version_flag(__env__,' \
                                    '"LDMODULEVERSION","_APPLELINK_CURRENT_VERSION", "_LDMODULEVERSIONFLAGS")}'
def exists(env):
    """Return True when the tool can be used, i.e. on a macOS platform."""
    platform = env['PLATFORM']
    return platform == 'darwin'
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 |
LinusU/ansible | contrib/inventory/ec2.py | 20 | 54413 | #!/usr/bin/env python
'''
EC2 external inventory script
=================================
Generates inventory that Ansible can understand by making API request to
AWS EC2 using the Boto library.
NOTE: This script assumes Ansible is being executed where the environment
variables needed for Boto have already been set:
export AWS_ACCESS_KEY_ID='AK123'
export AWS_SECRET_ACCESS_KEY='abc123'
This script also assumes there is an ec2.ini file alongside it. To specify a
different path to ec2.ini, define the EC2_INI_PATH environment variable:
export EC2_INI_PATH=/path/to/my_ec2.ini
If you're using eucalyptus you need to set the above variables and
you need to define:
export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
If you're using boto profiles (requires boto>=2.24.0) you can choose a profile
using the --boto-profile command line argument (e.g. ec2.py --boto-profile prod) or using
the AWS_PROFILE variable:
AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml
For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
When run against a specific host, this script returns the following variables:
- ec2_ami_launch_index
- ec2_architecture
- ec2_association
- ec2_attachTime
- ec2_attachment
- ec2_attachmentId
- ec2_client_token
- ec2_deleteOnTermination
- ec2_description
- ec2_deviceIndex
- ec2_dns_name
- ec2_eventsSet
- ec2_group_name
- ec2_hypervisor
- ec2_id
- ec2_image_id
- ec2_instanceState
- ec2_instance_type
- ec2_ipOwnerId
- ec2_ip_address
- ec2_item
- ec2_kernel
- ec2_key_name
- ec2_launch_time
- ec2_monitored
- ec2_monitoring
- ec2_networkInterfaceId
- ec2_ownerId
- ec2_persistent
- ec2_placement
- ec2_platform
- ec2_previous_state
- ec2_private_dns_name
- ec2_private_ip_address
- ec2_publicIp
- ec2_public_dns_name
- ec2_ramdisk
- ec2_reason
- ec2_region
- ec2_requester_id
- ec2_root_device_name
- ec2_root_device_type
- ec2_security_group_ids
- ec2_security_group_names
- ec2_shutdown_state
- ec2_sourceDestCheck
- ec2_spot_instance_request_id
- ec2_state
- ec2_state_code
- ec2_state_reason
- ec2_status
- ec2_subnet_id
- ec2_tenancy
- ec2_virtualization_type
- ec2_vpc_id
These variables are pulled out of a boto.ec2.instance object. There is a lack of
consistency with variable spellings (camelCase and underscores) since this
just loops through all variables the object exposes. It is preferred to use the
ones with underscores when multiple exist.
In addition, if an instance has AWS Tags associated with it, each tag is a new
variable named:
- ec2_tag_[Key] = [Value]
Security groups are comma-separated in 'ec2_security_group_ids' and
'ec2_security_group_names'.
'''
# (c) 2012, Peter Sankauskas
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import sys
import os
import argparse
import re
from time import time
import boto
from boto import ec2
from boto import rds
from boto import elasticache
from boto import route53
import six
from six.moves import configparser
from collections import defaultdict
try:
import json
except ImportError:
import simplejson as json
class Ec2Inventory(object):
def _empty_inventory(self):
return {"_meta" : {"hostvars" : {}}}
    def __init__(self):
        ''' Main execution path

        Reads CLI options and ec2.ini settings, refreshes the API-backed
        cache when requested (or when the cache is stale), then prints either
        the full inventory (--list) or a single host's variables (--host).
        '''
        # Inventory grouped by instance IDs, tags, security groups, regions,
        # and availability zones
        self.inventory = self._empty_inventory()
        # Index of hostname (address) to instance ID
        self.index = {}
        # Boto profile to use (if any)
        self.boto_profile = None
        # Read settings and parse CLI arguments
        self.parse_cli_args()
        self.read_settings()
        # Make sure that profile_name is not passed at all if not set
        # as pre 2.24 boto will fall over otherwise
        if self.boto_profile:
            if not hasattr(boto.ec2.EC2Connection, 'profile_name'):
                self.fail_with_error("boto version must be >= 2.24 to use profile")
        # Cache
        if self.args.refresh_cache:
            self.do_api_calls_update_cache()
        elif not self.is_cache_valid():
            self.do_api_calls_update_cache()
        # Data to print
        if self.args.host:
            data_to_print = self.get_host_info()
        elif self.args.list:
            # Display list of instances for inventory
            if self.inventory == self._empty_inventory():
                # API calls were skipped (cache was valid), so serve the cache.
                data_to_print = self.get_inventory_from_cache()
            else:
                data_to_print = self.json_format_dict(self.inventory, True)
        # NOTE: --list defaults to True, so one of the branches above always
        # assigns data_to_print.
        print(data_to_print)
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
def read_settings(self):
''' Reads the settings from the ec2.ini file '''
if six.PY3:
config = configparser.ConfigParser()
else:
config = configparser.SafeConfigParser()
ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')
ec2_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('EC2_INI_PATH', ec2_default_ini_path)))
config.read(ec2_ini_path)
# is eucalyptus?
self.eucalyptus_host = None
self.eucalyptus = False
if config.has_option('ec2', 'eucalyptus'):
self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):
self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
# Regions
self.regions = []
configRegions = config.get('ec2', 'regions')
configRegions_exclude = config.get('ec2', 'regions_exclude')
if (configRegions == 'all'):
if self.eucalyptus_host:
self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)
else:
for regionInfo in ec2.regions():
if regionInfo.name not in configRegions_exclude:
self.regions.append(regionInfo.name)
else:
self.regions = configRegions.split(",")
# Destination addresses
self.destination_variable = config.get('ec2', 'destination_variable')
self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
# Route53
self.route53_enabled = config.getboolean('ec2', 'route53')
self.route53_excluded_zones = []
if config.has_option('ec2', 'route53_excluded_zones'):
self.route53_excluded_zones.extend(
config.get('ec2', 'route53_excluded_zones', '').split(','))
# Include RDS instances?
self.rds_enabled = True
if config.has_option('ec2', 'rds'):
self.rds_enabled = config.getboolean('ec2', 'rds')
# Include ElastiCache instances?
self.elasticache_enabled = True
if config.has_option('ec2', 'elasticache'):
self.elasticache_enabled = config.getboolean('ec2', 'elasticache')
# Return all EC2 instances?
if config.has_option('ec2', 'all_instances'):
self.all_instances = config.getboolean('ec2', 'all_instances')
else:
self.all_instances = False
# Instance states to be gathered in inventory. Default is 'running'.
# Setting 'all_instances' to 'yes' overrides this option.
ec2_valid_instance_states = [
'pending',
'running',
'shutting-down',
'terminated',
'stopping',
'stopped'
]
self.ec2_instance_states = []
if self.all_instances:
self.ec2_instance_states = ec2_valid_instance_states
elif config.has_option('ec2', 'instance_states'):
for instance_state in config.get('ec2', 'instance_states').split(','):
instance_state = instance_state.strip()
if instance_state not in ec2_valid_instance_states:
continue
self.ec2_instance_states.append(instance_state)
else:
self.ec2_instance_states = ['running']
# Return all RDS instances? (if RDS is enabled)
if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled:
self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
else:
self.all_rds_instances = False
# Return all ElastiCache replication groups? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled:
self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups')
else:
self.all_elasticache_replication_groups = False
# Return all ElastiCache clusters? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled:
self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters')
else:
self.all_elasticache_clusters = False
# Return all ElastiCache nodes? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled:
self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes')
else:
self.all_elasticache_nodes = False
# boto configuration profile (prefer CLI argument)
self.boto_profile = self.args.boto_profile
if config.has_option('ec2', 'boto_profile') and not self.boto_profile:
self.boto_profile = config.get('ec2', 'boto_profile')
# Cache related
cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
if self.boto_profile:
cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
self.cache_path_cache = cache_dir + "/ansible-ec2.cache"
self.cache_path_index = cache_dir + "/ansible-ec2.index"
self.cache_max_age = config.getint('ec2', 'cache_max_age')
# Configure nested groups instead of flat namespace.
if config.has_option('ec2', 'nested_groups'):
self.nested_groups = config.getboolean('ec2', 'nested_groups')
else:
self.nested_groups = False
# Configure which groups should be created.
group_by_options = [
'group_by_instance_id',
'group_by_region',
'group_by_availability_zone',
'group_by_ami_id',
'group_by_instance_type',
'group_by_key_pair',
'group_by_vpc_id',
'group_by_security_group',
'group_by_tag_keys',
'group_by_tag_none',
'group_by_route53_names',
'group_by_rds_engine',
'group_by_rds_parameter_group',
'group_by_elasticache_engine',
'group_by_elasticache_cluster',
'group_by_elasticache_parameter_group',
'group_by_elasticache_replication_group',
]
for option in group_by_options:
if config.has_option('ec2', option):
setattr(self, option, config.getboolean('ec2', option))
else:
setattr(self, option, True)
# Do we need to just include hosts that match a pattern?
try:
pattern_include = config.get('ec2', 'pattern_include')
if pattern_include and len(pattern_include) > 0:
self.pattern_include = re.compile(pattern_include)
else:
self.pattern_include = None
except configparser.NoOptionError as e:
self.pattern_include = None
# Do we need to exclude hosts that match a pattern?
try:
pattern_exclude = config.get('ec2', 'pattern_exclude');
if pattern_exclude and len(pattern_exclude) > 0:
self.pattern_exclude = re.compile(pattern_exclude)
else:
self.pattern_exclude = None
except configparser.NoOptionError as e:
self.pattern_exclude = None
# Instance filters (see boto and EC2 API docs). Ignore invalid filters.
self.ec2_instance_filters = defaultdict(list)
if config.has_option('ec2', 'instance_filters'):
for instance_filter in config.get('ec2', 'instance_filters', '').split(','):
instance_filter = instance_filter.strip()
if not instance_filter or '=' not in instance_filter:
continue
filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)]
if not filter_key:
continue
self.ec2_instance_filters[filter_key].append(filter_value)
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
parser.add_argument('--boto-profile', action='store',
help='Use boto profile for connections to EC2')
self.args = parser.parse_args()
    def do_api_calls_update_cache(self):
        ''' Do API calls to each region, and save data in cache files '''
        # Route53 records are global (not regional), so fetch them once
        # up front before walking the configured regions.
        if self.route53_enabled:
            self.get_route53_records()
        for region in self.regions:
            self.get_instances_by_region(region)
            if self.rds_enabled:
                self.get_rds_instances_by_region(region)
            if self.elasticache_enabled:
                self.get_elasticache_clusters_by_region(region)
                self.get_elasticache_replication_groups_by_region(region)
        # Persist both the inventory and the hostname->instance-ID index.
        self.write_to_cache(self.inventory, self.cache_path_cache)
        self.write_to_cache(self.index, self.cache_path_index)
def connect(self, region):
''' create connection to api server'''
if self.eucalyptus:
conn = boto.connect_euca(host=self.eucalyptus_host)
conn.APIVersion = '2010-08-31'
else:
conn = self.connect_to_aws(ec2, region)
return conn
def boto_fix_security_token_in_profile(self, connect_args):
''' monkey patch for boto issue boto/boto#2100 '''
profile = 'profile ' + self.boto_profile
if boto.config.has_option(profile, 'aws_security_token'):
connect_args['security_token'] = boto.config.get(profile, 'aws_security_token')
return connect_args
def connect_to_aws(self, module, region):
connect_args = {}
# only pass the profile name if it's set (as it is not supported by older boto versions)
if self.boto_profile:
connect_args['profile_name'] = self.boto_profile
self.boto_fix_security_token_in_profile(connect_args)
conn = module.connect_to_region(region, **connect_args)
# connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
if conn is None:
self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
return conn
    def get_instances_by_region(self, region):
        ''' Makes an AWS EC2 API call to the list of instances in a particular
        region '''
        try:
            conn = self.connect(region)
            reservations = []
            if self.ec2_instance_filters:
                # Each configured filter key is queried separately and the
                # reservation lists are concatenated.
                for filter_key, filter_values in self.ec2_instance_filters.items():
                    reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values }))
            else:
                reservations = conn.get_all_instances()
            for reservation in reservations:
                for instance in reservation.instances:
                    self.add_instance(instance, region)
        except boto.exception.BotoServerError as e:
            if e.error_code == 'AuthFailure':
                error = self.get_auth_error_message()
            else:
                backend = 'Eucalyptus' if self.eucalyptus else 'AWS'
                error = "Error connecting to %s backend.\n%s" % (backend, e.message)
            # fail_with_error() writes to stderr and exits the process.
            self.fail_with_error(error, 'getting EC2 instances')
def get_rds_instances_by_region(self, region):
''' Makes an AWS API call to the list of RDS instances in a particular
region '''
try:
conn = self.connect_to_aws(rds, region)
if conn:
instances = conn.get_all_dbinstances()
for instance in instances:
self.add_rds_instance(instance, region)
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if not e.reason == "Forbidden":
error = "Looks like AWS RDS is down:\n%s" % e.message
self.fail_with_error(error, 'getting RDS instances')
def get_elasticache_clusters_by_region(self, region):
''' Makes an AWS API call to the list of ElastiCache clusters (with
nodes' info) in a particular region.'''
# ElastiCache boto module doesn't provide a get_all_intances method,
# that's why we need to call describe directly (it would be called by
# the shorthand method anyway...)
try:
conn = elasticache.connect_to_region(region)
if conn:
# show_cache_node_info = True
# because we also want nodes' information
response = conn.describe_cache_clusters(None, None, None, True)
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if not e.reason == "Forbidden":
error = "Looks like AWS ElastiCache is down:\n%s" % e.message
self.fail_with_error(error, 'getting ElastiCache clusters')
try:
# Boto also doesn't provide wrapper classes to CacheClusters or
# CacheNodes. Because of that wo can't make use of the get_list
# method in the AWSQueryConnection. Let's do the work manually
clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters']
except KeyError as e:
error = "ElastiCache query to AWS failed (unexpected format)."
self.fail_with_error(error, 'getting ElastiCache clusters')
for cluster in clusters:
self.add_elasticache_cluster(cluster, region)
def get_elasticache_replication_groups_by_region(self, region):
''' Makes an AWS API call to the list of ElastiCache replication groups
in a particular region.'''
# ElastiCache boto module doesn't provide a get_all_intances method,
# that's why we need to call describe directly (it would be called by
# the shorthand method anyway...)
try:
conn = elasticache.connect_to_region(region)
if conn:
response = conn.describe_replication_groups()
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if not e.reason == "Forbidden":
error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message
self.fail_with_error(error, 'getting ElastiCache clusters')
try:
# Boto also doesn't provide wrapper classes to ReplicationGroups
# Because of that wo can't make use of the get_list method in the
# AWSQueryConnection. Let's do the work manually
replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']
except KeyError as e:
error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)."
self.fail_with_error(error, 'getting ElastiCache clusters')
for replication_group in replication_groups:
self.add_elasticache_replication_group(replication_group, region)
def get_auth_error_message(self):
''' create an informative error message if there is an issue authenticating'''
errors = ["Authentication error retrieving ec2 inventory."]
if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]:
errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found')
else:
errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct')
boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials']
boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p)))
if len(boto_config_found) > 0:
errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found))
else:
errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths))
return '\n'.join(errors)
def fail_with_error(self, err_msg, err_operation=None):
'''log an error to std err for ansible-playbook to consume and exit'''
if err_operation:
err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format(
err_msg=err_msg, err_operation=err_operation)
sys.stderr.write(err_msg)
sys.exit(1)
def get_instance(self, region, instance_id):
conn = self.connect(region)
reservations = conn.get_all_instances([instance_id])
for reservation in reservations:
for instance in reservation.instances:
return instance
    def add_instance(self, instance, region):
        ''' Adds an instance to the inventory and index, as long as it is
        addressable

        The instance is filtered by state and include/exclude patterns,
        indexed by its destination address, then pushed into every enabled
        group_by_* inventory group.
        '''
        # Only return instances with desired instance states
        if instance.state not in self.ec2_instance_states:
            return
        # Select the best destination address: the configured attribute on
        # the instance, falling back to a tag of the same name.
        if instance.subnet_id:
            dest = getattr(instance, self.vpc_destination_variable, None)
            if dest is None:
                dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None)
        else:
            dest = getattr(instance, self.destination_variable, None)
            if dest is None:
                dest = getattr(instance, 'tags').get(self.destination_variable, None)
        if not dest:
            # Skip instances we cannot address (e.g. private VPC subnet)
            return
        # if we only want to include hosts that match a pattern, skip those that don't
        if self.pattern_include and not self.pattern_include.match(dest):
            return
        # if we need to exclude hosts that match a pattern, skip those
        if self.pattern_exclude and self.pattern_exclude.match(dest):
            return
        # Add to index
        self.index[dest] = [region, instance.id]
        # Inventory: Group by instance ID (always a group of 1)
        if self.group_by_instance_id:
            self.inventory[instance.id] = [dest]
            if self.nested_groups:
                self.push_group(self.inventory, 'instances', instance.id)
        # Inventory: Group by region
        if self.group_by_region:
            self.push(self.inventory, region, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'regions', region)
        # Inventory: Group by availability zone
        if self.group_by_availability_zone:
            self.push(self.inventory, instance.placement, dest)
            if self.nested_groups:
                if self.group_by_region:
                    self.push_group(self.inventory, region, instance.placement)
                self.push_group(self.inventory, 'zones', instance.placement)
        # Inventory: Group by Amazon Machine Image (AMI) ID
        if self.group_by_ami_id:
            ami_id = self.to_safe(instance.image_id)
            self.push(self.inventory, ami_id, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'images', ami_id)
        # Inventory: Group by instance type
        if self.group_by_instance_type:
            type_name = self.to_safe('type_' + instance.instance_type)
            self.push(self.inventory, type_name, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'types', type_name)
        # Inventory: Group by key pair
        if self.group_by_key_pair and instance.key_name:
            key_name = self.to_safe('key_' + instance.key_name)
            self.push(self.inventory, key_name, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'keys', key_name)
        # Inventory: Group by VPC
        if self.group_by_vpc_id and instance.vpc_id:
            vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id)
            self.push(self.inventory, vpc_id_name, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'vpcs', vpc_id_name)
        # Inventory: Group by security group
        if self.group_by_security_group:
            try:
                # instance.groups is missing on boto < 2.3.0, hence the
                # AttributeError handler below.
                for group in instance.groups:
                    key = self.to_safe("security_group_" + group.name)
                    self.push(self.inventory, key, dest)
                    if self.nested_groups:
                        self.push_group(self.inventory, 'security_groups', key)
            except AttributeError:
                self.fail_with_error('\n'.join(['Package boto seems a bit older.',
                                                'Please upgrade boto >= 2.3.0.']))
        # Inventory: Group by tag keys
        if self.group_by_tag_keys:
            for k, v in instance.tags.items():
                if v:
                    key = self.to_safe("tag_" + k + "=" + v)
                else:
                    key = self.to_safe("tag_" + k)
                self.push(self.inventory, key, dest)
                if self.nested_groups:
                    self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
                    self.push_group(self.inventory, self.to_safe("tag_" + k), key)
        # Inventory: Group by Route53 domain names if enabled
        if self.route53_enabled and self.group_by_route53_names:
            route53_names = self.get_instance_route53_names(instance)
            for name in route53_names:
                self.push(self.inventory, name, dest)
                if self.nested_groups:
                    self.push_group(self.inventory, 'route53', name)
        # Global Tag: instances without tags
        if self.group_by_tag_none and len(instance.tags) == 0:
            self.push(self.inventory, 'tag_none', dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'tags', 'tag_none')
        # Global Tag: tag all EC2 instances
        self.push(self.inventory, 'ec2', dest)
        self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
    def add_rds_instance(self, instance, region):
        ''' Adds an RDS instance to the inventory and index, as long as it is
        addressable

        Mirrors add_instance() but uses RDS-specific attributes (endpoint,
        instance_class, engine, parameter group) for addressing and grouping.
        '''
        # Only want available instances unless all_rds_instances is True
        if not self.all_rds_instances and instance.status != 'available':
            return
        # Select the best destination address: the endpoint's hostname.
        dest = instance.endpoint[0]
        if not dest:
            # Skip instances we cannot address (e.g. private VPC subnet)
            return
        # Add to index
        self.index[dest] = [region, instance.id]
        # Inventory: Group by instance ID (always a group of 1)
        if self.group_by_instance_id:
            self.inventory[instance.id] = [dest]
            if self.nested_groups:
                self.push_group(self.inventory, 'instances', instance.id)
        # Inventory: Group by region
        if self.group_by_region:
            self.push(self.inventory, region, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'regions', region)
        # Inventory: Group by availability zone
        if self.group_by_availability_zone:
            self.push(self.inventory, instance.availability_zone, dest)
            if self.nested_groups:
                if self.group_by_region:
                    self.push_group(self.inventory, region, instance.availability_zone)
                self.push_group(self.inventory, 'zones', instance.availability_zone)
        # Inventory: Group by instance type
        if self.group_by_instance_type:
            type_name = self.to_safe('type_' + instance.instance_class)
            self.push(self.inventory, type_name, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'types', type_name)
        # Inventory: Group by VPC
        if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id:
            vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id)
            self.push(self.inventory, vpc_id_name, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'vpcs', vpc_id_name)
        # Inventory: Group by security group
        if self.group_by_security_group:
            try:
                # security_group is missing on boto < 2.3.0, hence the
                # AttributeError handler below.
                if instance.security_group:
                    key = self.to_safe("security_group_" + instance.security_group.name)
                    self.push(self.inventory, key, dest)
                    if self.nested_groups:
                        self.push_group(self.inventory, 'security_groups', key)
            except AttributeError:
                self.fail_with_error('\n'.join(['Package boto seems a bit older.',
                                                'Please upgrade boto >= 2.3.0.']))
        # Inventory: Group by engine
        if self.group_by_rds_engine:
            self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine))
        # Inventory: Group by parameter group
        if self.group_by_rds_parameter_group:
            self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))
        # Global Tag: all RDS instances
        self.push(self.inventory, 'rds', dest)
        self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
def add_elasticache_cluster(self, cluster, region):
    ''' Adds an ElastiCache cluster to the inventory and index, as long as
        its nodes are addressable.

    :param cluster: dict describing the cluster, as returned by the
        ElastiCache ``describe`` API.
    :param region: AWS region name the cluster lives in.
    '''

    # Only want available clusters unless all_elasticache_clusters is True
    if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available':
        return

    # Select the best destination address
    if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']:
        # Memcached cluster
        dest = cluster['ConfigurationEndpoint']['Address']
        is_redis = False
    else:
        # Redis single node cluster
        # Because all Redis clusters are single nodes, we'll merge the
        # info from the cluster with info about the node
        dest = cluster['CacheNodes'][0]['Endpoint']['Address']
        is_redis = True

    if not dest:
        # Skip clusters we cannot address (e.g. private VPC subnet)
        return

    # Add to index
    self.index[dest] = [region, cluster['CacheClusterId']]

    # Inventory: Group by instance ID (always a group of 1)
    if self.group_by_instance_id:
        self.inventory[cluster['CacheClusterId']] = [dest]
        if self.nested_groups:
            self.push_group(self.inventory, 'instances', cluster['CacheClusterId'])

    # Inventory: Group by region
    # (Redis single-node clusters are grouped through their node instead.)
    if self.group_by_region and not is_redis:
        self.push(self.inventory, region, dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'regions', region)

    # Inventory: Group by availability zone
    if self.group_by_availability_zone and not is_redis:
        self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
        if self.nested_groups:
            if self.group_by_region:
                self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
            self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])

    # Inventory: Group by instance type
    if self.group_by_instance_type and not is_redis:
        type_name = self.to_safe('type_' + cluster['CacheNodeType'])
        self.push(self.inventory, type_name, dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'types', type_name)

    # Inventory: Group by VPC (information not available in the current
    # AWS API version for ElastiCache)

    # Inventory: Group by security group
    if self.group_by_security_group and not is_redis:
        # Check for the existence of the 'SecurityGroups' key and also if
        # this key has some value. When the cluster is not placed in a SG
        # the query can return None here and cause an error.
        if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
            for security_group in cluster['SecurityGroups']:
                key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
                self.push(self.inventory, key, dest)
                if self.nested_groups:
                    self.push_group(self.inventory, 'security_groups', key)

    # Inventory: Group by engine
    if self.group_by_elasticache_engine and not is_redis:
        self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
        if self.nested_groups:
            # BUGFIX: register the same group name that was created just
            # above ("elasticache_<engine>"). Previously the bare engine
            # name was pushed here, producing a child group that no host
            # was ever added to. This also matches add_elasticache_node.
            self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine']))

    # Inventory: Group by parameter group
    if self.group_by_elasticache_parameter_group:
        self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName']))

    # Inventory: Group by replication group
    if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']:
        self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId']))

    # Global Tag: all ElastiCache clusters
    # NOTE(review): this pushes the cluster *id*, not `dest`, unlike the
    # other global tags — kept as-is since downstream consumers may rely
    # on it; confirm before changing.
    self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId'])

    host_info = self.get_host_info_dict_from_describe_dict(cluster)

    self.inventory["_meta"]["hostvars"][dest] = host_info

    # Add the nodes
    for node in cluster['CacheNodes']:
        self.add_elasticache_node(node, cluster, region)
def add_elasticache_node(self, node, cluster, region):
    ''' Adds an ElastiCache node to the inventory and index, as long as
        it is addressable.

    `node` and `cluster` are dicts straight from the ElastiCache
    describe API; `cluster` is the node's parent cluster.
    '''

    # Only want available nodes unless all_elasticache_nodes is True
    if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available':
        return

    # Select the best destination address
    dest = node['Endpoint']['Address']

    if not dest:
        # Skip nodes we cannot address (e.g. private VPC subnet)
        return

    # Hosts are identified as "<cluster id>_<node id>" so nodes of the
    # same cluster sort together.
    node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId'])

    # Add to index
    self.index[dest] = [region, node_id]

    # Inventory: Group by node ID (always a group of 1)
    if self.group_by_instance_id:
        self.inventory[node_id] = [dest]
        if self.nested_groups:
            self.push_group(self.inventory, 'instances', node_id)

    # Inventory: Group by region
    if self.group_by_region:
        self.push(self.inventory, region, dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'regions', region)

    # Inventory: Group by availability zone (taken from the parent
    # cluster's preferred AZ)
    if self.group_by_availability_zone:
        self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
        if self.nested_groups:
            if self.group_by_region:
                self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
            self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])

    # Inventory: Group by node type (also taken from the parent cluster)
    if self.group_by_instance_type:
        type_name = self.to_safe('type_' + cluster['CacheNodeType'])
        self.push(self.inventory, type_name, dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'types', type_name)

    # Inventory: Group by VPC (information not available in the current
    # AWS API version for ElastiCache)

    # Inventory: Group by security group
    if self.group_by_security_group:
        # Check for the existence of the 'SecurityGroups' key and also if
        # this key has some value. When the cluster is not placed in a SG
        # the query can return None here and cause an error.
        if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
            for security_group in cluster['SecurityGroups']:
                key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
                self.push(self.inventory, key, dest)
                if self.nested_groups:
                    self.push_group(self.inventory, 'security_groups', key)

    # Inventory: Group by engine
    if self.group_by_elasticache_engine:
        self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine']))

    # Inventory: Group by parameter group (done at cluster level)

    # Inventory: Group by replication group (done at cluster level)

    # Inventory: Group by ElastiCache Cluster
    if self.group_by_elasticache_cluster:
        self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest)

    # Global Tag: all ElastiCache nodes
    self.push(self.inventory, 'elasticache_nodes', dest)

    host_info = self.get_host_info_dict_from_describe_dict(node)

    # A Redis single-node cluster already wrote hostvars for this address
    # in add_elasticache_cluster; merge rather than overwrite them.
    if dest in self.inventory["_meta"]["hostvars"]:
        self.inventory["_meta"]["hostvars"][dest].update(host_info)
    else:
        self.inventory["_meta"]["hostvars"][dest] = host_info
def add_elasticache_replication_group(self, replication_group, region):
    ''' Adds an ElastiCache replication group to the inventory and index.

    `replication_group` is a dict from the ElastiCache describe API;
    the group is addressed by its primary endpoint.
    '''

    # Only want available clusters unless all_elasticache_replication_groups is True
    if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available':
        return

    # Select the best destination address (PrimaryEndpoint)
    # NOTE(review): only NodeGroups[0] is inspected — assumes a single
    # node group per replication group; confirm for cluster-mode Redis.
    dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address']

    if not dest:
        # Skip clusters we cannot address (e.g. private VPC subnet)
        return

    # Add to index
    self.index[dest] = [region, replication_group['ReplicationGroupId']]

    # Inventory: Group by ID (always a group of 1)
    if self.group_by_instance_id:
        self.inventory[replication_group['ReplicationGroupId']] = [dest]
        if self.nested_groups:
            self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId'])

    # Inventory: Group by region
    if self.group_by_region:
        self.push(self.inventory, region, dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'regions', region)

    # Inventory: Group by availability zone (doesn't apply to replication groups)
    # Inventory: Group by node type (doesn't apply to replication groups)
    # Inventory: Group by VPC (information not available in the current
    # AWS API version for replication groups
    # Inventory: Group by security group (doesn't apply to replication groups)
    # Check this value in cluster level

    # Inventory: Group by engine (replication groups are always Redis)
    if self.group_by_elasticache_engine:
        self.push(self.inventory, 'elasticache_redis', dest)
        if self.nested_groups:
            self.push_group(self.inventory, 'elasticache_engines', 'redis')

    # Global Tag: all ElastiCache replication groups (keyed by group id,
    # not by address)
    self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId'])

    host_info = self.get_host_info_dict_from_describe_dict(replication_group)

    self.inventory["_meta"]["hostvars"][dest] = host_info
def get_route53_records(self):
    """Populate ``self.route53_records`` with a map of Route53 resource
    record values (IPs / DNS names) to the set of domain names pointing
    at them, skipping any zone listed in ``route53_excluded_zones``."""
    conn = route53.Route53Connection()

    self.route53_records = {}
    for zone in conn.get_zones():
        # Zone names come back with a trailing dot; the exclusion list
        # is configured without it.
        if zone.name[:-1] in self.route53_excluded_zones:
            continue
        for record_set in conn.get_all_rrsets(zone.id):
            domain = record_set.name
            if domain.endswith('.'):
                domain = domain[:-1]
            for resource in record_set.resource_records:
                self.route53_records.setdefault(resource, set()).add(domain)
def get_instance_route53_names(self, instance):
    """Return the list of Route53 domain names that reference *instance*.

    The instance's public/private DNS names and IP addresses are looked
    up in ``self.route53_records``; an empty list is returned when no
    record points at the instance.
    """
    _missing = object()
    names = set()
    for attribute in ('public_dns_name', 'private_dns_name',
                      'ip_address', 'private_ip_address'):
        # getattr with a sentinel default absorbs AttributeError for
        # attributes the boto object does not expose.
        value = getattr(instance, attribute, _missing)
        if value is _missing:
            continue
        if value in self.route53_records:
            names.update(self.route53_records[value])
    return list(names)
def get_host_info_dict_from_instance(self, instance):
    """Flatten a boto EC2 instance object into a dict of 'ec2_*' hostvars.

    Simple values are copied (strings stripped, None replaced by '');
    known complex attributes (state, placement, tags, security groups,
    region) are expanded into dedicated keys; any other complex type is
    silently dropped. Note the branch order matters: the int/str/None
    checks run before the region/placement/tags branches.
    """
    instance_vars = {}
    for key in vars(instance):
        value = getattr(instance, key)
        key = self.to_safe('ec2_' + key)

        # Handle complex types
        # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518
        if key == 'ec2__state':
            instance_vars['ec2_state'] = instance.state or ''
            instance_vars['ec2_state_code'] = instance.state_code
        elif key == 'ec2__previous_state':
            instance_vars['ec2_previous_state'] = instance.previous_state or ''
            instance_vars['ec2_previous_state_code'] = instance.previous_state_code
        elif type(value) in [int, bool]:
            instance_vars[key] = value
        elif isinstance(value, six.string_types):
            instance_vars[key] = value.strip()
        elif type(value) == type(None):
            instance_vars[key] = ''
        elif key == 'ec2_region':
            # boto RegionInfo object; only its name is useful here.
            instance_vars[key] = value.name
        elif key == 'ec2__placement':
            instance_vars['ec2_placement'] = value.zone
        elif key == 'ec2_tags':
            # Expand each EC2 tag into its own 'ec2_tag_<name>' variable.
            for k, v in value.items():
                key = self.to_safe('ec2_tag_' + k)
                instance_vars[key] = v
        elif key == 'ec2_groups':
            # Security groups become two comma-separated lists.
            group_ids = []
            group_names = []
            for group in value:
                group_ids.append(group.id)
                group_names.append(group.name)
            instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids])
            instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names])
        else:
            pass
            # TODO Product codes if someone finds them useful
            #print key
            #print type(value)
            #print value

    return instance_vars
def get_host_info_dict_from_describe_dict(self, describe_dict):
    ''' Parses the dictionary returned by the API call into a flat list
        of parameters. This method should be used only when 'describe' is
        used directly because Boto doesn't provide specific classes. '''

    # I really don't agree with prefixing everything with 'ec2'
    # because EC2, RDS and ElastiCache are different services.
    # I'm just following the pattern used until now to not break any
    # compatibility.

    host_info = {}
    for key in describe_dict:
        value = describe_dict[key]
        # Keys arrive CamelCased from the API; normalize to ec2_snake_case.
        key = self.to_safe('ec2_' + self.uncammelize(key))

        # Handle complex types

        # Target: Memcached Cache Clusters
        if key == 'ec2_configuration_endpoint' and value:
            host_info['ec2_configuration_endpoint_address'] = value['Address']
            host_info['ec2_configuration_endpoint_port'] = value['Port']

        # Target: Cache Nodes and Redis Cache Clusters (single node)
        if key == 'ec2_endpoint' and value:
            host_info['ec2_endpoint_address'] = value['Address']
            host_info['ec2_endpoint_port'] = value['Port']

        # Target: Redis Replication Groups
        if key == 'ec2_node_groups' and value:
            host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address']
            host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port']
            replica_count = 0
            for node in value[0]['NodeGroupMembers']:
                if node['CurrentRole'] == 'primary':
                    host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address']
                    host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']
                    host_info['ec2_primary_cluster_id'] = node['CacheClusterId']
                elif node['CurrentRole'] == 'replica':
                    # Each replica gets numbered address/port/id variables.
                    host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address']
                    host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port']
                    host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId']
                    replica_count += 1

        # Target: Redis Replication Groups
        # NOTE(review): this is an `if`, not `elif`, so it starts a new
        # chain — keys matched by the standalone `if`s above still fall
        # through to the final `else: pass` harmlessly, but keep that in
        # mind when inserting new branches here.
        if key == 'ec2_member_clusters' and value:
            host_info['ec2_member_clusters'] = ','.join([str(i) for i in value])

        # Target: All Cache Clusters
        elif key == 'ec2_cache_parameter_group':
            host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']])
            host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName']
            host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus']

        # Target: Almost everything
        elif key == 'ec2_security_groups':

            # Skip if SecurityGroups is None
            # (it is possible to have the key defined but no value in it).
            if value is not None:
                sg_ids = []
                for sg in value:
                    sg_ids.append(sg['SecurityGroupId'])
                host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids])

        # Target: Everything
        # Preserve booleans and integers
        elif type(value) in [int, bool]:
            host_info[key] = value

        # Target: Everything
        # Sanitize string values
        elif isinstance(value, six.string_types):
            host_info[key] = value.strip()

        # Target: Everything
        # Replace None by an empty string
        elif type(value) == type(None):
            host_info[key] = ''

        else:
            # Remove non-processed complex types
            pass

    return host_info
def get_host_info(self):
    ''' Get variables about a specific host (self.args.host), returned
        as a pretty-printed JSON string. Falls back to refreshing the
        cache once before giving up with an empty dict. '''

    if len(self.index) == 0:
        # Need to load index from cache
        self.load_index_from_cache()

    if not self.args.host in self.index:
        # try updating the cache
        self.do_api_calls_update_cache()
        if not self.args.host in self.index:
            # host might not exist anymore
            return self.json_format_dict({}, True)

    # Index maps destination address -> [region, instance id].
    (region, instance_id) = self.index[self.args.host]

    instance = self.get_instance(region, instance_id)
    return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)
def push(self, my_dict, key, element):
    """Append *element* to the group *key* in *my_dict*, creating the
    group as a plain list when it does not exist yet.

    A group may be either a bare host list or a dict holding a 'hosts'
    list (the nested-group representation); both forms are handled.
    """
    group = my_dict.setdefault(key, [])
    if not isinstance(group, dict):
        group.append(element)
    else:
        group.setdefault('hosts', []).append(element)
def push_group(self, my_dict, key, element):
    """Record *element* as a child group of the group *key* in *my_dict*.

    A parent stored as a bare host list is promoted to the dict form
    ({'hosts': [...], 'children': [...]}); duplicate children are not
    added twice.
    """
    parent = my_dict.setdefault(key, {})
    if not isinstance(parent, dict):
        # Promote a bare host list to the nested-group dict form.
        parent = {'hosts': parent}
        my_dict[key] = parent
    children = parent.setdefault('children', [])
    if element not in children:
        children.append(element)
def get_inventory_from_cache(self):
    ''' Reads the inventory from the cache file and returns it as a JSON
        string.

    Uses a context manager so the file handle is always closed (the
    previous version leaked it).
    '''
    with open(self.cache_path_cache, 'r') as cache:
        return cache.read()
def load_index_from_cache(self):
    ''' Reads the index from the cache file and sets self.index.

    Uses a context manager so the file handle is always closed (the
    previous version leaked it).
    '''
    with open(self.cache_path_index, 'r') as cache:
        self.index = json.loads(cache.read())
def write_to_cache(self, data, filename):
    ''' Writes data in JSON format to a file.

    Serializes with self.json_format_dict (pretty-printed) and uses a
    context manager so the handle is closed even if the write raises —
    consistent with the two cache readers.
    '''
    json_data = self.json_format_dict(data, True)
    with open(filename, 'w') as cache:
        cache.write(json_data)
def uncammelize(self, key):
    """Convert a CamelCase identifier to snake_case
    (e.g. 'CacheClusterId' -> 'cache_cluster_id')."""
    # First split an uppercase run from a following capitalized word
    # ('ABCWord' -> 'ABC_Word'), then split lower/digit-to-upper
    # boundaries, and finally lowercase everything.
    step_one = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', step_one).lower()
def to_safe(self, word):
    ''' Converts 'bad' characters in a string to underscores so they can be
        used as Ansible groups '''
    # Raw string: the old pattern "[^A-Za-z0-9\_]" contained the invalid
    # escape sequence '\_' (a DeprecationWarning on modern Pythons); the
    # underscore needs no escaping inside a character class.
    return re.sub(r"[^A-Za-z0-9_]", "_", word)
def json_format_dict(self, data, pretty=False):
    """Serialize *data* to a JSON string; when *pretty* is true the
    output is indented two spaces with keys sorted."""
    kwargs = {'sort_keys': True, 'indent': 2} if pretty else {}
    return json.dumps(data, **kwargs)
# Run the script: instantiating Ec2Inventory parses the CLI arguments,
# builds the inventory (or serves it from cache) and prints the JSON.
Ec2Inventory()
| gpl-3.0 |
PGHS-CP1A-2015/python_koans_kjhansen | python3/koans/about_dice_project.py | 14 | 1958 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
import random
class DiceSet:
    """A set of six-sided dice.

    ``values`` holds the outcome of the most recent :meth:`roll`, or
    None if the dice have never been rolled.
    """

    def __init__(self):
        self._values = None

    @property
    def values(self):
        """The list of face values produced by the last roll."""
        return self._values

    def roll(self, n):
        """Roll *n* six-sided dice and store the outcome in ``values``.

        A fresh list is bound on every call, so results previously
        handed out to callers are never mutated in place (required by
        the 'values do not change unless explicitly rolled' test below).
        """
        self._values = [random.randint(1, 6) for _ in range(n)]
class AboutDiceProject(Koan):
    """Koan exercise: these tests drive the implementation of DiceSet.

    They pass once DiceSet.roll() is implemented and document the
    expected dice behaviour.
    """

    def test_can_create_a_dice_set(self):
        dice = DiceSet()
        self.assertTrue(dice)

    def test_rolling_the_dice_returns_a_set_of_integers_between_1_and_6(self):
        dice = DiceSet()

        dice.roll(5)
        self.assertTrue(isinstance(dice.values, list), "should be a list")
        self.assertEqual(5, len(dice.values))
        for value in dice.values:
            self.assertTrue(value >= 1 and value <= 6, "value " + str(value) + " must be between 1 and 6")

    def test_dice_values_do_not_change_unless_explicitly_rolled(self):
        # Reading .values twice without rolling must yield the same result.
        dice = DiceSet()
        dice.roll(5)
        first_time = dice.values
        second_time = dice.values
        self.assertEqual(first_time, second_time)

    def test_dice_values_should_change_between_rolls(self):
        dice = DiceSet()

        dice.roll(5)
        first_time = dice.values

        dice.roll(5)
        second_time = dice.values

        self.assertNotEqual(first_time, second_time, \
            "Two rolls should not be equal")

        # THINK ABOUT IT:
        #
        # If the rolls are random, then it is possible (although not
        # likely) that two consecutive rolls are equal. What would be a
        # better way to test this?

    def test_you_can_roll_different_numbers_of_dice(self):
        dice = DiceSet()

        dice.roll(3)
        self.assertEqual(3, len(dice.values))

        dice.roll(1)
        self.assertEqual(1, len(dice.values))
| mit |
zork9/pygame-pyMM | maproom1.py | 1 | 6299 |
# Copyright (c) 2013 Johan Ceuppens.
# All rights reserved.
# Redistribution and use in source and binary forms are permitted
# provided that the above copyright notice and this paragraph are
# duplicated in all such forms and that any documentation,
# advertising materials, and other materials related to such
# distribution and use acknowledge that the software was developed
# by the Johan Ceuppens. The name of the
# Johan Ceuppens may not be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
# Copyright (C) Johan Ceuppens 2010
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pygame
from pygame.locals import *
from maproomdungeon import *
from bigsnail import *
from wall import *
from ladder import *
from imagebox import *
from skeletondrone import *
from greenscorpion import *
from rainman import *
from bombertoad import *
from toadman import *
class Maproom1(MaproomDungeon):
    """Room with a (big) map: first dungeon room, with its enemies,
    platforms, walls and ladders laid out in __init__. Coordinates are
    room pixels; the room scrolls horizontally via self.relativex."""

    def __init__(self,x,y):
        MaproomDungeon.__init__(self,x,y)
        # Scrolling background; logical room is wider than the screen.
        self.background = pygame.image.load('./pics/bg2-2400x600.bmp').convert()
        self.w = 2400
        self.h = 480
        self.mapoffsetx = 400
        ###self.northwall1 = Tilebox(1,1,60,48,16,1,'./pics/walldungeonnorth2-beholderglass-60x48.bmp')
        ## self.northwall1 = Tilebox(1,1,60,48,13,1,'./pics/walldungeonnorth1-60x48.bmp')
        ## self.southwall1 = Tilebox(1,200,30,48,13,1,'./pics/walldungeonsouth1-30x48.bmp')
        ## self.westwall1 = Tilebox(360,200,48,60,1,10,'./pics/walldungeonwest1-48x60.bmp')
        ## self.eastwall1 = Tilebox(775,1,48,60,1,14,'./pics/walldungeoneast1-48x60.bmp')
        ## self.tileboxes.append(self.northwall1)
        ## self.tileboxes.append(self.westwall1)
        ## self.tileboxes.append(self.eastwall1)
        ## self.tileboxes.append(self.southwall1)

        # Enemies first (see NOTE below about ordering vs. boxes).
        self.gameobjects.append(BomberToad(2000,420))
        self.gameobjects.append(Rainman(1000,140))
        self.gameobjects.append(GreenScorpion(600,320))
        self.gameobjects.append(GreenScorpion(700,320))
        self.gameobjects.append(GreenScorpion(800,320))
        self.gameobjects.append(GreenScorpion(1000,320))
        self.gameobjects.append(BigSnail(830,92))
        self.gameobjects.append(BigSnail(2425,600))
        # left NOTE : boxes collide so put them after enemies !
        ### walls
        self.addeastwall(2700,0,50,600,"./pics/wall-level1-50x500.bmp")
        ### roof box
        self.gameobjects.append(ImageBox(0,50,2400,45,"./pics/platform-265x50-1.bmp"))
        # Floor and stepped platforms (x, y, w, h).
        self.gameobjects.append(Box(0,400,1300,250))
        self.gameobjects.append(Box(1280,460,300,25))### FIXME some 25 -> 250
        self.gameobjects.append(Box(1580,550,300,25))
        self.gameobjects.append(Box(1920,560,300,25))
        self.gameobjects.append(Box(2250,560,150,25))
        # First BigSnail sits here
        self.gameobjects.append(ImageBox(0,200,265,25,"./pics/platform-265x50-1.bmp"))
        # Second BigSnail sits here
        self.gameobjects.append(ImageBox(2400,704,265,25,"./pics/platform-265x50-1.bmp"))
        self.gameobjects.append(ImageBox(800,200,265,25,"./pics/platform-265x50-1.bmp"))
        # ladders
        # first part
        self.ladders.append(Ladder(1000,200,20,71,"./pics/ladder-toadmanlevel-20x71.bmp"))
        # second part
        self.ladders.append(Ladder(1000,271,20,71,"./pics/ladder-toadmanlevel-20x71.bmp"))

    def draw(self,screen,player):
        # draw bg
        screen.blit(self.background, (0+self.relativex, 0+self.relativey))
        # draw walls
        MaproomDungeon.draw(self,screen)
        for t in self.tileboxes:
            t.draw(screen,self.relativex,self.relativey)
        #self.southwall1.draw(screen,self.relativex,self.relativey)
        # draw gameobjects: each object is updated, then drawn
        for i in self.gameobjects:
            if i != None:
                i.update(self,player)
                i.draw(screen,self)
        for i in self.ladders:
            if i != None:
                i.update(self,player)
                i.draw(screen,self)

    def isroomdownexit(self):
        # The room is exited once it has scrolled past its right edge.
        if self.relativex < -2370:
            return 1
        return 0

    def setxyfromdown(self):
        # Reset the scroll offset for the room being entered.
        self.relativex = 0
        self.relativey = 0

    def exit(self, game):
        # Returns the id of the next room (2) or 0 to stay in this one.
        if self.isroomdownexit():
            self.setxyfromdown()
            return 2
        return 0

    def collidesword(self,player):
        # NOTE(review): `id` is computed but never checked, so this
        # returns the FIRST non-None gameobject regardless of whether it
        # was actually hit — probably intended `if id: return i`.
        # Confirm against the other rooms before changing.
        for i in self.gameobjects:
            if i!= None:
                id = i.collidewithsword(self,player)
                #self.relativex = self.prevx
                #self.relativey = self.prevy
                return i ## NOTE : returns collided entity (single), put enemies before walls in gameobjects
        return None

    def collideswordlow(self,player):
        # NOTE(review): same unconditional-return pattern as collidesword.
        for i in self.gameobjects:
            if i!= None:
                id = i.collidewithswordlow(self,player)
                #self.relativex = self.prevx
                #self.relativey = self.prevy
                return i ## NOTE : returns collided entity (single), put enemies before walls in gameobjects
        return None

    def moveleft(self):
        # Scroll the map 10px west; prev* lets collision code undo a move.
        self.direction = "west"
        self.prevx = self.relativex + 10
        self.prevy = self.relativey
        self.relativex = self.relativex - 10
        #print "move map left %d" % self.relativex

    ### NOTE : the following code does not move a map window to the left,
    ### the player cannot go left
    def moveright(self):
        self.direction = "east"
        self.prevx = self.relativex - 10
        self.prevy = self.relativey
        if self.relativex < 30:
            self.relativex = self.relativex + 10

    def removeobject(self, o):
        # Slots are set to None (not deleted) so the indices of the
        # remaining objects stay stable during iteration elsewhere.
        for i in range(0,len(self.gameobjects)):
            if self.gameobjects[i] == o:
                self.gameobjects[i] = None
| gpl-2.0 |
gojira/tensorflow | tensorflow/contrib/data/python/kernel_tests/serialization/sequence_dataset_serialization_test.py | 14 | 4952 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the sequence datasets serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.data.python.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
class SkipDatasetSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase):
  """Serialization coverage for `Dataset.skip`."""

  def _build_skip_dataset(self, count):
    # Ten scalar elements, so the expected output count is 10 - skipped.
    return dataset_ops.Dataset.from_tensor_slices((np.arange(10),)).skip(count)

  def testSkipFewerThanInputs(self):
    skipped = 4
    self.run_core_tests(lambda: self._build_skip_dataset(skipped),
                        lambda: self._build_skip_dataset(skipped + 2),
                        10 - skipped)

  def testSkipVarious(self):
    # Skipping more than the input size, exactly the input size, or
    # "all" (-1) each yields zero elements.
    for count in (20, 10, -1):
      self.run_core_tests(lambda: self._build_skip_dataset(count), None, 0)
    # Skipping nothing passes every element through.
    self.run_core_tests(lambda: self._build_skip_dataset(0), None, 10)

  def testInvalidSkip(self):
    # A non-scalar count is rejected at graph-construction time.
    with self.assertRaisesRegexp(ValueError,
                                 'Shape must be rank 0 but is rank 1'):
      self.run_core_tests(lambda: self._build_skip_dataset([1, 2]), None, 0)
class TakeDatasetSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase):
  """Serialization coverage for `Dataset.take`."""

  def _build_take_dataset(self, count):
    # Ten scalar elements; `take` caps the number that come out.
    return dataset_ops.Dataset.from_tensor_slices((np.arange(10),)).take(count)

  def testTakeFewerThanInputs(self):
    taken = 4
    self.run_core_tests(
        lambda: self._build_take_dataset(taken),
        lambda: self._build_take_dataset(taken + 2),
        taken,
    )

  def testTakeVarious(self):
    # Taking more than the input size, exactly the input size, or all
    # (-1) yields every element; taking nothing yields none.
    for count, num_outputs in ((20, 10), (10, 10), (-1, 10), (0, 0)):
      self.run_core_tests(lambda: self._build_take_dataset(count), None,
                          num_outputs)

  def testInvalidTake(self):
    # A non-scalar count is rejected at graph-construction time.
    with self.assertRaisesRegexp(ValueError,
                                 'Shape must be rank 0 but is rank 1'):
      self.run_core_tests(lambda: self._build_take_dataset([1, 2]), None, 0)
class RepeatDatasetSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase):
  """Serialization coverage for `Dataset.repeat`, including the
  infinite (-1) case which exercises the non-exhausting code paths."""

  def _build_repeat_dataset(self, count, take_count=3):
    # `take_count` elements per epoch, repeated `count` times.
    components = (np.arange(10),)
    return dataset_ops.Dataset.from_tensor_slices(components).take(
        take_count).repeat(count)

  def testFiniteRepeat(self):
    count = 10
    # 3 elements per epoch * `count` epochs.
    self.run_core_tests(lambda: self._build_repeat_dataset(count),
                        lambda: self._build_repeat_dataset(count + 2),
                        3 * count)

  def testEmptyRepeat(self):
    self.run_core_tests(lambda: self._build_repeat_dataset(0), None, 0)

  def testInfiniteRepeat(self):
    # An infinite dataset can never be exhausted, so each scenario is
    # verified individually with verify_exhausted=False.
    self.verify_unused_iterator(
        lambda: self._build_repeat_dataset(-1), 10, verify_exhausted=False)
    self.verify_init_before_restore(
        lambda: self._build_repeat_dataset(-1), 10, verify_exhausted=False)
    self.verify_multiple_breaks(
        lambda: self._build_repeat_dataset(-1), 20, verify_exhausted=False)
    self.verify_reset_restored_iterator(
        lambda: self._build_repeat_dataset(-1), 20, verify_exhausted=False)
    self.verify_restore_in_modified_graph(
        lambda: self._build_repeat_dataset(-1),
        lambda: self._build_repeat_dataset(2),
        20,
        verify_exhausted=False)

    # Test repeat empty dataset
    self.run_core_tests(lambda: self._build_repeat_dataset(-1, 0), None, 0)

  def testInvalidRepeat(self):
    # A non-scalar count is rejected at graph-construction time.
    with self.assertRaisesRegexp(
        ValueError, 'Shape must be rank 0 but is rank 1'):
      self.run_core_tests(lambda: self._build_repeat_dataset([1, 2], 0),
                          None, 0)
# Standard TensorFlow test entry point when run directly.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
colour-science/colour-demosaicing | colour_demosaicing/bayer/tests/test_mosaicing.py | 1 | 1598 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Defines the unit tests for the :mod:`colour_demosaicing.bayer.mosaicing`
module.
"""
import numpy as np
import os
import unittest
from colour import read_image
from colour_demosaicing import TESTS_RESOURCES_DIRECTORY
from colour_demosaicing.bayer import mosaicing_CFA_Bayer
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2015-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = ['BAYER_DIRECTORY', 'TestMosaicing_CFA_Bayer']
BAYER_DIRECTORY = os.path.join(TESTS_RESOURCES_DIRECTORY, 'colour_demosaicing',
'bayer')
class TestMosaicing_CFA_Bayer(unittest.TestCase):
    """
    Defines :func:`colour_demosaicing.bayer.mosaicing.mosaicing_CFA_Bayer`
    definition unit tests methods.
    """

    def test_mosaicing_CFA_Bayer(self):
        """
        Tests :func:`colour_demosaicing.bayer.mosaicing.mosaicing_CFA_Bayer`
        definition against reference fixtures for all four Bayer patterns.
        """

        image = read_image(
            str(os.path.join(BAYER_DIRECTORY, 'Lighthouse.exr')))

        for pattern in ('RGGB', 'BGGR', 'GRBG', 'GBRG'):
            # NOTE(review): this template is loop-invariant and could be
            # hoisted above the loop.
            CFA = os.path.join(BAYER_DIRECTORY, 'Lighthouse_CFA_{0}.exr')

            # Reference fixtures store the mosaic in the first channel
            # only, hence the [..., 0] indexing.
            np.testing.assert_almost_equal(
                mosaicing_CFA_Bayer(image, pattern),
                read_image(str(CFA.format(pattern)))[..., 0],
                decimal=7)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
MericGarcia/deployment-mean | node_modules/meanio/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/ninja_syntax.py | 2485 | 5536 | # This file comes from
# https://github.com/martine/ninja/blob/master/misc/ninja_syntax.py
# Do not edit! Edit the upstream one instead.
"""Python module for generating .ninja files.
Note that this is emphatically not a required piece of Ninja; it's
just a helpful utility for build-file-generation systems that already
use Python.
"""
import textwrap
import re
def escape_path(word):
    """Escape a path for use in a ninja file.

    Replacements are applied in order — existing '$ ' pairs first, then
    bare spaces, then colons — so a space already escaped with '$' is
    not re-escaped incorrectly.
    """
    for raw, escaped in (('$ ', '$$ '), (' ', '$ '), (':', '$:')):
        word = word.replace(raw, escaped)
    return word
class Writer(object):
def __init__(self, output, width=78):
    # `output`: file-like object the ninja syntax is written to.
    # `width`: column at which long lines are wrapped by _line().
    self.output = output
    self.width = width
def newline(self):
    """Emit a single blank line."""
    self.output.write('\n')
def comment(self, text):
    """Emit *text* as one or more '# '-prefixed comment lines, wrapped
    to the writer's configured width."""
    # Reserve two columns for the '# ' prefix when wrapping.
    for chunk in textwrap.wrap(text, self.width - 2):
        self.output.write('# %s\n' % chunk)
def variable(self, key, value, indent=0):
    """Emit a 'key = value' binding at *indent* levels.

    A list value is joined with spaces (falsy entries dropped); a None
    value suppresses the line entirely.
    """
    if value is None:
        return
    if isinstance(value, list):
        value = ' '.join(v for v in value if v)  # drop empty strings
    self._line('%s = %s' % (key, value), indent)
def pool(self, name, depth):
    """Declare a ninja pool *name* limiting concurrency to *depth* jobs."""
    self._line('pool %s' % name)
    self.variable('depth', depth, indent=1)
def rule(self, name, command, description=None, depfile=None,
generator=False, pool=None, restat=False, rspfile=None,
rspfile_content=None, deps=None):
self._line('rule %s' % name)
self.variable('command', command, indent=1)
if description:
self.variable('description', description, indent=1)
if depfile:
self.variable('depfile', depfile, indent=1)
if generator:
self.variable('generator', '1', indent=1)
if pool:
self.variable('pool', pool, indent=1)
if restat:
self.variable('restat', '1', indent=1)
if rspfile:
self.variable('rspfile', rspfile, indent=1)
if rspfile_content:
self.variable('rspfile_content', rspfile_content, indent=1)
if deps:
self.variable('deps', deps, indent=1)
def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
variables=None):
outputs = self._as_list(outputs)
all_inputs = self._as_list(inputs)[:]
out_outputs = list(map(escape_path, outputs))
all_inputs = list(map(escape_path, all_inputs))
if implicit:
implicit = map(escape_path, self._as_list(implicit))
all_inputs.append('|')
all_inputs.extend(implicit)
if order_only:
order_only = map(escape_path, self._as_list(order_only))
all_inputs.append('||')
all_inputs.extend(order_only)
self._line('build %s: %s' % (' '.join(out_outputs),
' '.join([rule] + all_inputs)))
if variables:
if isinstance(variables, dict):
iterator = iter(variables.items())
else:
iterator = iter(variables)
for key, val in iterator:
self.variable(key, val, indent=1)
return outputs
def include(self, path):
self._line('include %s' % path)
def subninja(self, path):
self._line('subninja %s' % path)
def default(self, paths):
self._line('default %s' % ' '.join(self._as_list(paths)))
def _count_dollars_before_index(self, s, i):
"""Returns the number of '$' characters right in front of s[i]."""
dollar_count = 0
dollar_index = i - 1
while dollar_index > 0 and s[dollar_index] == '$':
dollar_count += 1
dollar_index -= 1
return dollar_count
def _line(self, text, indent=0):
"""Write 'text' word-wrapped at self.width characters."""
leading_space = ' ' * indent
while len(leading_space) + len(text) > self.width:
# The text is too wide; wrap if possible.
# Find the rightmost space that would obey our width constraint and
# that's not an escaped space.
available_space = self.width - len(leading_space) - len(' $')
space = available_space
while True:
space = text.rfind(' ', 0, space)
if space < 0 or \
self._count_dollars_before_index(text, space) % 2 == 0:
break
if space < 0:
# No such space; just use the first unescaped space we can find.
space = available_space - 1
while True:
space = text.find(' ', space + 1)
if space < 0 or \
self._count_dollars_before_index(text, space) % 2 == 0:
break
if space < 0:
# Give up on breaking.
break
self.output.write(leading_space + text[0:space] + ' $\n')
text = text[space+1:]
# Subsequent lines are continuations, so indent them.
leading_space = ' ' * (indent+2)
self.output.write(leading_space + text + '\n')
def _as_list(self, input):
if input is None:
return []
if isinstance(input, list):
return input
return [input]
def escape(string):
    """Return *string* with ninja's sole metacharacter '$' doubled, so it can
    be embedded into a Ninja file without further interpretation."""
    assert '\n' not in string, 'Ninja syntax does not allow newlines'
    escaped = string.replace('$', '$$')
    return escaped
| mit |
dhoffman34/django | tests/commands_sql/tests.py | 6 | 3478 | from __future__ import unicode_literals
from django.apps import apps
from django.core.management.color import no_style
from django.core.management.sql import (sql_create, sql_delete, sql_indexes,
sql_destroy_indexes, sql_all)
from django.db import connections, DEFAULT_DB_ALIAS, router
from django.test import TestCase
from django.utils import six
# See also initial_sql_regress for 'custom_sql_for_model' tests
class SQLCommandsTestCase(TestCase):
    """Tests for several functions in django/core/management/sql.py"""

    def count_ddl(self, output, cmd):
        # Number of emitted statements starting with the DDL keyword `cmd`.
        return len([o for o in output if o.startswith(cmd)])

    def test_sql_create(self):
        app_config = apps.get_app_config('commands_sql')
        output = sql_create(app_config, no_style(), connections[DEFAULT_DB_ALIAS])
        create_tables = [o for o in output if o.startswith('CREATE TABLE')]
        # The commands_sql app defines three models, hence three tables.
        self.assertEqual(len(create_tables), 3)
        # Lower so that Oracle's upper case tbl names wont break
        sql = create_tables[-1].lower()
        six.assertRegex(self, sql, r'^create table .commands_sql_book.*')

    def test_sql_delete(self):
        app_config = apps.get_app_config('commands_sql')
        # close_connection=False keeps the test connection usable afterwards.
        output = sql_delete(app_config, no_style(), connections[DEFAULT_DB_ALIAS], close_connection=False)
        drop_tables = [o for o in output if o.startswith('DROP TABLE')]
        self.assertEqual(len(drop_tables), 3)
        # Lower so that Oracle's upper case tbl names wont break
        sql = drop_tables[-1].lower()
        six.assertRegex(self, sql, r'^drop table .commands_sql_comment.*')

    def test_sql_indexes(self):
        app_config = apps.get_app_config('commands_sql')
        output = sql_indexes(app_config, no_style(), connections[DEFAULT_DB_ALIAS])
        # PostgreSQL creates one additional index for CharField
        self.assertIn(self.count_ddl(output, 'CREATE INDEX'), [3, 4])

    def test_sql_destroy_indexes(self):
        app_config = apps.get_app_config('commands_sql')
        output = sql_destroy_indexes(app_config, no_style(), connections[DEFAULT_DB_ALIAS])
        # PostgreSQL creates one additional index for CharField
        self.assertIn(self.count_ddl(output, 'DROP INDEX'), [3, 4])

    def test_sql_all(self):
        app_config = apps.get_app_config('commands_sql')
        output = sql_all(app_config, no_style(), connections[DEFAULT_DB_ALIAS])
        self.assertEqual(self.count_ddl(output, 'CREATE TABLE'), 3)
        # PostgreSQL creates one additional index for CharField
        self.assertIn(self.count_ddl(output, 'CREATE INDEX'), [3, 4])
class TestRouter(object):
    """Database router stub that vetoes migration of every model."""

    def allow_migrate(self, db, model):
        # Deny unconditionally, so the sql_* commands should emit nothing.
        return False
class SQLCommandsRouterTestCase(TestCase):
    """Checks that the sql_* helpers honor database routers."""

    def setUp(self):
        # Swap in a router that denies migration for every model; restored
        # in tearDown.
        self._old_routers = router.routers
        router.routers = [TestRouter()]

    def tearDown(self):
        router.routers = self._old_routers

    def test_router_honored(self):
        app_config = apps.get_app_config('commands_sql')
        for sql_command in (sql_all, sql_create, sql_delete, sql_indexes, sql_destroy_indexes):
            # sql_delete takes an extra keyword the others do not accept.
            if sql_command is sql_delete:
                output = sql_command(app_config, no_style(), connections[DEFAULT_DB_ALIAS], close_connection=False)
            else:
                output = sql_command(app_config, no_style(), connections[DEFAULT_DB_ALIAS])
            # With every model vetoed, no SQL at all should be produced.
            self.assertEqual(len(output), 0,
                "%s command is not honoring routers" % sql_command.__name__)
| bsd-3-clause |
Andreasdahlberg/sillycat | scripts/rfm69_check.py | 1 | 3685 | #!/usr/bin/env python3
# -*- coding: utf-8 -*
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
__author__ = 'andreas.dahlberg90@gmail.com (Andreas Dahlberg)'
__version__ = '0.1.0'
# RFM69 crystal oscillator frequency (Hz).
FXOSC = 32000000.0
# Frequency synthesizer step size (Hz).  BUG FIX: the original computed
# FXOSC / (2^19), but '^' is bitwise XOR in Python (2^19 == 17), not
# exponentiation; the RFM69 datasheet defines FSTEP = FXOSC / 2**19.
FSTEP = FXOSC / (2 ** 19)
def check_oversampling_rule(bitrate, channel_filter_bandwith):
    """Return True when the bit-rate is below half the channel filter bandwidth."""
    half_bandwidth = channel_filter_bandwith / 2
    return bitrate < half_bandwidth
def check_receiver_bandwidth_rule(channel_filter_bandwith, frequency_deviation):
    """Return True when the channel filter bandwidth exceeds twice the deviation."""
    return (2 * frequency_deviation) < channel_filter_bandwith
def check_modulation_index_rule(bitrate, frequency_deviation):
    """Return True when the modulation index (2*fdev/bitrate) is in [0.5, 10]."""
    modulation_index = 2 * frequency_deviation / bitrate
    return 0.5 <= modulation_index <= 10
def check_modulation_rule(bitrate, frequency_deviation):
    """Return True when fdev + bitrate/2 stays within the 500 kHz limit."""
    occupied = frequency_deviation + bitrate / 2
    return occupied <= 500000
def check_frequency_deviation_rule(frequency_deviation):
    """Return True when the frequency deviation exceeds the 600 Hz minimum."""
    return 600 < frequency_deviation
def check_all_rules(bitrate, channel_filter_bandwith, frequency_deviation, verbose):
    """Evaluate every RFM69 configuration rule.

    Returns the number of violated rules (0 means the configuration is
    valid).  With *verbose* set, prints a description of each violation
    followed by a summary line.
    """
    # Every rule is evaluated eagerly, exactly as before; each entry is
    # (passed, headline, detail) for the reporting loop below.
    rules = (
        (check_oversampling_rule(bitrate, channel_filter_bandwith),
         '* Oversampling rule violated',
         ' The bit-rate cannot be set higher than two times the channel filter '
         'bandwidth.'),
        (check_receiver_bandwidth_rule(channel_filter_bandwith, frequency_deviation),
         '* Receiver bandwidth rule violated',
         ' The channel filter bandwidth must be set higher than two times the frequency '
         'deviation.'),
        (check_modulation_index_rule(bitrate, frequency_deviation),
         '* Modulation index rule violated',
         ' The modulation index must be between 0.5 and 10.'),
        (check_modulation_rule(bitrate, frequency_deviation),
         '* Modulation rule violated',
         ' fdev + bitrate / 2 <= 500000'),
        (check_frequency_deviation_rule(frequency_deviation),
         '* Frequency deviation rule violated',
         ' Frequency deviation must be set higher than 600 Hz.'),
    )

    failed_checks = 0
    for passed, headline, detail in rules:
        if passed:
            continue
        failed_checks += 1
        if verbose:
            print(headline)
            print(detail)
            print()

    if verbose:
        if failed_checks == 0:
            print('All checks passed')
        else:
            print('Number of failed checks: {}'.format(failed_checks))
    return failed_checks
def main():
    """Parse the command line and run all RFM69 configuration checks.

    Returns the number of failed checks (0 means the configuration is
    valid); the module entry point passes this to exit() as the process
    status code.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('bitrate', help="Transceiver bit-rate in b/s", type=int)
    parser.add_argument('rxbw', help="Channel filter bandwidth in Hz", type=int)
    parser.add_argument('fdev', help="Frequency deviation in Hz", type=int)
    parser.add_argument('-v', '--verbose', action='store_true')
    args = parser.parse_args()
    return check_all_rules(args.bitrate, args.rxbw, args.fdev, args.verbose)
if __name__ == '__main__':
exit(main())
| gpl-3.0 |
gem/oq-hazardlib | openquake/hazardlib/scalerel/gsc_offshore_thrusts.py | 1 | 6407 | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2015-2017 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Rupture scaling models as used for the 2015 Seismic Hazard Model of Canada, as
described in Adams, J., S. Halchuk, T. Allen, and G. Rogers (2015). Canada's
5th Generation seismic hazard model, as prepared for the 2015 National Building
Code of Canada, 11th Canadian Conference on Earthquake Engineering, Victoria,
Canada, Paper 93775.
Module :mod:`openquake.hazardlib.scalerel.gsc_offshore_thrusts` implements
:class:`GSCCascadia`
:class:`GSCEISO`
:class:`GSCEISB`
:class:`GSCEISI`
:class:`GSCOffshoreThrustsWIN`
:class:`GSCOffshoreThrustsHGT`.
"""
from openquake.hazardlib.scalerel.base import BaseMSRSigma
from math import sin, radians
class GSCCascadia(BaseMSRSigma):
    """
    Implements magnitude-area scaling relationship for the Juan de Fuca segment
    of the Cascadia subduction zone.

    :param SEIS_WIDTH:
        Hard-wired seismogenic width of the CIS source (125 km)
    """
    # Fixed down-dip rupture width in km.
    SEIS_WIDTH = 125.0

    def get_median_area(self, mag, rake):
        """
        The values are a function of magnitude.
        """
        # thrust/reverse
        return (10.0 ** (3.01 + 0.001 * mag)) * self.SEIS_WIDTH

    def get_std_dev_area(self, mag, rake):
        """
        Standard deviation for GSCCascadia. Magnitude is ignored.
        """
        # thrust/reverse
        return 0.01
class GSCEISO(BaseMSRSigma):
    """
    Implements magnitude-area scaling relationship for the outboard estimate of
    rupture (16 km depth) for the Explorer segment of the Cascadia subduction
    zone with an upper seismogenic depth of 5 km and a dip of 18 degrees.
    """
    # Thickness between 16 km lower seismogenic depth and 5 km upper
    # seismogenic depth
    SEIS_WIDTH = 11. / sin(radians(18.0))

    def get_median_area(self, mag, rake):
        """
        The values are a function of magnitude.
        """
        # thrust/reverse
        return (10.0 ** (1.90 + 0.001 * mag)) * self.SEIS_WIDTH

    def get_std_dev_area(self, mag, rake):
        """
        Standard deviation for GSCEISO. Magnitude is ignored.
        """
        # thrust/reverse
        return 0.01
class GSCEISB(BaseMSRSigma):
    """
    Implements magnitude-area scaling relationship for best estimate landward
    extent of rupture (22 km depth) for the Explorer segment of the Cascadia
    subduction zone with an upper seismogenic depth of 5 km and a dip of 18
    degrees.
    """
    # Thickness between 22 km lower seismogenic depth and 5 km upper
    # seismogenic depth
    SEIS_WIDTH = 17.0 / sin(radians(18.))

    def get_median_area(self, mag, rake):
        """
        The values are a function of magnitude.
        """
        # thrust/reverse
        return (10.0 ** (1.90 + 0.001 * mag)) * self.SEIS_WIDTH

    def get_std_dev_area(self, mag, rake):
        """
        Standard deviation for GSCEISB. Magnitude is ignored.
        """
        # thrust/reverse
        return 0.01
class GSCEISI(BaseMSRSigma):
    """
    Implements magnitude-area scaling relationship for the inboard estimate of
    rupture (28 km depth) for the Explorer segment of the Cascadia subduction
    zone with an upper seismogenitc depth of 5 km and a dip of 18 degrees.
    """
    # Thickness between 28 km lower seismogenic depth and 5 km upper
    # seismogenic depth
    SEIS_WIDTH = 23.0 / sin(radians(18.))

    def get_median_area(self, mag, rake):
        """
        The values are a function of magnitude.
        """
        # thrust/reverse
        return (10.0 ** (1.90 + 0.001 * mag)) * self.SEIS_WIDTH

    def get_std_dev_area(self, mag, rake):
        """
        Standard deviation for GSCEISI. Magnitude is ignored.
        """
        # thrust/reverse
        return 0.01
class GSCOffshoreThrustsWIN(BaseMSRSigma):
    """
    Implements magnitude-area scaling relationship for the Winona segment of
    the Jan de Fuca subduction zone that is approximately scaled to give a
    rupture length of 300 km for a MW 8 earthquake and fit the rupture length
    of the M7.8 2012 Haida Gwaii earthquake. Ruptures assume an upper and
    lower seismogenic depth of 2 km and 5 km respectively, with a dip of 15
    degrees.
    """
    # Thickness between 5 km lower seismogenic depth and 2 km upper
    # seismogenic depth
    SEIS_WIDTH = 3.0 / sin(radians(15.0))

    def get_median_area(self, mag, rake):
        """
        The values are a function of magnitude.
        """
        # thrust/reverse for WIN
        return (10.0 ** (-2.943 + 0.677 * mag)) * self.SEIS_WIDTH

    def get_std_dev_area(self, mag, rake):
        """
        Standard deviation for GSCOffshoreThrustsWIN. Magnitude is ignored.
        """
        # thrust/reverse
        return 0.2
class GSCOffshoreThrustsHGT(BaseMSRSigma):
    """
    Implements magnitude-area scaling relationship that is approximately scaled
    to give a rupture length of 300 km for a MW 8 earthquake and fit the
    rupture length of the M7.8 2012 Haida Gwaii earthquake. Ruptures assume an
    upper and lower seismogenitc depth of 3 km and 22 km, respectively, with a
    dip of 25 degrees.
    """
    # Thickness between 22 km lower seismogenic depth and 3 km upper
    # seismogenic depth
    SEIS_WIDTH = 19.0 / sin(radians(25.0))  # 19 = 22 - 3

    def get_median_area(self, mag, rake):
        """
        The values are a function of magnitude.
        """
        # thrust/reverse for HGT
        return (10.0 ** (-2.943 + 0.677 * mag)) * self.SEIS_WIDTH

    def get_std_dev_area(self, mag, rake):
        """
        Standard deviation for GSCOffshoreThrustsHGT. Magnitude is ignored.
        """
        # thrust/reverse
        return 0.2
| agpl-3.0 |
uiri/pxqz | venv/lib/python2.7/site-packages/django/contrib/gis/geos/prototypes/prepared.py | 623 | 1032 | from ctypes import c_char
from django.contrib.gis.geos.libgeos import GEOM_PTR, PREPGEOM_PTR
from django.contrib.gis.geos.prototypes.errcheck import check_predicate
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
# Prepared geometry constructor and destructors.
geos_prepare = GEOSFunc('GEOSPrepare')
geos_prepare.argtypes = [GEOM_PTR]
geos_prepare.restype = PREPGEOM_PTR

prepared_destroy = GEOSFunc('GEOSPreparedGeom_destroy')
# BUG FIX: was 'argtpes' (typo), which set an unused attribute and silently
# left the destructor's ctypes argument types undeclared.
prepared_destroy.argtypes = [PREPGEOM_PTR]
prepared_destroy.restype = None
# Prepared geometry binary predicate support.
def prepared_predicate(func):
    """Configure *func* as a GEOS prepared-geometry binary predicate
    (prepared geometry, geometry) -> char, and return it."""
    func.argtypes = [PREPGEOM_PTR, GEOM_PTR]
    func.errcheck = check_predicate
    func.restype = c_char
    return func
# Register the GEOS prepared-geometry predicates; each takes a prepared
# geometry and a regular geometry and returns a boolean.
prepared_contains = prepared_predicate(GEOSFunc('GEOSPreparedContains'))
prepared_contains_properly = prepared_predicate(GEOSFunc('GEOSPreparedContainsProperly'))
prepared_covers = prepared_predicate(GEOSFunc('GEOSPreparedCovers'))
prepared_intersects = prepared_predicate(GEOSFunc('GEOSPreparedIntersects'))
| gpl-3.0 |
CMSS-BCRDB/RDS | trove/guestagent/datastore/experimental/postgresql/service/process.py | 4 | 3478 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from trove.common import cfg
from trove.common import utils
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.postgresql.service.status import (
PgSqlAppStatus)
from trove.openstack.common import log as logging
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
PGSQL_SERVICE_CANDIDATES = ("postgresql",)
class PgSqlProcess(object):
    """Mixin that manages the PgSql process."""

    # NOTE(review): the i18n marker '_()' is used below but no import of '_'
    # is visible in this module -- confirm it is installed as a builtin
    # (gettext) or add the appropriate trove i18n import.

    def start_db(self, context):
        self._enable_pgsql_on_boot()
        # NOTE(review): the string below follows the first statement, so it
        # is a no-op expression rather than a real docstring; kept in place
        # to preserve the code unchanged.
        """Start the PgSql service."""
        cmd = operating_system.service_discovery(PGSQL_SERVICE_CANDIDATES)
        LOG.info(
            _("{guest_id}: Starting database engine with command ({command}).")
            .format(
                guest_id=CONF.guest_id,
                command=cmd['cmd_start'],
            )
        )
        utils.execute_with_timeout(
            *cmd['cmd_start'].split(),
            timeout=30
        )

    def _enable_pgsql_on_boot(self):
        # Mark the service to start automatically at boot; raises
        # RuntimeError when no known init system command is found.
        try:
            pgsql_service = operating_system.service_discovery(
                PGSQL_SERVICE_CANDIDATES)
            utils.execute_with_timeout(pgsql_service['cmd_enable'],
                                       shell=True)
        except KeyError:
            LOG.exception(_("Error enabling PostgreSQL start on boot."))
            raise RuntimeError("Service is not discovered.")

    def _disable_pgsql_on_boot(self):
        # Inverse of _enable_pgsql_on_boot; same error handling.
        try:
            pgsql_service = operating_system.service_discovery(
                PGSQL_SERVICE_CANDIDATES)
            utils.execute_with_timeout(pgsql_service['cmd_disable'],
                                       shell=True)
        except KeyError:
            LOG.exception(_("Error disabling PostgreSQL start on boot."))
            raise RuntimeError("Service is not discovered.")

    def stop_db(self, context, do_not_start_on_reboot=False):
        """Stop the PgSql service."""
        if do_not_start_on_reboot:
            self._disable_pgsql_on_boot()
        cmd = operating_system.service_discovery(PGSQL_SERVICE_CANDIDATES)
        LOG.info(
            _("{guest_id}: Stopping database engine with command ({command}).")
            .format(
                guest_id=CONF.guest_id,
                command=cmd['cmd_stop'],
            )
        )
        utils.execute_with_timeout(
            *cmd['cmd_stop'].split(),
            timeout=30
        )

    def restart(self, context):
        """Restart the PgSql service."""
        LOG.info(
            _("{guest_id}: Restarting database engine.").format(
                guest_id=CONF.guest_id,
            )
        )
        # Bracket the stop/start with status-tracking so the API reports the
        # instance as restarting; end_install_or_restart runs even on failure.
        try:
            PgSqlAppStatus.get().begin_restart()
            self.stop_db(context)
            self.start_db(context)
        finally:
            PgSqlAppStatus.get().end_install_or_restart()
| apache-2.0 |
QuantCrimAtLeeds/PredictCode | open_cp/gui/tk/session_view.py | 1 | 1550 | """
session_view
~~~~~~~~~~~~
"""
import tkinter as tk
import tkinter.ttk as ttk
import open_cp.gui.tk.util as util
_text = {
"none" : "No recent sessions found",
"cancel" : "Cancel",
}
class SessionView(ttk.Frame):
    """Modal list of recent sessions; pressing one notifies the controller,
    pressing Cancel (or closing the window) dismisses the dialog."""

    def __init__(self, parent, controller, model):
        super().__init__(parent)
        self._parent = parent
        self.controller = controller
        self.model = model
        # Closing the window via the window manager behaves like "Cancel".
        self.master.protocol("WM_DELETE_WINDOW", self.cancel)
        self.grid(sticky=util.NSEW)
        util.stretchy_rows_cols(self, range(101), [0])
        self._add_widgets()
        self.resize()

    def resize(self, final=False):
        # Recompute the requested size and centre the window on screen.
        self.update_idletasks()
        util.centre_window(self._parent, self._parent.winfo_reqwidth(), self._parent.winfo_reqheight())

    def _add_widgets(self):
        # One button per recent session; row 100 is reserved for "Cancel".
        if len(self.model.recent_sessions) == 0:
            la = ttk.Label(self, text=_text["none"], anchor=tk.CENTER)
            la.grid(row=0, column=0, padx=2, pady=2, sticky=tk.EW)
        for index, name in enumerate(self.model.recent_sessions):
            # Bind `index` as a default argument so each button's callback
            # captures its own value (avoids the late-binding closure bug).
            b = ttk.Button(self, text=name, command=lambda i=index : self._pressed(i))
            b.grid(row=index, column=0, padx=2, pady=2, sticky=tk.NSEW)
        b = ttk.Button(self, text=_text["cancel"], command=self.cancel)
        b.grid(row=100, column=0, padx=2, pady=2, sticky=tk.NSEW)

    def _pressed(self, index):
        self.controller.selected(index)

    def cancel(self, event=None):
        self.destroy()
| artistic-2.0 |
zultron/virt-manager | virtManager/network.py | 2 | 5678 | #
# Copyright (C) 2006, 2013 Red Hat, Inc.
# Copyright (C) 2006 Daniel P. Berrange <berrange@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
#
import ipaddr
from virtinst import Network
from virtManager.libvirtobject import vmmLibvirtObject
def _make_addr_str(addrStr, prefix, netmaskStr):
    """Return the masked network string for an address, using the prefix if
    given, else the netmask if given, else the address alone."""
    if prefix:
        combined = "%s/%s" % (addrStr, prefix)
        return str(ipaddr.IPNetwork(combined).masked())
    if netmaskStr:
        netmask = ipaddr.IPAddress(netmaskStr)
        network = ipaddr.IPAddress(addrStr)
        combined = "%s/%s" % (network, netmask)
        return str(ipaddr.IPNetwork(combined).masked())
    return str(ipaddr.IPNetwork(str(addrStr)))
class vmmNetwork(vmmLibvirtObject):
    """UI-side wrapper around a libvirt virtual network object."""

    def __init__(self, conn, backend, key):
        vmmLibvirtObject.__init__(self, conn, backend, key, Network)

        # Cached active state; refreshed by tick().
        self._active = True
        # Lazily-probed support for virNetworkIsActive (None = not yet checked).
        self._support_isactive = None

        self.tick()

    ##########################
    # Required class methods #
    ##########################

    def get_name(self):
        return self._backend.name()

    def _XMLDesc(self, flags):
        return self._backend.XMLDesc(flags)

    def _define(self, xml):
        return self.conn.define_network(xml)

    ###########
    # Actions #
    ###########

    def _backend_get_active(self):
        # Probe once whether the connection supports NET_ISACTIVE; when it
        # doesn't, assume the network is running.
        if self._support_isactive is None:
            self._support_isactive = self.conn.check_support(
                self.conn.SUPPORT_NET_ISACTIVE, self._backend)

        if not self._support_isactive:
            return True
        return bool(self._backend.isActive())

    def _set_active(self, state):
        if state == self._active:
            return
        # Emit "started"/"stopped" from the main loop, then record the state.
        self.idle_emit(state and "started" or "stopped")
        self._active = state

    def is_active(self):
        return self._active

    def _kick_conn(self):
        # Ask the connection to refresh its network list soon.
        self.conn.schedule_priority_tick(pollnet=True)

    def start(self):
        self._backend.create()
        self._kick_conn()

    def stop(self):
        self._backend.destroy()
        self._kick_conn()

    def delete(self, force=True):
        # 'force' is accepted for API symmetry with other objects but has no
        # effect here.
        ignore = force
        self._backend.undefine()
        self._backend = None
        self._kick_conn()

    def get_autostart(self):
        return self._backend.autostart()

    def set_autostart(self, value):
        self._backend.setAutostart(value)

    def tick(self):
        self._set_active(self._backend_get_active())

    def define_name(self, newname):
        return self._define_name_helper("network",
                                        self.conn.rename_network,
                                        newname)

    ###############
    # XML parsing #
    ###############

    def get_uuid(self):
        return self.get_xmlobj().uuid

    def get_bridge_device(self):
        return self.get_xmlobj().bridge

    def get_name_domain(self):
        return self.get_xmlobj().domain_name

    def get_ipv6_enabled(self):
        return self.get_xmlobj().ipv6

    def get_ipv4_forward_mode(self):
        return self.get_xmlobj().forward.mode

    def pretty_forward_mode(self):
        return self.get_xmlobj().forward.pretty_desc()

    def can_pxe(self):
        return self.get_xmlobj().can_pxe()

    def _get_static_route(self, family):
        """Return [route network, gateway] for 'family', or [None, None]."""
        xmlobj = self.get_xmlobj()
        route = None
        for r in xmlobj.routes:
            # A <route> without an explicit family defaults to ipv4.
            if (r.family == family or (family == "ipv4" and not r.family)):
                route = r
                break
        if not route:
            return [None, None]

        routeAddr = _make_addr_str(route.address, route.prefix, route.netmask)
        routeVia = str(ipaddr.IPAddress(str(route.gateway)))

        if not routeAddr or not routeVia:
            return [None, None]
        return [routeAddr, routeVia]

    def _get_network(self, family):
        """Return [network string, [dhcp start, dhcp end]] for 'family'."""
        dhcpstart = None
        dhcpend = None

        xmlobj = self.get_xmlobj()
        ip = None
        # Prefer an <ip> element that defines a DHCP range...
        for i in xmlobj.ips:
            if (i.family == family or
                (family == "ipv4" and not i.family)):
                if i.ranges:
                    ip = i
                    dhcpstart = i.ranges[0].start
                    dhcpend = i.ranges[0].end
                    break
        # ...otherwise fall back to the first matching <ip>.
        if not ip:
            for i in xmlobj.ips:
                if (i.family == family or
                    (family == "ipv4" and not i.family)):
                    ip = i
                    break

        ret = None
        if ip:
            ret = _make_addr_str(ip.address, ip.prefix, ip.netmask)
        dhcp = [None, None]
        if dhcpstart and dhcpend:
            dhcp = [str(ipaddr.IPAddress(dhcpstart)),
                    str(ipaddr.IPAddress(dhcpend))]
        return [ret, dhcp]

    def get_ipv4_network(self):
        ret = self._get_network("ipv4")
        return ret + [self._get_static_route("ipv4")]

    def get_ipv6_network(self):
        ret = self._get_network("ipv6")
        return ret + [self._get_static_route("ipv6")]
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.